zlmdb 25.10.1 (cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of zlmdb might be problematic.
- flatbuffers/__init__.py +19 -0
- flatbuffers/_version.py +17 -0
- flatbuffers/builder.py +776 -0
- flatbuffers/compat.py +86 -0
- flatbuffers/encode.py +42 -0
- flatbuffers/flexbuffers.py +1527 -0
- flatbuffers/number_types.py +181 -0
- flatbuffers/packer.py +42 -0
- flatbuffers/reflection/AdvancedFeatures.py +10 -0
- flatbuffers/reflection/BaseType.py +24 -0
- flatbuffers/reflection/Enum.py +169 -0
- flatbuffers/reflection/EnumVal.py +96 -0
- flatbuffers/reflection/Field.py +208 -0
- flatbuffers/reflection/KeyValue.py +56 -0
- flatbuffers/reflection/Object.py +175 -0
- flatbuffers/reflection/RPCCall.py +131 -0
- flatbuffers/reflection/Schema.py +206 -0
- flatbuffers/reflection/SchemaFile.py +77 -0
- flatbuffers/reflection/Service.py +145 -0
- flatbuffers/reflection/Type.py +98 -0
- flatbuffers/reflection/__init__.py +0 -0
- flatbuffers/table.py +129 -0
- flatbuffers/util.py +43 -0
- zlmdb/__init__.py +312 -0
- zlmdb/_database.py +990 -0
- zlmdb/_errors.py +31 -0
- zlmdb/_meta.py +27 -0
- zlmdb/_pmap.py +1667 -0
- zlmdb/_schema.py +137 -0
- zlmdb/_transaction.py +181 -0
- zlmdb/_types.py +1596 -0
- zlmdb/_version.py +27 -0
- zlmdb/cli.py +41 -0
- zlmdb/flatbuffers/__init__.py +5 -0
- zlmdb/flatbuffers/reflection/AdvancedFeatures.py +10 -0
- zlmdb/flatbuffers/reflection/BaseType.py +25 -0
- zlmdb/flatbuffers/reflection/Enum.py +252 -0
- zlmdb/flatbuffers/reflection/EnumVal.py +144 -0
- zlmdb/flatbuffers/reflection/Field.py +325 -0
- zlmdb/flatbuffers/reflection/KeyValue.py +84 -0
- zlmdb/flatbuffers/reflection/Object.py +260 -0
- zlmdb/flatbuffers/reflection/RPCCall.py +195 -0
- zlmdb/flatbuffers/reflection/Schema.py +301 -0
- zlmdb/flatbuffers/reflection/SchemaFile.py +112 -0
- zlmdb/flatbuffers/reflection/Service.py +213 -0
- zlmdb/flatbuffers/reflection/Type.py +148 -0
- zlmdb/flatbuffers/reflection/__init__.py +0 -0
- zlmdb/flatbuffers/reflection.fbs +152 -0
- zlmdb/lmdb/__init__.py +37 -0
- zlmdb/lmdb/__main__.py +25 -0
- zlmdb/lmdb/_config.py +10 -0
- zlmdb/lmdb/_lmdb_cffi.cpython-312-aarch64-linux-gnu.so +0 -0
- zlmdb/lmdb/cffi.py +2606 -0
- zlmdb/lmdb/tool.py +670 -0
- zlmdb/tests/lmdb/__init__.py +0 -0
- zlmdb/tests/lmdb/address_book.py +287 -0
- zlmdb/tests/lmdb/crash_test.py +339 -0
- zlmdb/tests/lmdb/cursor_test.py +333 -0
- zlmdb/tests/lmdb/env_test.py +919 -0
- zlmdb/tests/lmdb/getmulti_test.py +92 -0
- zlmdb/tests/lmdb/iteration_test.py +258 -0
- zlmdb/tests/lmdb/package_test.py +70 -0
- zlmdb/tests/lmdb/test_lmdb.py +188 -0
- zlmdb/tests/lmdb/testlib.py +185 -0
- zlmdb/tests/lmdb/tool_test.py +60 -0
- zlmdb/tests/lmdb/txn_test.py +575 -0
- zlmdb/tests/orm/MNodeLog.py +853 -0
- zlmdb/tests/orm/__init__.py +0 -0
- zlmdb/tests/orm/_schema_fbs.py +215 -0
- zlmdb/tests/orm/_schema_mnode_log.py +1201 -0
- zlmdb/tests/orm/_schema_py2.py +250 -0
- zlmdb/tests/orm/_schema_py3.py +307 -0
- zlmdb/tests/orm/_test_flatbuffers.py +144 -0
- zlmdb/tests/orm/_test_serialization.py +144 -0
- zlmdb/tests/orm/test_basic.py +217 -0
- zlmdb/tests/orm/test_etcd.py +275 -0
- zlmdb/tests/orm/test_pmap_indexes.py +466 -0
- zlmdb/tests/orm/test_pmap_types.py +90 -0
- zlmdb/tests/orm/test_pmaps.py +295 -0
- zlmdb/tests/orm/test_select.py +619 -0
- zlmdb-25.10.1.dist-info/METADATA +264 -0
- zlmdb-25.10.1.dist-info/RECORD +87 -0
- zlmdb-25.10.1.dist-info/WHEEL +7 -0
- zlmdb-25.10.1.dist-info/entry_points.txt +2 -0
- zlmdb-25.10.1.dist-info/licenses/LICENSE +137 -0
- zlmdb-25.10.1.dist-info/licenses/NOTICE +41 -0
- zlmdb-25.10.1.dist-info/top_level.txt +2 -0
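
For orientation: the wheel vendors a py-lmdb-compatible binding under zlmdb/lmdb/ (cffi.py below defines Environment, Transaction, and the error hierarchy) alongside a bundled flatbuffers package. A minimal usage sketch follows; it assumes that zlmdb.lmdb re-exports the Environment API defined in cffi.py the way upstream py-lmdb does, since zlmdb/lmdb/__init__.py is listed above but its contents are not shown here.

    # Sketch only: the import path and re-exports are assumptions, not confirmed by this diff.
    from zlmdb import lmdb

    env = lmdb.Environment("/tmp/zlmdb-demo", map_size=10 * 1024 * 1024, max_dbs=2)
    with env.begin(write=True) as txn:   # one write transaction at a time
        txn.put(b"key-1", b"value-1")
    with env.begin() as txn:             # read-only transaction
        assert txn.get(b"key-1") == b"value-1"
    env.close()

Transaction.put() and Transaction.get() are defined later in cffi.py, beyond the portion of the file reproduced below.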
zlmdb/lmdb/cffi.py
ADDED
@@ -0,0 +1,2606 @@
#
# Copyright 2013 The py-lmdb authors, all rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# OpenLDAP is a registered trademark of the OpenLDAP Foundation.
#
# Individual files and/or contributed packages may be copyright by
# other parties and/or subject to additional restrictions.
#
# This work also contains materials derived from public sources.
#
# Additional information about OpenLDAP can be obtained at
# <http://www.openldap.org/>.
#

"""
CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database.

Please see https://lmdb.readthedocs.io/
"""

from __future__ import absolute_import
from __future__ import with_statement

import errno
import inspect
import os
import sys
import threading

is_win32 = sys.platform == "win32"
if is_win32:
    import msvcrt

try:
    import __builtin__  # type: ignore[import-not-found]
except ImportError:
    import builtins as __builtin__  # type: ignore


def _reading_docs():
    """Return True if Sphinx is currently parsing this file."""
    return "sphinx" in __import__("sys").modules


try:
    from zlmdb.lmdb import _config
except ImportError:
    _config = None  # type: ignore


__all__ = [
    "Cursor",
    "Environment",
    "Transaction",
    "_Database",
    "enable_drop_gil",
    "version",
]

__all__ += [
    "BadDbiError",
    "BadRslotError",
    "BadTxnError",
    "BadValsizeError",
    "CorruptedError",
    "CursorFullError",
    "DbsFullError",
    "DiskError",
    "Error",
    "IncompatibleError",
    "InvalidError",
    "InvalidParameterError",
    "KeyExistsError",
    "LockError",
    "MapFullError",
    "MapResizedError",
    "MemoryError",
    "NotFoundError",
    "PageFullError",
    "PageNotFoundError",
    "PanicError",
    "ReadersFullError",
    "ReadonlyError",
    "TlsFullError",
    "TxnFullError",
    "VersionMismatchError",
]


# Handle moronic Python 3 mess.
UnicodeType = getattr(__builtin__, "unicode", str)
BytesType = getattr(__builtin__, "bytes", str)

O_0755 = int("0755", 8)
O_0111 = int("0111", 8)
EMPTY_BYTES = UnicodeType().encode()


# Used to track context across CFFI callbacks.
_callbacks = threading.local()

_CFFI_CDEF = """
    typedef int mode_t;
    typedef ... MDB_env;
    typedef struct MDB_txn MDB_txn;
    typedef struct MDB_cursor MDB_cursor;
    typedef unsigned int MDB_dbi;
    enum MDB_cursor_op {
        MDB_FIRST,
        MDB_FIRST_DUP,
        MDB_GET_BOTH,
        MDB_GET_BOTH_RANGE,
        MDB_GET_CURRENT,
        MDB_GET_MULTIPLE,
        MDB_LAST,
        MDB_LAST_DUP,
        MDB_NEXT,
        MDB_NEXT_DUP,
        MDB_NEXT_MULTIPLE,
        MDB_NEXT_NODUP,
        MDB_PREV,
        MDB_PREV_DUP,
        MDB_PREV_NODUP,
        MDB_SET,
        MDB_SET_KEY,
        MDB_SET_RANGE,
        ...
    };
    typedef enum MDB_cursor_op MDB_cursor_op;

    struct MDB_val {
        size_t mv_size;
        void *mv_data;
        ...;
    };
    typedef struct MDB_val MDB_val;

    struct MDB_stat {
        unsigned int ms_psize;
        unsigned int ms_depth;
        size_t ms_branch_pages;
        size_t ms_leaf_pages;
        size_t ms_overflow_pages;
        size_t ms_entries;
        ...;
    };
    typedef struct MDB_stat MDB_stat;

    struct MDB_envinfo {
        void *me_mapaddr;
        size_t me_mapsize;
        size_t me_last_pgno;
        size_t me_last_txnid;
        unsigned int me_maxreaders;
        unsigned int me_numreaders;
        ...;
    };
    typedef struct MDB_envinfo MDB_envinfo;

    typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b);
    typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr,
                                 void *relctx);

    char *mdb_strerror(int err);
    int mdb_env_create(MDB_env **env);
    int mdb_env_open(MDB_env *env, const char *path, unsigned int flags,
                     mode_t mode);
    int mdb_env_copy2(MDB_env *env, const char *path, int flags);
    int mdb_env_copyfd2(MDB_env *env, int fd, int flags);
    int mdb_env_stat(MDB_env *env, MDB_stat *stat);
    int mdb_env_info(MDB_env *env, MDB_envinfo *stat);
    int mdb_env_get_maxkeysize(MDB_env *env);
    int mdb_env_sync(MDB_env *env, int force);
    void mdb_env_close(MDB_env *env);
    int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff);
    int mdb_env_get_flags(MDB_env *env, unsigned int *flags);
    int mdb_env_get_path(MDB_env *env, const char **path);
    int mdb_env_set_mapsize(MDB_env *env, size_t size);
    int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers);
    int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers);
    int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs);
    int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags,
                      MDB_txn **txn);
    int mdb_txn_commit(MDB_txn *txn);
    void mdb_txn_reset(MDB_txn *txn);
    int mdb_txn_renew(MDB_txn *txn);
    void mdb_txn_abort(MDB_txn *txn);
    size_t mdb_txn_id(MDB_txn *txn);
    int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags,
                     MDB_dbi *dbi);
    int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat);
    int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_);
    int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data);
    int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor);
    void mdb_cursor_close(MDB_cursor *cursor);
    int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags);
    int mdb_cursor_count(MDB_cursor *cursor, size_t *countp);
    int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op);

    typedef int (MDB_msg_func)(const char *msg, void *ctx);
    int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx);
    int mdb_reader_check(MDB_env *env, int *dead);
    int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags);

    #define MDB_VERSION_MAJOR ...
    #define MDB_VERSION_MINOR ...
    #define MDB_VERSION_PATCH ...

    #define EACCES ...
    #define EAGAIN ...
    #define EINVAL ...
    #define ENOMEM ...
    #define ENOSPC ...

    #define MDB_BAD_RSLOT ...
    #define MDB_BAD_DBI ...
    #define MDB_BAD_TXN ...
    #define MDB_BAD_VALSIZE ...
    #define MDB_CORRUPTED ...
    #define MDB_CURSOR_FULL ...
    #define MDB_DBS_FULL ...
    #define MDB_INCOMPATIBLE ...
    #define MDB_INVALID ...
    #define MDB_KEYEXIST ...
    #define MDB_MAP_FULL ...
    #define MDB_MAP_RESIZED ...
    #define MDB_NOTFOUND ...
    #define MDB_PAGE_FULL ...
    #define MDB_PAGE_NOTFOUND ...
    #define MDB_PANIC ...
    #define MDB_READERS_FULL ...
    #define MDB_TLS_FULL ...
    #define MDB_TXN_FULL ...
    #define MDB_VERSION_MISMATCH ...

    #define MDB_APPEND ...
    #define MDB_APPENDDUP ...
    #define MDB_CP_COMPACT ...
    #define MDB_CREATE ...
    #define MDB_DUPFIXED ...
    #define MDB_DUPSORT ...
    #define MDB_INTEGERDUP ...
    #define MDB_INTEGERKEY ...
    #define MDB_MAPASYNC ...
    #define MDB_NODUPDATA ...
    #define MDB_NOLOCK ...
    #define MDB_NOMEMINIT ...
    #define MDB_NOMETASYNC ...
    #define MDB_NOOVERWRITE ...
    #define MDB_NORDAHEAD ...
    #define MDB_NOSUBDIR ...
    #define MDB_NOSYNC ...
    #define MDB_NOTLS ...
    #define MDB_RDONLY ...
    #define MDB_REVERSEKEY ...
    #define MDB_WRITEMAP ...

    // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
    // CFFI will use PyString_AS_STRING when passed as an argument.
    static int pymdb_del(MDB_txn *txn, MDB_dbi dbi,
                         char *key_s, size_t keylen,
                         char *val_s, size_t vallen);
    static int pymdb_put(MDB_txn *txn, MDB_dbi dbi,
                         char *key_s, size_t keylen,
                         char *val_s, size_t vallen,
                         unsigned int flags);
    static int pymdb_get(MDB_txn *txn, MDB_dbi dbi,
                         char *key_s, size_t keylen,
                         MDB_val *val_out);
    static int pymdb_cursor_get(MDB_cursor *cursor,
                                char *key_s, size_t key_len,
                                char *data_s, size_t data_len,
                                MDB_val *key, MDB_val *data, int op);
    static int pymdb_cursor_put(MDB_cursor *cursor,
                                char *key_s, size_t keylen,
                                char *val_s, size_t vallen, int flags);

    // Prefaults a range
    static void preload(int rc, void *x, size_t size);

"""
_CFFI_CDEF_PATCHED = """
    int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn);
    int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn);
"""

_CFFI_VERIFY = """
    #include <sys/stat.h>
    #include "lmdb.h"
    #include "preload.h"

    // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where
    // CFFI will use PyString_AS_STRING when passed as an argument.
    static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
                         MDB_val *val_out)
    {
        MDB_val key = {keylen, key_s};
        int rc = mdb_get(txn, dbi, &key, val_out);
        return rc;
    }

    static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
                         char *val_s, size_t vallen, unsigned int flags)
    {
        MDB_val key = {keylen, key_s};
        MDB_val val = {vallen, val_s};
        return mdb_put(txn, dbi, &key, &val, flags);
    }

    static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen,
                         char *val_s, size_t vallen)
    {
        MDB_val key = {keylen, key_s};
        MDB_val val = {vallen, val_s};
        MDB_val *valptr;
        if(vallen == 0) {
            valptr = NULL;
        } else {
            valptr = &val;
        }
        return mdb_del(txn, dbi, &key, valptr);
    }

    static int pymdb_cursor_get(MDB_cursor *cursor,
                                char *key_s, size_t key_len,
                                char *data_s, size_t data_len,
                                MDB_val *key, MDB_val *data, int op)
    {
        MDB_val tmp_key = {key_len, key_s};
        MDB_val tmp_data = {data_len, data_s};
        int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op);
        if(! rc) {
            *key = tmp_key;
            *data = tmp_data;
        }
        return rc;
    }

    static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen,
                                char *val_s, size_t vallen, int flags)
    {
        MDB_val tmpkey = {keylen, key_s};
        MDB_val tmpval = {vallen, val_s};
        return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags);
    }

"""

if not _reading_docs():
    import cffi

    # Try to use distutils-bundled CFFI configuration to avoid a recompile and
    # potential compile errors during first module import.
    _config_vars = (
        _config.CONFIG
        if _config
        else {
            "extra_compile_args": ["-w"],
            "extra_sources": ["lib/mdb.c", "lib/midl.c"],
            "extra_include_dirs": ["lib"],
            "extra_library_dirs": [],
            "libraries": [],
        }
    )

    _have_patched_lmdb = "-DHAVE_PATCHED_LMDB=1" in _config.CONFIG["extra_compile_args"]  # type: ignore

    if _have_patched_lmdb:
        _CFFI_CDEF += _CFFI_CDEF_PATCHED

    # Try to load pre-compiled extension first (for binary wheels)
    _lib = None
    _ffi = None
    try:
        from zlmdb.lmdb import _lmdb_cffi  # type: ignore[attr-defined]

        _lib = _lmdb_cffi.lib
        _ffi = _lmdb_cffi.ffi
    except ImportError:
        pass

    # Fall back to creating our own FFI instance for source builds
    if _ffi is None:
        _ffi = cffi.FFI()
        _ffi.cdef(_CFFI_CDEF)

    # Fall back to ffi.verify() for source builds
    if _lib is None:
        _lib = _ffi.verify(
            _CFFI_VERIFY,
            modulename="lmdb_cffi",
            ext_package="lmdb",
            sources=_config_vars["extra_sources"],
            extra_compile_args=_config_vars["extra_compile_args"],
            include_dirs=_config_vars["extra_include_dirs"],
            libraries=_config_vars["libraries"],
            library_dirs=_config_vars["extra_library_dirs"],
        )

    @_ffi.callback("int(char *, void *)")
    def _msg_func(s, _):
        """mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list."""
        _callbacks.msg_func.append(_ffi.string(s).decode())
        return 0


class Error(Exception):
    """Raised when an LMDB-related error occurs, and no more specific
    :py:class:`lmdb.Error` subclass exists."""

    def __init__(self, what, code=0):
        self.what = what
        self.code = code
        self.reason = _ffi.string(_lib.mdb_strerror(code))
        msg = what
        if code:
            msg = "%s: %s" % (what, self.reason)
            hint = getattr(self, "MDB_HINT", None)
            if hint:
                msg += " (%s)" % (hint,)
        Exception.__init__(self, msg)


class KeyExistsError(Error):
    """Key/data pair already exists."""

    MDB_NAME = "MDB_KEYEXIST"


class NotFoundError(Error):
    """No matching key/data pair found.

    Normally py-lmdb indicates a missing key by returning ``None``, or a
    user-supplied default value, however LMDB may return this error where
    py-lmdb does not know to convert it into a non-exceptional return.
    """

    MDB_NAME = "MDB_NOTFOUND"


class PageNotFoundError(Error):
    """Requested page not found."""

    MDB_NAME = "MDB_PAGE_NOTFOUND"


class CorruptedError(Error):
    """Located page was of the wrong type."""

    MDB_NAME = "MDB_CORRUPTED"


class PanicError(Error):
    """Update of meta page failed."""

    MDB_NAME = "MDB_PANIC"


class VersionMismatchError(Error):
    """Database environment version mismatch."""

    MDB_NAME = "MDB_VERSION_MISMATCH"


class InvalidError(Error):
    """File is not an MDB file."""

    MDB_NAME = "MDB_INVALID"


class MapFullError(Error):
    """Environment map_size= limit reached."""

    MDB_NAME = "MDB_MAP_FULL"
    MDB_HINT = "Please use a larger Environment(map_size=) parameter"


class DbsFullError(Error):
    """Environment max_dbs= limit reached."""

    MDB_NAME = "MDB_DBS_FULL"
    MDB_HINT = "Please use a larger Environment(max_dbs=) parameter"


class ReadersFullError(Error):
    """Environment max_readers= limit reached."""

    MDB_NAME = "MDB_READERS_FULL"
    MDB_HINT = "Please use a larger Environment(max_readers=) parameter"


class TlsFullError(Error):
    """Thread-local storage keys full - too many environments open."""

    MDB_NAME = "MDB_TLS_FULL"


class TxnFullError(Error):
    """Transaction has too many dirty pages - transaction too big."""

    MDB_NAME = "MDB_TXN_FULL"
    MDB_HINT = "Please do less work within your transaction"


class CursorFullError(Error):
    """Internal error - cursor stack limit reached."""

    MDB_NAME = "MDB_CURSOR_FULL"


class PageFullError(Error):
    """Internal error - page has no more space."""

    MDB_NAME = "MDB_PAGE_FULL"


class MapResizedError(Error):
    """Database contents grew beyond environment map_size=."""

    MDB_NAME = "MDB_MAP_RESIZED"


class IncompatibleError(Error):
    """Operation and DB incompatible, or DB flags changed."""

    MDB_NAME = "MDB_INCOMPATIBLE"


class BadRslotError(Error):
    """Invalid reuse of reader locktable slot."""

    MDB_NAME = "MDB_BAD_RSLOT"


class BadDbiError(Error):
    """The specified DBI was changed unexpectedly."""

    MDB_NAME = "MDB_BAD_DBI"


class BadTxnError(Error):
    """Transaction cannot recover - it must be aborted."""

    MDB_NAME = "MDB_BAD_TXN"


class BadValsizeError(Error):
    """Too big key/data, key is empty, or wrong DUPFIXED size."""

    MDB_NAME = "MDB_BAD_VALSIZE"


class ReadonlyError(Error):
    """An attempt was made to modify a read-only database."""

    MDB_NAME = "EACCES"


class InvalidParameterError(Error):
    """An invalid parameter was specified."""

    MDB_NAME = "EINVAL"


class LockError(Error):
    """The environment was locked by another process."""

    MDB_NAME = "EAGAIN"


class MemoryError(Error):
    """Out of memory."""

    MDB_NAME = "ENOMEM"


class DiskError(Error):
    """No more disk space."""

    MDB_NAME = "ENOSPC"


# Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class.
if not _reading_docs():
    _error_map = {}
    for obj in list(globals().values()):
        if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error:
            _error_map[getattr(_lib, obj.MDB_NAME)] = obj  # type: ignore[attr-defined]
    del obj


def _error(what, rc):
    """Lookup and instantiate the correct exception class for the error code
    `rc`, using :py:class:`Error` if no better class exists."""
    return _error_map.get(rc, Error)(what, rc)


class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object):
    """We need this because CFFI on PyPy treats None as cffi.NULL, instead of
    throwing an exception it feeds LMDB null pointers. That means simply
    replacing native handles with None during _invalidate() will cause NULL
    pointer dereferences. Instead use this class, and its weird name to cause a
    TypeError, with a very obvious string in the exception text.

    The only alternatives to this are inserting a check around every single use
    of a native handle to ensure the handle is still valid prior to calling
    LMDB, or doing no crash-safety checking at all.
    """

    def __nonzero__(self):
        return 0

    def __bool__(self):
        return False

    def __repr__(self):
        return "<This used to be a LMDB resource but it was deleted or closed>"


_invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed()


def _mvbuf(mv):
    """Convert a MDB_val cdata to a CFFI buffer object."""
    return _ffi.buffer(mv.mv_data, mv.mv_size)


def _mvstr(mv):
    """Convert a MDB_val cdata to Python bytes."""
    return _ffi.buffer(mv.mv_data, mv.mv_size)[:]


def preload(mv):
    _lib.preload(0, mv.mv_data, mv.mv_size)


def enable_drop_gil():
    """Deprecated."""


def version(subpatch=False):
    """
    Return a tuple of integers `(major, minor, patch)` describing the LMDB
    library version that the binding is linked against. The version of the
    binding itself is available from ``lmdb.__version__``.

    `subpatch`:
        If true, returns a 4 integer tuple consisting of the same plus
        an extra integer that represents any patches applied by py-lmdb
        itself (0 representing no patches).

    """
    if subpatch:
        return (
            _lib.MDB_VERSION_MAJOR,
            _lib.MDB_VERSION_MINOR,
            _lib.MDB_VERSION_PATCH,
            1 if _have_patched_lmdb else 0,
        )

    return (_lib.MDB_VERSION_MAJOR, _lib.MDB_VERSION_MINOR, _lib.MDB_VERSION_PATCH)


class Environment(object):
    """
    Structure for a database environment. An environment may contain multiple
    databases, all residing in the same shared-memory map and underlying disk
    file.

    To write to the environment a :py:class:`Transaction` must be created. One
    simultaneous write transaction is allowed, however there is no limit on the
    number of read transactions even when a write transaction exists.

    This class is aliased to `lmdb.open`.

    It is a serious error to have the same LMDB file open more than once in the
    same process at the same time. Failure to heed this may lead to data
    corruption and interpreter crash.

    Equivalent to `mdb_env_open()
    <http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_

    `path`:
        Location of directory (if `subdir=True`) or file prefix to store
        the database.

    `map_size`:
        Maximum size database may grow to; used to size the memory mapping.
        If database grows larger than ``map_size``, an exception will be
        raised and the user must close and reopen :py:class:`Environment`.
        On 64-bit there is no penalty for making this huge (say 1TB). Must
        be <2GB on 32-bit.

        .. note::

            **The default map size is set low to encourage a crash**, so
            users can figure out a good value before learning about this
            option too late.

    `subdir`:
        If ``True``, `path` refers to a subdirectory to store the data and
        lock files in, otherwise it refers to a filename prefix.

    `readonly`:
        If ``True``, disallow any write operations. Note the lock file is
        still modified. If specified, the ``write`` flag to
        :py:meth:`begin` or :py:class:`Transaction` is ignored.

    `metasync`:
        If ``False``, flush system buffers to disk only once per
        transaction, omit the metadata flush. Defer that until the system
        flushes files to disk, or next commit or :py:meth:`sync`.

        This optimization maintains database integrity, but a system crash
        may undo the last committed transaction. I.e. it preserves the ACI
        (atomicity, consistency, isolation) but not D (durability) database
        property.

    `sync`:
        If ``False``, don't flush system buffers to disk when committing a
        transaction. This optimization means a system crash can corrupt the
        database or lose the last transactions if buffers are not yet
        flushed to disk.

        The risk is governed by how often the system flushes dirty buffers
        to disk and how often :py:meth:`sync` is called. However, if the
        filesystem preserves write order and `writemap=False`, transactions
        exhibit ACI (atomicity, consistency, isolation) properties and only
        lose D (durability). I.e. database integrity is maintained, but a
        system crash may undo the final transactions.

        Note that `sync=False, writemap=True` leaves the system with no
        hint for when to write transactions to disk, unless :py:meth:`sync`
        is called. `map_async=True, writemap=True` may be preferable.

    `mode`:
        File creation mode.

    `create`:
        If ``False``, do not create the directory `path` if it is missing.

    `readahead`:
        If ``False``, LMDB will disable the OS filesystem readahead
        mechanism, which may improve random read performance when a
        database is larger than RAM.

    `writemap`:
        If ``True``, use a writeable memory map unless `readonly=True`.
        This is faster and uses fewer mallocs, but loses protection from
        application bugs like wild pointer writes and other bad updates
        into the database. Incompatible with nested transactions.

        Processes with and without `writemap` on the same environment do
        not cooperate well.

    `meminit`:
        If ``False`` LMDB will not zero-initialize buffers prior to writing
        them to disk. This improves performance but may cause old heap data
        to be saved in the unused portion of the buffer. Do not use
        this option if your application manipulates confidential data (e.g.
        plaintext passwords) in memory. This option is only meaningful when
        `writemap=False`; new pages are always zero-initialized when
        `writemap=True`.

    `map_async`:
        When ``writemap=True``, use asynchronous flushes to disk. As with
        ``sync=False``, a system crash can then corrupt the database or
        lose the last transactions. Calling :py:meth:`sync` ensures
        on-disk database integrity until next commit.

    `max_readers`:
        Maximum number of simultaneous read transactions. Can only be set
        by the first process to open an environment, as it affects the size
        of the lock file and shared memory area. Attempts to simultaneously
        start more than this many *read* transactions will fail.

    `max_dbs`:
        Maximum number of databases available. If 0, assume environment
        will be used as a single database.

    `max_spare_txns`:
        Read-only transactions to cache after becoming unused. Caching
        transactions avoids two allocations, one lock and linear scan
        of the shared environment per invocation of :py:meth:`begin`,
        :py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or
        :py:meth:`cursor`. Should match the process's maximum expected
        concurrent transactions (e.g. thread count).

    `lock`:
        If ``False``, don't do any locking. If concurrent access is
        anticipated, the caller must manage all concurrency itself. For
        proper operation the caller must enforce single-writer semantics,
        and must ensure that no readers are using old transactions while a
        writer is active. The simplest approach is to use an exclusive lock
        so that no readers may be active at all when a writer begins.
    """

    def __init__(
        self,
        path,
        map_size=10485760,
        subdir=True,
        readonly=False,
        metasync=True,
        sync=True,
        map_async=False,
        mode=O_0755,
        create=True,
        readahead=True,
        writemap=False,
        meminit=True,
        max_readers=126,
        max_dbs=0,
        max_spare_txns=1,
        lock=True,
    ):
        self._max_spare_txns = max_spare_txns
        self._spare_txns = []

        envpp = _ffi.new("MDB_env **")

        rc = _lib.mdb_env_create(envpp)
        if rc:
            raise _error("mdb_env_create", rc)
        self._env = envpp[0]
        self._deps = set()
        self._creating_db_in_readonly = False

        self.set_mapsize(map_size)

        rc = _lib.mdb_env_set_maxreaders(self._env, max_readers)
        if rc:
            raise _error("mdb_env_set_maxreaders", rc)

        rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs)
        if rc:
            raise _error("mdb_env_set_maxdbs", rc)

        if create and subdir and not readonly:
            try:
                os.mkdir(path, mode)
            except EnvironmentError as e:
                if e.errno != errno.EEXIST:
                    raise

        flags = _lib.MDB_NOTLS
        if not subdir:
            flags |= _lib.MDB_NOSUBDIR
        if readonly:
            flags |= _lib.MDB_RDONLY
        self.readonly = readonly
        if not metasync:
            flags |= _lib.MDB_NOMETASYNC
        if not sync:
            flags |= _lib.MDB_NOSYNC
        if map_async:
            flags |= _lib.MDB_MAPASYNC
        if not readahead:
            flags |= _lib.MDB_NORDAHEAD
        if writemap:
            flags |= _lib.MDB_WRITEMAP
        if not meminit:
            flags |= _lib.MDB_NOMEMINIT
        if not lock:
            flags |= _lib.MDB_NOLOCK

        if isinstance(path, UnicodeType):
            path = path.encode(sys.getfilesystemencoding())

        rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111)
        if rc:
            raise _error(path, rc)

        with self.begin(db=object()) as txn:
            self._db = _Database(
                env=self,
                txn=txn,
                name=None,
                reverse_key=False,
                dupsort=False,
                create=True,
                integerkey=False,
                integerdup=False,
                dupfixed=False,
            )

        self._dbs = {None: self._db}

    def __enter__(self):
        return self

    def __exit__(self, _1, _2, _3):
        self.close()

    def __del__(self):
        self.close()

    _env = None
    _deps = None
    _spare_txns = None
    _dbs = None

    def set_mapsize(self, map_size):
        """Change the maximum size of the map file. This function will fail if
        any transactions are active in the current process.

        `map_size`:
            The new size in bytes.

        Equivalent to `mdb_env_set_mapsize()
        <http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_

        Warning:
            There's a data race in the underlying library that may cause
            catastrophic loss of data if you use this method.

            You are safe if one of the following is true:
            * Only one process accessing a particular LMDB file ever calls
              this method.

            * You use locking external to this library to ensure that only one
              process accessing the current LMDB file can be inside this function.
        """
        rc = _lib.mdb_env_set_mapsize(self._env, map_size)
        if rc:
            raise _error("mdb_env_set_mapsize", rc)

    def close(self):
        """Close the environment, invalidating any open iterators, cursors, and
        transactions. Repeat calls to :py:meth:`close` have no effect.

        Equivalent to `mdb_env_close()
        <http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_
        """
        if self._env:
            if self._deps:
                while self._deps:
                    self._deps.pop()._invalidate()
                self._deps = None

            if self._spare_txns:
                while self._spare_txns:
                    _lib.mdb_txn_abort(self._spare_txns.pop())
                self._spare_txns = None

            if self._dbs:
                self._dbs.clear()
                self._dbs = None
                self._db = None

            _lib.mdb_env_close(self._env)
            self._env = _invalid

    def path(self):
        """Directory path or file name prefix where this environment is
        stored.

        Equivalent to `mdb_env_get_path()
        <http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_
        """
        path = _ffi.new("char **")
        rc = _lib.mdb_env_get_path(self._env, path)
        if rc:
            raise _error("mdb_env_get_path", rc)
        return _ffi.string(path[0]).decode(sys.getfilesystemencoding())

    def copy(self, path, compact=False, txn=None):
        """Make a consistent copy of the environment in the given destination
        directory.

        `compact`:
            If ``True``, perform compaction while copying: omit free pages and
            sequentially renumber all pages in output. This option consumes
            more CPU and runs more slowly than the default, but may produce a
            smaller output database.

        `txn`:
            If provided, the backup will be taken from the database with
            respect to that transaction, otherwise a temporary read-only
            transaction will be created. Note: this parameter being non-None
            is not available if the module was built with LMDB_PURE. Note:
            this parameter may be set only if compact=True.

        Equivalent to `mdb_env_copy2() or mdb_env_copy3()
        <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
        """
        flags = _lib.MDB_CP_COMPACT if compact else 0
        if txn and not _have_patched_lmdb:
            raise TypeError(
                "Non-patched LMDB doesn't support transaction with env.copy"
            )

        if txn and not flags:
            raise TypeError("txn argument only compatible with compact=True")

        encoded = path.encode(sys.getfilesystemencoding())
        if _have_patched_lmdb:
            rc = _lib.mdb_env_copy3(
                self._env, encoded, flags, txn._txn if txn else _ffi.NULL
            )
            if rc:
                raise _error("mdb_env_copy3", rc)
        else:
            rc = _lib.mdb_env_copy2(self._env, encoded, flags)
            if rc:
                raise _error("mdb_env_copy2", rc)

    def copyfd(self, fd, compact=False, txn=None):
        """Copy a consistent version of the environment to file descriptor
        `fd`.

        `compact`:
            If ``True``, perform compaction while copying: omit free pages and
            sequentially renumber all pages in output. This option consumes
            more CPU and runs more slowly than the default, but may produce a
            smaller output database.

        `txn`:
            If provided, the backup will be taken from the database with
            respect to that transaction, otherwise a temporary read-only
            transaction will be created. Note: this parameter being non-None
            is not available if the module was built with LMDB_PURE.

        Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3()
        <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_
        """
        if txn and not _have_patched_lmdb:
            raise TypeError(
                "Non-patched LMDB doesn't support transaction with env.copy"
            )
        if is_win32:
            # Convert C library handle to kernel handle.
            fd = msvcrt.get_osfhandle(fd)
        flags = _lib.MDB_CP_COMPACT if compact else 0

        if txn and not flags:
            raise TypeError("txn argument only compatible with compact=True")

        if _have_patched_lmdb:
            rc = _lib.mdb_env_copyfd3(
                self._env, fd, flags, txn._txn if txn else _ffi.NULL
            )
            if rc:
                raise _error("mdb_env_copyfd3", rc)
        else:
            rc = _lib.mdb_env_copyfd2(self._env, fd, flags)
            if rc:
                raise _error("mdb_env_copyfd2", rc)

    def sync(self, force=False):
        """Flush the data buffers to disk.

        Equivalent to `mdb_env_sync()
        <http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_

        Data is always written to disk when :py:meth:`Transaction.commit` is
        called, but the operating system may keep it buffered. MDB always
        flushes the OS buffers upon commit as well, unless the environment was
        opened with `sync=False` or `metasync=False`.

        `force`:
            If ``True``, force a synchronous flush. Otherwise if the
            environment was opened with `sync=False` the flushes will be
            omitted, and with `map_async=True` they will be asynchronous.
        """
        rc = _lib.mdb_env_sync(self._env, force)
        if rc:
            raise _error("mdb_env_sync", rc)

    def _convert_stat(self, st):
        """Convert a MDB_stat to a dict."""
        return {
            "psize": st.ms_psize,
            "depth": st.ms_depth,
            "branch_pages": st.ms_branch_pages,
            "leaf_pages": st.ms_leaf_pages,
            "overflow_pages": st.ms_overflow_pages,
            "entries": st.ms_entries,
        }

    def stat(self):
        """stat()

        Return some environment statistics for the default database as a dict:

        +--------------------+---------------------------------------+
        | ``psize``          | Size of a database page in bytes.     |
        +--------------------+---------------------------------------+
        | ``depth``          | Height of the B-tree.                 |
        +--------------------+---------------------------------------+
        | ``branch_pages``   | Number of internal (non-leaf) pages.  |
        +--------------------+---------------------------------------+
        | ``leaf_pages``     | Number of leaf pages.                 |
        +--------------------+---------------------------------------+
        | ``overflow_pages`` | Number of overflow pages.             |
        +--------------------+---------------------------------------+
        | ``entries``        | Number of data items.                 |
        +--------------------+---------------------------------------+

        Equivalent to `mdb_env_stat()
        <http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_
        """
        st = _ffi.new("MDB_stat *")
        rc = _lib.mdb_env_stat(self._env, st)
        if rc:
            raise _error("mdb_env_stat", rc)
        return self._convert_stat(st)

    def info(self):
        """Return some nice environment information as a dict:

        +--------------------+---------------------------------------------+
        | ``map_addr``       | Address of database map in RAM.             |
        +--------------------+---------------------------------------------+
        | ``map_size``       | Size of database map in RAM.                |
        +--------------------+---------------------------------------------+
        | ``last_pgno``      | ID of last used page.                       |
        +--------------------+---------------------------------------------+
        | ``last_txnid``     | ID of last committed transaction.           |
        +--------------------+---------------------------------------------+
        | ``max_readers``    | Number of reader slots allocated in the     |
        |                    | lock file. Equivalent to the value of       |
        |                    | `maxreaders=` specified by the first        |
        |                    | process opening the Environment.            |
        +--------------------+---------------------------------------------+
        | ``num_readers``    | Maximum number of reader slots in           |
        |                    | simultaneous use since the lock file was    |
        |                    | initialized.                                |
        +--------------------+---------------------------------------------+

        Equivalent to `mdb_env_info()
        <http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_
        """
        info = _ffi.new("MDB_envinfo *")
        rc = _lib.mdb_env_info(self._env, info)
        if rc:
            raise _error("mdb_env_info", rc)
        return {
            "map_addr": int(_ffi.cast("long", info.me_mapaddr)),
            "map_size": info.me_mapsize,
            "last_pgno": info.me_last_pgno,
            "last_txnid": info.me_last_txnid,
            "max_readers": info.me_maxreaders,
            "num_readers": info.me_numreaders,
        }

    def flags(self):
        """Return a dict describing Environment constructor flags used to
        instantiate this environment."""
        flags_ = _ffi.new("unsigned int[]", 1)
        rc = _lib.mdb_env_get_flags(self._env, flags_)
        if rc:
            raise _error("mdb_env_get_flags", rc)
        flags = flags_[0]
        return {
            "subdir": not (flags & _lib.MDB_NOSUBDIR),
            "readonly": bool(flags & _lib.MDB_RDONLY),
            "metasync": not (flags & _lib.MDB_NOMETASYNC),
            "sync": not (flags & _lib.MDB_NOSYNC),
            "map_async": bool(flags & _lib.MDB_MAPASYNC),
            "readahead": not (flags & _lib.MDB_NORDAHEAD),
            "writemap": bool(flags & _lib.MDB_WRITEMAP),
            "meminit": not (flags & _lib.MDB_NOMEMINIT),
            "lock": not (flags & _lib.MDB_NOLOCK),
        }

    def max_key_size(self):
        """Return the maximum size in bytes of a record's key part. This
        matches the ``MDB_MAXKEYSIZE`` constant set at compile time."""
        return _lib.mdb_env_get_maxkeysize(self._env)

    def max_readers(self):
        """Return the maximum number of readers specified during open of the
        environment by the first process. This is the same as `max_readers=`
        specified to the constructor if this process was the first to open the
        environment."""
        readers_ = _ffi.new("unsigned int[]", 1)
        rc = _lib.mdb_env_get_maxreaders(self._env, readers_)
        if rc:
            raise _error("mdb_env_get_maxreaders", rc)
        return readers_[0]

    def readers(self):
        """Return a multi line Unicode string describing the current state of
        the reader lock table."""
        _callbacks.msg_func = []
        try:
            rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL)
            if rc:
                raise _error("mdb_reader_list", rc)
            return UnicodeType().join(_callbacks.msg_func)
        finally:
            del _callbacks.msg_func

    def reader_check(self):
        """Search the reader lock table for stale entries, for example due to a
        crashed process. Returns the number of stale entries that were cleared.
        """
        reaped = _ffi.new("int[]", 1)
        rc = _lib.mdb_reader_check(self._env, reaped)
        if rc:
            raise _error("mdb_reader_check", rc)
        return reaped[0]

def open_db(
|
|
1214
|
+
self,
|
|
1215
|
+
key=None,
|
|
1216
|
+
txn=None,
|
|
1217
|
+
reverse_key=False,
|
|
1218
|
+
dupsort=False,
|
|
1219
|
+
create=True,
|
|
1220
|
+
integerkey=False,
|
|
1221
|
+
integerdup=False,
|
|
1222
|
+
dupfixed=False,
|
|
1223
|
+
):
|
|
1224
|
+
"""
|
|
1225
|
+
Open a database, returning an instance of :py:class:`_Database`. Repeat
|
|
1226
|
+
:py:meth:`Environment.open_db` calls for the same name will return the
|
|
1227
|
+
same handle. As a special case, the main database is always open.
|
|
1228
|
+
|
|
1229
|
+
Equivalent to `mdb_dbi_open()
|
|
1230
|
+
<http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_
|
|
1231
|
+
|
|
1232
|
+
Named databases are implemented by *storing a special descriptor in the
|
|
1233
|
+
main database*. All databases in an environment *share the same file*.
|
|
1234
|
+
Because the descriptor is present in the main database, attempts to
|
|
1235
|
+
create a named database will fail if a key matching the database's name
|
|
1236
|
+
already exists. Furthermore *the key is visible to lookups and
|
|
1237
|
+
enumerations*. If your main database keyspace conflicts with the names
|
|
1238
|
+
you use for named databases, then move the contents of your main
|
|
1239
|
+
database to another named database.
|
|
1240
|
+
|
|
1241
|
+
::
|
|
1242
|
+
|
|
1243
|
+
>>> env = lmdb.open('/tmp/test', max_dbs=2)
|
|
1244
|
+
>>> with env.begin(write=True) as txn:
|
|
1245
|
+
... txn.put('somename', 'somedata')
|
|
1246
|
+
|
|
1247
|
+
>>> # Error: database cannot share name of existing key!
|
|
1248
|
+
>>> subdb = env.open_db('somename')
|
|
1249
|
+
|
|
1250
|
+
A newly created database will not exist if the transaction that created
|
|
1251
|
+
it aborted, nor if another process deleted it. The handle resides in
|
|
1252
|
+
the shared environment, it is not owned by the current transaction or
|
|
1253
|
+
process. Only one thread should call this function; it is not
|
|
1254
|
+
mutex-protected in a read-only transaction.
|
|
1255
|
+
|
|
1256
|
+
The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are
|
|
1257
|
+
ignored if the database already exists. The state of those settings are
|
|
1258
|
+
persistent and immutable per database. See :py:meth:`_Database.flags`
|
|
1259
|
+
to view the state of those options for an opened database. A consequence
|
|
1260
|
+
of the immutability of these flags is that the default non-named database
|
|
1261
|
+
will never have these flags set.
|
|
1262
|
+
|
|
1263
|
+
Preexisting transactions, other than the current transaction and any
|
|
1264
|
+
parents, must not use the new handle, nor must their children.
|
|
1265
|
+
|
|
1266
|
+
`key`:
|
|
1267
|
+
Bytestring database name. If ``None``, indicates the main
|
|
1268
|
+
database should be returned, otherwise indicates a named
|
|
1269
|
+
database should be created inside the main database.
|
|
1270
|
+
|
|
1271
|
+
In other words, *a key representing the database will be
|
|
1272
|
+
visible in the main database, and the database name cannot
|
|
1273
|
+
conflict with any existing key.*
|
|
1274
|
+
|
|
1275
|
+
`txn`:
|
|
1276
|
+
Transaction used to create the database if it does not exist.
|
|
1277
|
+
If unspecified, a temporarily write transaction is used. Do not
|
|
1278
|
+
call :py:meth:`open_db` from inside an existing transaction
|
|
1279
|
+
without supplying it here. Note the passed transaction must
|
|
1280
|
+
have `write=True`.
|
|
1281
|
+
|
|
1282
|
+
`reverse_key`:
|
|
1283
|
+
If ``True``, keys are compared from right to left (e.g. DNS
|
|
1284
|
+
names).
|
|
1285
|
+
|
|
1286
|
+
`dupsort`:
|
|
1287
|
+
Duplicate keys may be used in the database. (Or, from another
|
|
1288
|
+
perspective, keys may have multiple data items, stored in
|
|
1289
|
+
sorted order.) By default keys must be unique and may have only
|
|
1290
|
+
a single data item.
|
|
1291
|
+
|
|
1292
|
+
`create`:
|
|
1293
|
+
If ``True``, create the database if it doesn't exist, otherwise
|
|
1294
|
+
raise an exception.
|
|
1295
|
+
|
|
1296
|
+
`integerkey`:
|
|
1297
|
+
If ``True``, indicates keys in the database are C unsigned
|
|
1298
|
+
or ``size_t`` integers encoded in native byte order. Keys must
|
|
1299
|
+
all be either unsigned or ``size_t``, they cannot be mixed in a
|
|
1300
|
+
single database.
|
|
1301
|
+
|
|
1302
|
+
`integerdup`:
|
|
1303
|
+
If ``True``, values in the
|
|
1304
|
+
database are C unsigned or ``size_t`` integers encoded in
|
|
1305
|
+
native byte order. Implies `dupsort` and `dupfixed` are
|
|
1306
|
+
``True``.
|
|
1307
|
+
|
|
1308
|
+
`dupfixed`:
|
|
1309
|
+
If ``True``, values for each key
|
|
1310
|
+
in database are of fixed size, allowing each additional
|
|
1311
|
+
duplicate value for a key to be stored without a header
|
|
1312
|
+
indicating its size. Implies `dupsort` is ``True``.
|
|
1313
|
+
"""
|
|
1314
|
+
if isinstance(key, UnicodeType):
|
|
1315
|
+
raise TypeError("key must be bytes")
|
|
1316
|
+
|
|
1317
|
+
if key is None and (
|
|
1318
|
+
reverse_key or dupsort or integerkey or integerdup or dupfixed
|
|
1319
|
+
):
|
|
1320
|
+
raise ValueError("May not set flags on the main database")
|
|
1321
|
+
|
|
1322
|
+
db = self._dbs.get(key)
|
|
1323
|
+
if db:
|
|
1324
|
+
return db
|
|
1325
|
+
|
|
1326
|
+
if integerdup:
|
|
1327
|
+
dupfixed = True
|
|
1328
|
+
|
|
1329
|
+
if dupfixed:
|
|
1330
|
+
dupsort = True
|
|
1331
|
+
|
|
1332
|
+
if txn:
|
|
1333
|
+
db = _Database(
|
|
1334
|
+
self,
|
|
1335
|
+
txn,
|
|
1336
|
+
key,
|
|
1337
|
+
reverse_key,
|
|
1338
|
+
dupsort,
|
|
1339
|
+
create,
|
|
1340
|
+
integerkey,
|
|
1341
|
+
integerdup,
|
|
1342
|
+
dupfixed,
|
|
1343
|
+
)
|
|
1344
|
+
else:
|
|
1345
|
+
try:
|
|
1346
|
+
self._creating_db_in_readonly = True
|
|
1347
|
+
with self.begin(write=not self.readonly) as txn:
|
|
1348
|
+
db = _Database(
|
|
1349
|
+
self,
|
|
1350
|
+
txn,
|
|
1351
|
+
key,
|
|
1352
|
+
reverse_key,
|
|
1353
|
+
dupsort,
|
|
1354
|
+
create,
|
|
1355
|
+
integerkey,
|
|
1356
|
+
integerdup,
|
|
1357
|
+
dupfixed,
|
|
1358
|
+
)
|
|
1359
|
+
finally:
|
|
1360
|
+
self._creating_db_in_readonly = False
|
|
1361
|
+
self._dbs[key] = db
|
|
1362
|
+
return db
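# --- Illustrative sketch, not part of the upstream source: typical use of
# Environment.open_db(). The path and database names below are hypothetical,
# and the import assumes this module is reachable as `lmdb` (in this wheel it
# ships vendored under `zlmdb.lmdb`).
import lmdb

env = lmdb.open('/tmp/zlmdb-open-db-demo', max_dbs=4)

# Flags such as dupsort are fixed when the named database is first created
# and silently ignored on later opens.
meta = env.open_db(b'meta')
tags = env.open_db(b'tags', dupsort=True)

# When already inside a write transaction, pass it in explicitly rather than
# letting open_db() start its own temporary write transaction.
with env.begin(write=True) as txn:
    extra = env.open_db(b'extra', txn=txn)
    txn.put(b'k', b'v', db=extra)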
|
|
1363
|
+
|
|
1364
|
+
def begin(self, db=None, parent=None, write=False, buffers=False):
|
|
1365
|
+
"""Shortcut for :py:class:`lmdb.Transaction`"""
|
|
1366
|
+
return Transaction(self, db, parent, write, buffers)
|
|
1367
|
+
|
|
1368
|
+
|
|
1369
|
+
class _Database(object):
|
|
1370
|
+
"""
|
|
1371
|
+
Internal database handle. This class is opaque, save a single method.
|
|
1372
|
+
|
|
1373
|
+
Should not be constructed directly. Use :py:meth:`Environment.open_db`
|
|
1374
|
+
instead.
|
|
1375
|
+
"""
|
|
1376
|
+
|
|
1377
|
+
def __init__(
|
|
1378
|
+
self,
|
|
1379
|
+
env,
|
|
1380
|
+
txn,
|
|
1381
|
+
name,
|
|
1382
|
+
reverse_key,
|
|
1383
|
+
dupsort,
|
|
1384
|
+
create,
|
|
1385
|
+
integerkey,
|
|
1386
|
+
integerdup,
|
|
1387
|
+
dupfixed,
|
|
1388
|
+
):
|
|
1389
|
+
env._deps.add(self)
|
|
1390
|
+
self._deps = set()
|
|
1391
|
+
self._name = name
|
|
1392
|
+
|
|
1393
|
+
flags = 0
|
|
1394
|
+
if reverse_key:
|
|
1395
|
+
flags |= _lib.MDB_REVERSEKEY
|
|
1396
|
+
if dupsort:
|
|
1397
|
+
flags |= _lib.MDB_DUPSORT
|
|
1398
|
+
if create:
|
|
1399
|
+
flags |= _lib.MDB_CREATE
|
|
1400
|
+
if integerkey:
|
|
1401
|
+
flags |= _lib.MDB_INTEGERKEY
|
|
1402
|
+
if integerdup:
|
|
1403
|
+
flags |= _lib.MDB_INTEGERDUP
|
|
1404
|
+
if dupfixed:
|
|
1405
|
+
flags |= _lib.MDB_DUPFIXED
|
|
1406
|
+
dbipp = _ffi.new("MDB_dbi *")
|
|
1407
|
+
self._dbi = None
|
|
1408
|
+
rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp)
|
|
1409
|
+
if rc:
|
|
1410
|
+
raise _error("mdb_dbi_open", rc)
|
|
1411
|
+
self._dbi = dbipp[0]
|
|
1412
|
+
self._load_flags(txn)
|
|
1413
|
+
|
|
1414
|
+
def _load_flags(self, txn):
|
|
1415
|
+
"""Load MDB's notion of the database flags."""
|
|
1416
|
+
flags_ = _ffi.new("unsigned int[]", 1)
|
|
1417
|
+
rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_)
|
|
1418
|
+
if rc:
|
|
1419
|
+
raise _error("mdb_dbi_flags", rc)
|
|
1420
|
+
self._flags = flags_[0]
|
|
1421
|
+
|
|
1422
|
+
def flags(self, *args):
|
|
1423
|
+
"""Return the database's associated flags as a dict of _Database
|
|
1424
|
+
constructor kwargs."""
|
|
1425
|
+
if len(args) > 1:
|
|
1426
|
+
raise TypeError("flags takes 0 or 1 arguments")
|
|
1427
|
+
|
|
1428
|
+
return {
|
|
1429
|
+
"reverse_key": bool(self._flags & _lib.MDB_REVERSEKEY),
|
|
1430
|
+
"dupsort": bool(self._flags & _lib.MDB_DUPSORT),
|
|
1431
|
+
"integerkey": bool(self._flags & _lib.MDB_INTEGERKEY),
|
|
1432
|
+
"integerdup": bool(self._flags & _lib.MDB_INTEGERDUP),
|
|
1433
|
+
"dupfixed": bool(self._flags & _lib.MDB_DUPFIXED),
|
|
1434
|
+
}
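# Illustrative sketch (hypothetical path and database name; assumes the
# module imports as `lmdb`): the per-database flags persisted at creation
# time can be read back as constructor kwargs via flags().
import lmdb

env = lmdb.open('/tmp/zlmdb-flags-demo', max_dbs=1)
tags = env.open_db(b'tags', dupsort=True)
assert tags.flags()['dupsort'] is True
assert tags.flags()['integerkey'] is False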
|
|
1435
|
+
|
|
1436
|
+
def _invalidate(self):
|
|
1437
|
+
self._dbi = _invalid
|
|
1438
|
+
|
|
1439
|
+
|
|
1440
|
+
open = Environment
|
|
1441
|
+
|
|
1442
|
+
|
|
1443
|
+
class Transaction(object):
|
|
1444
|
+
"""
|
|
1445
|
+
A transaction object. All operations require a transaction handle;
|
|
1446
|
+
transactions may be read-only or read-write. Write transactions may not
|
|
1447
|
+
span threads. Transaction objects implement the context manager protocol,
|
|
1448
|
+
so that reliable release of the transaction happens even in the face of
|
|
1449
|
+
unhandled exceptions:
|
|
1450
|
+
|
|
1451
|
+
.. code-block:: python
|
|
1452
|
+
|
|
1453
|
+
# Transaction aborts correctly:
|
|
1454
|
+
with env.begin(write=True) as txn:
|
|
1455
|
+
crash()
|
|
1456
|
+
|
|
1457
|
+
# Transaction commits automatically:
|
|
1458
|
+
with env.begin(write=True) as txn:
|
|
1459
|
+
txn.put('a', 'b')
|
|
1460
|
+
|
|
1461
|
+
Equivalent to `mdb_txn_begin()
|
|
1462
|
+
<http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_
|
|
1463
|
+
|
|
1464
|
+
`env`:
|
|
1465
|
+
Environment the transaction should be on.
|
|
1466
|
+
|
|
1467
|
+
`db`:
|
|
1468
|
+
Default named database to operate on. If unspecified, defaults to
|
|
1469
|
+
the environment's main database. Can be overridden on a per-call
|
|
1470
|
+
basis below.
|
|
1471
|
+
|
|
1472
|
+
`parent`:
|
|
1473
|
+
``None``, or a parent transaction (see lmdb.h).
|
|
1474
|
+
|
|
1475
|
+
`write`:
|
|
1476
|
+
Transactions are read-only by default. To modify the database, you
|
|
1477
|
+
must pass `write=True`. This flag is ignored if
|
|
1478
|
+
:py:class:`Environment` was opened with ``readonly=True``.
|
|
1479
|
+
|
|
1480
|
+
`buffers`:
|
|
1481
|
+
If ``True``, indicates :py:func:`buffer` objects should be yielded
|
|
1482
|
+
instead of bytestrings. This setting applies to the
|
|
1483
|
+
:py:class:`Transaction` instance itself and any :py:class:`Cursors
|
|
1484
|
+
<Cursor>` created within the transaction.
|
|
1485
|
+
|
|
1486
|
+
This feature significantly improves performance, since MDB has a
|
|
1487
|
+
zero-copy design, but it requires care when manipulating the
|
|
1488
|
+
returned buffer objects. The benefit of this facility is diminished
|
|
1489
|
+
when using small keys and values.
|
|
1490
|
+
"""
|
|
1491
|
+
|
|
1492
|
+
# If constructor fails, then __del__ will attempt to access these
|
|
1493
|
+
# attributes.
|
|
1494
|
+
_env = _invalid
|
|
1495
|
+
_txn = _invalid
|
|
1496
|
+
_parent = None
|
|
1497
|
+
_write = False
|
|
1498
|
+
|
|
1499
|
+
# Mutations occurred since transaction start. Required to know when Cursor
|
|
1500
|
+
# key/value must be refreshed.
|
|
1501
|
+
_mutations = 0
|
|
1502
|
+
|
|
1503
|
+
def __init__(self, env, db=None, parent=None, write=False, buffers=False):
|
|
1504
|
+
env._deps.add(self)
|
|
1505
|
+
self.env = env # hold ref
|
|
1506
|
+
self._db = db or env._db
|
|
1507
|
+
self._env = env._env
|
|
1508
|
+
self._key = _ffi.new("MDB_val *")
|
|
1509
|
+
self._val = _ffi.new("MDB_val *")
|
|
1510
|
+
self._to_py = _mvbuf if buffers else _mvstr
|
|
1511
|
+
self._deps = set()
|
|
1512
|
+
|
|
1513
|
+
if parent:
|
|
1514
|
+
self._parent = parent
|
|
1515
|
+
parent_txn = parent._txn
|
|
1516
|
+
parent._deps.add(self)
|
|
1517
|
+
else:
|
|
1518
|
+
parent_txn = _ffi.NULL
|
|
1519
|
+
|
|
1520
|
+
if write:
|
|
1521
|
+
if env.readonly:
|
|
1522
|
+
msg = "Cannot start write transaction with read-only env"
|
|
1523
|
+
raise _error(msg, _lib.EACCES)
|
|
1524
|
+
|
|
1525
|
+
txnpp = _ffi.new("MDB_txn **")
|
|
1526
|
+
rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp)
|
|
1527
|
+
if rc:
|
|
1528
|
+
raise _error("mdb_txn_begin", rc)
|
|
1529
|
+
self._txn = txnpp[0]
|
|
1530
|
+
self._write = True
|
|
1531
|
+
else:
|
|
1532
|
+
try: # Exception catch in order to avoid racy 'if txns:' test
|
|
1533
|
+
if (
|
|
1534
|
+
env._creating_db_in_readonly
|
|
1535
|
+
): # Don't use spare txns for creating a DB when read-only
|
|
1536
|
+
raise IndexError
|
|
1537
|
+
self._txn = env._spare_txns.pop()
|
|
1538
|
+
env._max_spare_txns += 1
|
|
1539
|
+
rc = _lib.mdb_txn_renew(self._txn)
|
|
1540
|
+
if rc:
|
|
1541
|
+
while self._deps:
|
|
1542
|
+
self._deps.pop()._invalidate()
|
|
1543
|
+
_lib.mdb_txn_abort(self._txn)
|
|
1544
|
+
self._txn = _invalid
|
|
1545
|
+
self._invalidate()
|
|
1546
|
+
raise _error("mdb_txn_renew", rc)
|
|
1547
|
+
except IndexError:
|
|
1548
|
+
txnpp = _ffi.new("MDB_txn **")
|
|
1549
|
+
flags = _lib.MDB_RDONLY
|
|
1550
|
+
rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp)
|
|
1551
|
+
if rc:
|
|
1552
|
+
raise _error("mdb_txn_begin", rc)
|
|
1553
|
+
self._txn = txnpp[0]
|
|
1554
|
+
|
|
1555
|
+
def _invalidate(self):
|
|
1556
|
+
if self._txn:
|
|
1557
|
+
self.abort()
|
|
1558
|
+
self.env._deps.discard(self)
|
|
1559
|
+
self._parent = None
|
|
1560
|
+
self._env = _invalid
|
|
1561
|
+
|
|
1562
|
+
def __del__(self):
|
|
1563
|
+
self.abort()
|
|
1564
|
+
|
|
1565
|
+
def __enter__(self):
|
|
1566
|
+
return self
|
|
1567
|
+
|
|
1568
|
+
def __exit__(self, exc_type, exc_value, traceback):
|
|
1569
|
+
if exc_type:
|
|
1570
|
+
self.abort()
|
|
1571
|
+
else:
|
|
1572
|
+
self.commit()
|
|
1573
|
+
|
|
1574
|
+
def id(self):
|
|
1575
|
+
"""id()
|
|
1576
|
+
|
|
1577
|
+
Return the transaction's ID.
|
|
1578
|
+
|
|
1579
|
+
This returns the identifier associated with this transaction. For a
|
|
1580
|
+
read-only transaction, this corresponds to the snapshot being read;
|
|
1581
|
+
concurrent readers will frequently have the same transaction ID.
|
|
1582
|
+
"""
|
|
1583
|
+
return _lib.mdb_txn_id(self._txn)
|
|
1584
|
+
|
|
1585
|
+
def stat(self, db):
|
|
1586
|
+
"""stat(db)
|
|
1587
|
+
|
|
1588
|
+
Return statistics like :py:meth:`Environment.stat`, except for a single
|
|
1589
|
+
DBI. `db` must be a database handle returned by :py:meth:`open_db`.
|
|
1590
|
+
"""
|
|
1591
|
+
st = _ffi.new("MDB_stat *")
|
|
1592
|
+
rc = _lib.mdb_stat(self._txn, db._dbi, st)
|
|
1593
|
+
if rc:
|
|
1594
|
+
raise _error("mdb_stat", rc)
|
|
1595
|
+
return self.env._convert_stat(st)
|
|
1596
|
+
|
|
1597
|
+
def drop(self, db, delete=True):
|
|
1598
|
+
"""Delete all keys in a named database and optionally delete the named
|
|
1599
|
+
database itself. Deleting the named database causes it to become
|
|
1600
|
+
unavailable, and invalidates existing cursors.
|
|
1601
|
+
|
|
1602
|
+
Equivalent to `mdb_drop()
|
|
1603
|
+
<http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_
|
|
1604
|
+
"""
|
|
1605
|
+
while db._deps:
|
|
1606
|
+
db._deps.pop()._invalidate()
|
|
1607
|
+
rc = _lib.mdb_drop(self._txn, db._dbi, delete)
|
|
1608
|
+
self._mutations += 1
|
|
1609
|
+
if rc:
|
|
1610
|
+
raise _error("mdb_drop", rc)
|
|
1611
|
+
if db._name in self.env._dbs:
|
|
1612
|
+
del self.env._dbs[db._name]
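# Illustrative sketch (hypothetical path and name; assumes the module imports
# as `lmdb`): drop(db, delete=False) empties a named database but keeps the
# handle usable, while delete=True also removes the database itself.
import lmdb

env = lmdb.open('/tmp/zlmdb-drop-demo', max_dbs=1)
cache = env.open_db(b'cache')
with env.begin(write=True, db=cache) as txn:
    txn.put(b'k', b'v')
with env.begin(write=True) as txn:
    txn.drop(cache, delete=False)   # now empty, handle still valid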
|
|
1613
|
+
|
|
1614
|
+
def _cache_spare(self):
|
|
1615
|
+
# In order to avoid taking and maintaining a lock, a race is allowed
|
|
1616
|
+
# below which may result in more spare txns than desired. It seems
|
|
1617
|
+
# unlikely the race could ever result in a large amount of spare txns,
|
|
1618
|
+
# and in any case a correctly configured program should not be opening
|
|
1619
|
+
# more read-only transactions than there are configured spares.
|
|
1620
|
+
if self.env._max_spare_txns > 0:
|
|
1621
|
+
_lib.mdb_txn_reset(self._txn)
|
|
1622
|
+
self.env._spare_txns.append(self._txn)
|
|
1623
|
+
self.env._max_spare_txns -= 1
|
|
1624
|
+
self._txn = _invalid
|
|
1625
|
+
self._invalidate()
|
|
1626
|
+
return True
|
|
1627
|
+
|
|
1628
|
+
return False
|
|
1629
|
+
|
|
1630
|
+
def commit(self):
|
|
1631
|
+
"""Commit the pending transaction.
|
|
1632
|
+
|
|
1633
|
+
Equivalent to `mdb_txn_commit()
|
|
1634
|
+
<http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_
|
|
1635
|
+
"""
|
|
1636
|
+
while self._deps:
|
|
1637
|
+
self._deps.pop()._invalidate()
|
|
1638
|
+
if self._write or not self._cache_spare():
|
|
1639
|
+
rc = _lib.mdb_txn_commit(self._txn)
|
|
1640
|
+
self._txn = _invalid
|
|
1641
|
+
if rc:
|
|
1642
|
+
raise _error("mdb_txn_commit", rc)
|
|
1643
|
+
self._invalidate()
|
|
1644
|
+
|
|
1645
|
+
def abort(self):
|
|
1646
|
+
"""Abort the pending transaction. Repeat calls to :py:meth:`abort` have
|
|
1647
|
+
no effect after a previously successful :py:meth:`commit` or
|
|
1648
|
+
:py:meth:`abort`, or after the associated :py:class:`Environment` has
|
|
1649
|
+
been closed.
|
|
1650
|
+
|
|
1651
|
+
Equivalent to `mdb_txn_abort()
|
|
1652
|
+
<http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_
|
|
1653
|
+
"""
|
|
1654
|
+
if self._txn:
|
|
1655
|
+
while self._deps:
|
|
1656
|
+
self._deps.pop()._invalidate()
|
|
1657
|
+
if self._write or not self._cache_spare():
|
|
1658
|
+
rc = _lib.mdb_txn_abort(self._txn)
|
|
1659
|
+
self._txn = _invalid
|
|
1660
|
+
if rc:
|
|
1661
|
+
raise _error("mdb_txn_abort", rc)
|
|
1662
|
+
self._invalidate()
|
|
1663
|
+
|
|
1664
|
+
def get(self, key, default=None, db=None):
|
|
1665
|
+
"""Fetch the first value matching `key`, returning `default` if `key`
|
|
1666
|
+
does not exist. A cursor must be used to fetch all values for a key in
|
|
1667
|
+
a `dupsort=True` database.
|
|
1668
|
+
|
|
1669
|
+
Equivalent to `mdb_get()
|
|
1670
|
+
<http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_
|
|
1671
|
+
"""
|
|
1672
|
+
rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi, key, len(key), self._val)
|
|
1673
|
+
if rc:
|
|
1674
|
+
if rc == _lib.MDB_NOTFOUND:
|
|
1675
|
+
return default
|
|
1676
|
+
raise _error("mdb_cursor_get", rc)
|
|
1677
|
+
|
|
1678
|
+
preload(self._val)
|
|
1679
|
+
return self._to_py(self._val)
|
|
1680
|
+
|
|
1681
|
+
def put(self, key, value, dupdata=True, overwrite=True, append=False, db=None):
|
|
1682
|
+
"""Store a record, returning ``True`` if it was written, or ``False``
|
|
1683
|
+
to indicate the key was already present and `overwrite=False`.
|
|
1684
|
+
On success, the cursor is positioned on the new record.
|
|
1685
|
+
|
|
1686
|
+
Equivalent to `mdb_put()
|
|
1687
|
+
<http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_
|
|
1688
|
+
|
|
1689
|
+
`key`:
|
|
1690
|
+
Bytestring key to store.
|
|
1691
|
+
|
|
1692
|
+
`value`:
|
|
1693
|
+
Bytestring value to store.
|
|
1694
|
+
|
|
1695
|
+
`dupdata`:
|
|
1696
|
+
If ``False`` and database was opened with `dupsort=True`, will return
|
|
1697
|
+
``False`` if the key already has that value. In other words, this only
|
|
1698
|
+
affects the return value.
|
|
1699
|
+
|
|
1700
|
+
`overwrite`:
|
|
1701
|
+
If ``False``, do not overwrite any existing matching key. If
|
|
1702
|
+
False and writing to a dupsort=True database, this will not add a value
|
|
1703
|
+
to the key and this function will return ``False``.
|
|
1704
|
+
|
|
1705
|
+
`append`:
|
|
1706
|
+
If ``True``, append the pair to the end of the database without
|
|
1707
|
+
comparing its order first. Appending a key that is not greater
|
|
1708
|
+
than the highest existing key will fail and return ``False``.
|
|
1709
|
+
|
|
1710
|
+
`db`:
|
|
1711
|
+
Named database to operate on. If unspecified, defaults to the
|
|
1712
|
+
database given to the :py:class:`Transaction` constructor.
|
|
1713
|
+
"""
|
|
1714
|
+
# Handle None key/value according to LMDB behavior
|
|
1715
|
+
if key is None:
|
|
1716
|
+
raise BadValsizeError("Key cannot be None")
|
|
1717
|
+
if value is None:
|
|
1718
|
+
value = b""
|
|
1719
|
+
|
|
1720
|
+
flags = 0
|
|
1721
|
+
if not dupdata:
|
|
1722
|
+
flags |= _lib.MDB_NODUPDATA
|
|
1723
|
+
if not overwrite:
|
|
1724
|
+
flags |= _lib.MDB_NOOVERWRITE
|
|
1725
|
+
if append:
|
|
1726
|
+
flags |= _lib.MDB_APPEND
|
|
1727
|
+
|
|
1728
|
+
rc = _lib.pymdb_put(
|
|
1729
|
+
self._txn, (db or self._db)._dbi, key, len(key), value, len(value), flags
|
|
1730
|
+
)
|
|
1731
|
+
self._mutations += 1
|
|
1732
|
+
if rc:
|
|
1733
|
+
if rc == _lib.MDB_KEYEXIST:
|
|
1734
|
+
return False
|
|
1735
|
+
raise _error("mdb_put", rc)
|
|
1736
|
+
return True
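# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): put() returns False rather than raising when overwrite=False
# and the key already exists, which makes "insert if absent" checks cheap.
import lmdb

env = lmdb.open('/tmp/zlmdb-put-demo')
with env.begin(write=True) as txn:
    assert txn.put(b'user:1', b'alice') is True
    assert txn.put(b'user:1', b'bob', overwrite=False) is False
    assert txn.get(b'user:1') == b'alice'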
|
|
1737
|
+
|
|
1738
|
+
def replace(self, key, value, db=None):
|
|
1739
|
+
"""Use a temporary cursor to invoke :py:meth:`Cursor.replace`.
|
|
1740
|
+
|
|
1741
|
+
`db`:
|
|
1742
|
+
Named database to operate on. If unspecified, defaults to the
|
|
1743
|
+
database given to the :py:class:`Transaction` constructor.
|
|
1744
|
+
"""
|
|
1745
|
+
with Cursor(db or self._db, self) as curs:
|
|
1746
|
+
return curs.replace(key, value)
|
|
1747
|
+
|
|
1748
|
+
def pop(self, key, db=None):
|
|
1749
|
+
"""Use a temporary cursor to invoke :py:meth:`Cursor.pop`.
|
|
1750
|
+
|
|
1751
|
+
`db`:
|
|
1752
|
+
Named database to operate on. If unspecified, defaults to the
|
|
1753
|
+
database given to the :py:class:`Transaction` constructor.
|
|
1754
|
+
"""
|
|
1755
|
+
with Cursor(db or self._db, self) as curs:
|
|
1756
|
+
return curs.pop(key)
|
|
1757
|
+
|
|
1758
|
+
def delete(self, key, value=EMPTY_BYTES, db=None):
|
|
1759
|
+
"""Delete a key from the database.
|
|
1760
|
+
|
|
1761
|
+
Equivalent to `mdb_del()
|
|
1762
|
+
<http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_
|
|
1763
|
+
|
|
1764
|
+
`key`:
|
|
1765
|
+
The key to delete.
|
|
1766
|
+
|
|
1767
|
+
value:
|
|
1768
|
+
If the database was opened with dupsort=True and value is not
|
|
1769
|
+
the empty bytestring, then delete elements matching only this
|
|
1770
|
+
`(key, value)` pair, otherwise all values for key are deleted.
|
|
1771
|
+
|
|
1772
|
+
Returns True if at least one key was deleted.
|
|
1773
|
+
"""
|
|
1774
|
+
if value is None: # for bug-compatibility with cpython impl
|
|
1775
|
+
value = EMPTY_BYTES
|
|
1776
|
+
|
|
1777
|
+
rc = _lib.pymdb_del(
|
|
1778
|
+
self._txn, (db or self._db)._dbi, key, len(key), value, len(value)
|
|
1779
|
+
)
|
|
1780
|
+
self._mutations += 1
|
|
1781
|
+
if rc:
|
|
1782
|
+
if rc == _lib.MDB_NOTFOUND:
|
|
1783
|
+
return False
|
|
1784
|
+
raise _error("mdb_del", rc)
|
|
1785
|
+
return True
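# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): in a dupsort=True database, delete(key, value) removes only
# that one duplicate, while delete(key) removes every value under the key.
import lmdb

env = lmdb.open('/tmp/zlmdb-delete-demo', max_dbs=1)
tags = env.open_db(b'tags', dupsort=True)
with env.begin(write=True, db=tags) as txn:
    txn.put(b'post:1', b'blue')
    txn.put(b'post:1', b'red')
    assert txn.delete(b'post:1', b'red') is True   # one duplicate removed
    assert txn.delete(b'post:1') is True           # remaining values removed
    assert txn.get(b'post:1') is None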
|
|
1786
|
+
|
|
1787
|
+
def cursor(self, db=None):
|
|
1788
|
+
"""Shortcut for ``lmdb.Cursor(db, self)``"""
|
|
1789
|
+
return Cursor(db or self._db, self)
|
|
1790
|
+
|
|
1791
|
+
|
|
1792
|
+
class Cursor(object):
|
|
1793
|
+
"""
|
|
1794
|
+
Structure for navigating a database.
|
|
1795
|
+
|
|
1796
|
+
Equivalent to `mdb_cursor_open()
|
|
1797
|
+
<http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_
|
|
1798
|
+
|
|
1799
|
+
`db`:
|
|
1800
|
+
:py:class:`_Database` to navigate.
|
|
1801
|
+
|
|
1802
|
+
`txn`:
|
|
1803
|
+
:py:class:`Transaction` to navigate.
|
|
1804
|
+
|
|
1805
|
+
As a convenience, :py:meth:`Transaction.cursor` can be used to quickly
|
|
1806
|
+
return a cursor:
|
|
1807
|
+
|
|
1808
|
+
::
|
|
1809
|
+
|
|
1810
|
+
>>> env = lmdb.open('/tmp/foo')
|
|
1811
|
+
>>> child_db = env.open_db('child_db')
|
|
1812
|
+
>>> with env.begin() as txn:
|
|
1813
|
+
... cursor = txn.cursor() # Cursor on main database.
|
|
1814
|
+
... cursor2 = txn.cursor(child_db) # Cursor on child database.
|
|
1815
|
+
|
|
1816
|
+
Cursors start in an unpositioned state. If :py:meth:`iternext` or
|
|
1817
|
+
:py:meth:`iterprev` are used in this state, iteration proceeds from the
|
|
1818
|
+
start or end respectively. Iterators directly position using the cursor,
|
|
1819
|
+
meaning strange behavior results when multiple iterators exist on the same
|
|
1820
|
+
cursor.
|
|
1821
|
+
|
|
1822
|
+
.. note::
|
|
1823
|
+
|
|
1824
|
+
From the perspective of the Python binding, cursors return to an
|
|
1825
|
+
'unpositioned' state once any scanning or seeking method (e.g.
|
|
1826
|
+
:py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns
|
|
1827
|
+
``False`` or raises an exception. This is primarily to ensure safe,
|
|
1828
|
+
consistent semantics in the face of any error condition.
|
|
1829
|
+
|
|
1830
|
+
When the Cursor returns to an unpositioned state, its :py:meth:`key`
|
|
1831
|
+
and :py:meth:`value` return empty strings to indicate there is no
|
|
1832
|
+
active position, although internally the LMDB cursor may still have a
|
|
1833
|
+
valid position.
|
|
1834
|
+
|
|
1835
|
+
This may lead to slightly surprising behaviour when iterating the
|
|
1836
|
+
values for a `dupsort=True` database's keys, since methods such as
|
|
1837
|
+
:py:meth:`iternext_dup` will cause Cursor to appear unpositioned,
|
|
1838
|
+
despite it returning ``False`` only to indicate there are no more
|
|
1839
|
+
values for the current key. In that case, simply calling
|
|
1840
|
+
:py:meth:`next` would cause iteration to resume at the next available
|
|
1841
|
+
key.
|
|
1842
|
+
|
|
1843
|
+
This behaviour may change in future.
|
|
1844
|
+
|
|
1845
|
+
Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept
|
|
1846
|
+
`keys` and `values` arguments. If both are ``True``, then the value of
|
|
1847
|
+
:py:meth:`item` is yielded on each iteration. If only `keys` is ``True``,
|
|
1848
|
+
:py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded.
|
|
1849
|
+
|
|
1850
|
+
Prior to iteration, a cursor can be positioned anywhere in the database:
|
|
1851
|
+
|
|
1852
|
+
::
|
|
1853
|
+
|
|
1854
|
+
>>> with env.begin() as txn:
|
|
1855
|
+
... cursor = txn.cursor()
|
|
1856
|
+
... if not cursor.set_range('5'): # Position at first key >= '5'.
|
|
1857
|
+
... print('Not found!')
|
|
1858
|
+
... else:
|
|
1859
|
+
... for key, value in cursor: # Iterate from first key >= '5'.
|
|
1860
|
+
... print((key, value))
|
|
1861
|
+
|
|
1862
|
+
Iteration is not required to navigate, and sometimes results in ugly or
|
|
1863
|
+
inefficient code. In cases where the iteration order is not obvious, or is
|
|
1864
|
+
related to the data being read, use of :py:meth:`set_key`,
|
|
1865
|
+
:py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item`
|
|
1866
|
+
may be preferable:
|
|
1867
|
+
|
|
1868
|
+
::
|
|
1869
|
+
|
|
1870
|
+
>>> # Record the path from a child to the root of a tree.
|
|
1871
|
+
>>> path = ['child14123']
|
|
1872
|
+
>>> while path[-1] != 'root':
|
|
1873
|
+
... assert cursor.set_key(path[-1]), \\
|
|
1874
|
+
... 'Tree is broken! Path: %s' % (path,)
|
|
1875
|
+
... path.append(cursor.value())
|
|
1876
|
+
"""
|
|
1877
|
+
|
|
1878
|
+
def __init__(self, db, txn):
|
|
1879
|
+
db._deps.add(self)
|
|
1880
|
+
txn._deps.add(self)
|
|
1881
|
+
self.db = db # hold ref
|
|
1882
|
+
self.txn = txn # hold ref
|
|
1883
|
+
self._dbi = db._dbi
|
|
1884
|
+
self._txn = txn._txn
|
|
1885
|
+
self._key = _ffi.new("MDB_val *")
|
|
1886
|
+
self._val = _ffi.new("MDB_val *")
|
|
1887
|
+
self._valid = False
|
|
1888
|
+
self._to_py = txn._to_py
|
|
1889
|
+
curpp = _ffi.new("MDB_cursor **")
|
|
1890
|
+
self._cur = None
|
|
1891
|
+
rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp)
|
|
1892
|
+
if rc:
|
|
1893
|
+
raise _error("mdb_cursor_open", rc)
|
|
1894
|
+
self._cur = curpp[0]
|
|
1895
|
+
# If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to
|
|
1896
|
+
# refresh `key' and `val'.
|
|
1897
|
+
self._last_mutation = txn._mutations
|
|
1898
|
+
|
|
1899
|
+
def _invalidate(self):
|
|
1900
|
+
if self._cur:
|
|
1901
|
+
_lib.mdb_cursor_close(self._cur)
|
|
1902
|
+
self.db._deps.discard(self)
|
|
1903
|
+
self.txn._deps.discard(self)
|
|
1904
|
+
self._cur = _invalid
|
|
1905
|
+
self._dbi = _invalid
|
|
1906
|
+
self._txn = _invalid
|
|
1907
|
+
|
|
1908
|
+
def __del__(self):
|
|
1909
|
+
self._invalidate()
|
|
1910
|
+
|
|
1911
|
+
def close(self):
|
|
1912
|
+
"""Close the cursor, freeing its associated resources."""
|
|
1913
|
+
self._invalidate()
|
|
1914
|
+
|
|
1915
|
+
def __enter__(self):
|
|
1916
|
+
return self
|
|
1917
|
+
|
|
1918
|
+
def __exit__(self, _1, _2, _3):
|
|
1919
|
+
self._invalidate()
|
|
1920
|
+
|
|
1921
|
+
def key(self):
|
|
1922
|
+
"""Return the current key."""
|
|
1923
|
+
# Must refresh `key` and `val` following mutation.
|
|
1924
|
+
if self._last_mutation != self.txn._mutations:
|
|
1925
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
1926
|
+
return self._to_py(self._key)
|
|
1927
|
+
|
|
1928
|
+
def value(self):
|
|
1929
|
+
"""Return the current value."""
|
|
1930
|
+
# Must refresh `key` and `val` following mutation.
|
|
1931
|
+
if self._last_mutation != self.txn._mutations:
|
|
1932
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
1933
|
+
preload(self._val)
|
|
1934
|
+
return self._to_py(self._val)
|
|
1935
|
+
|
|
1936
|
+
def item(self):
|
|
1937
|
+
"""Return the current `(key, value)` pair."""
|
|
1938
|
+
# Must refresh `key` and `val` following mutation.
|
|
1939
|
+
if self._last_mutation != self.txn._mutations:
|
|
1940
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
1941
|
+
preload(self._val)
|
|
1942
|
+
return self._to_py(self._key), self._to_py(self._val)
|
|
1943
|
+
|
|
1944
|
+
def _iter(self, op, keys, values):
|
|
1945
|
+
if not values:
|
|
1946
|
+
get = self.key
|
|
1947
|
+
elif not keys:
|
|
1948
|
+
get = self.value
|
|
1949
|
+
else:
|
|
1950
|
+
get = self.item
|
|
1951
|
+
|
|
1952
|
+
cur = self._cur
|
|
1953
|
+
key = self._key
|
|
1954
|
+
val = self._val
|
|
1955
|
+
rc = 0
|
|
1956
|
+
|
|
1957
|
+
while self._valid:
|
|
1958
|
+
yield get()
|
|
1959
|
+
rc = _lib.mdb_cursor_get(cur, key, val, op)
|
|
1960
|
+
self._valid = not rc
|
|
1961
|
+
|
|
1962
|
+
if rc:
|
|
1963
|
+
self._key.mv_size = 0
|
|
1964
|
+
self._val.mv_size = 0
|
|
1965
|
+
if rc != _lib.MDB_NOTFOUND:
|
|
1966
|
+
raise _error("mdb_cursor_get", rc)
|
|
1967
|
+
|
|
1968
|
+
def iternext(self, keys=True, values=True):
|
|
1969
|
+
"""Return a forward iterator that yields the current element before
|
|
1970
|
+
calling :py:meth:`next`, repeating until the end of the database is
|
|
1971
|
+
reached. As a convenience, :py:class:`Cursor` implements the iterator
|
|
1972
|
+
protocol by automatically returning a forward iterator when invoked:
|
|
1973
|
+
|
|
1974
|
+
::
|
|
1975
|
+
|
|
1976
|
+
>>> # Equivalent:
|
|
1977
|
+
>>> it = iter(cursor)
|
|
1978
|
+
>>> it = cursor.iternext(keys=True, values=True)
|
|
1979
|
+
|
|
1980
|
+
If the cursor is not yet positioned, it is moved to the first key in
|
|
1981
|
+
the database, otherwise iteration proceeds from the current position.
|
|
1982
|
+
"""
|
|
1983
|
+
if not self._valid:
|
|
1984
|
+
self.first()
|
|
1985
|
+
return self._iter(_lib.MDB_NEXT, keys, values)
|
|
1986
|
+
|
|
1987
|
+
__iter__ = iternext
|
|
1988
|
+
|
|
1989
|
+
def iternext_dup(self, keys=False, values=True):
|
|
1990
|
+
"""Return a forward iterator that yields the current value
|
|
1991
|
+
("duplicate") of the current key before calling :py:meth:`next_dup`,
|
|
1992
|
+
repeating until the last value of the current key is reached.
|
|
1993
|
+
|
|
1994
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
1995
|
+
|
|
1996
|
+
.. code-block:: python
|
|
1997
|
+
|
|
1998
|
+
if not cursor.set_key("foo"):
|
|
1999
|
+
print("No values found for 'foo'")
|
|
2000
|
+
else:
|
|
2001
|
+
for idx, data in enumerate(cursor.iternext_dup()):
|
|
2002
|
+
print("%d'th value for 'foo': %s" % (idx, data))
|
|
2003
|
+
"""
|
|
2004
|
+
return self._iter(_lib.MDB_NEXT_DUP, keys, values)
|
|
2005
|
+
|
|
2006
|
+
def iternext_nodup(self, keys=True, values=False):
|
|
2007
|
+
"""Return a forward iterator that yields the current value
|
|
2008
|
+
("duplicate") of the current key before calling :py:meth:`next_nodup`,
|
|
2009
|
+
repeating until the end of the database is reached.
|
|
2010
|
+
|
|
2011
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2012
|
+
|
|
2013
|
+
If the cursor is not yet positioned, it is moved to the first key in
|
|
2014
|
+
the database, otherwise iteration proceeds from the current position.
|
|
2015
|
+
|
|
2016
|
+
.. code-block:: python
|
|
2017
|
+
|
|
2018
|
+
for key in cursor.iternext_nodup():
|
|
2019
|
+
print("Key '%s' has %d values" % (key, cursor.count()))
|
|
2020
|
+
"""
|
|
2021
|
+
if not self._valid:
|
|
2022
|
+
self.first()
|
|
2023
|
+
return self._iter(_lib.MDB_NEXT_NODUP, keys, values)
|
|
2024
|
+
|
|
2025
|
+
def iterprev(self, keys=True, values=True):
|
|
2026
|
+
"""Return a reverse iterator that yields the current element before
|
|
2027
|
+
calling :py:meth:`prev`, until the start of the database is reached.
|
|
2028
|
+
|
|
2029
|
+
If the cursor is not yet positioned, it is moved to the last key in
|
|
2030
|
+
the database, otherwise iteration proceeds from the current position.
|
|
2031
|
+
|
|
2032
|
+
::
|
|
2033
|
+
|
|
2034
|
+
>>> with env.begin() as txn:
|
|
2035
|
+
... for i, (key, value) in enumerate(txn.cursor().iterprev()):
|
|
2036
|
+
... print('%dth last item is (%r, %r)' % (1+i, key, value))
|
|
2037
|
+
"""
|
|
2038
|
+
if not self._valid:
|
|
2039
|
+
self.last()
|
|
2040
|
+
return self._iter(_lib.MDB_PREV, keys, values)
|
|
2041
|
+
|
|
2042
|
+
def iterprev_dup(self, keys=False, values=True):
|
|
2043
|
+
"""Return a reverse iterator that yields the current value
|
|
2044
|
+
("duplicate") of the current key before calling :py:meth:`prev_dup`,
|
|
2045
|
+
repeating until the first value of the current key is reached.
|
|
2046
|
+
|
|
2047
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2048
|
+
"""
|
|
2049
|
+
return self._iter(_lib.MDB_PREV_DUP, keys, values)
|
|
2050
|
+
|
|
2051
|
+
def iterprev_nodup(self, keys=True, values=False):
|
|
2052
|
+
"""Return a reverse iterator that yields the current value
|
|
2053
|
+
("duplicate") of the current key before calling :py:meth:`prev_nodup`,
|
|
2054
|
+
repeating until the start of the database is reached.
|
|
2055
|
+
|
|
2056
|
+
If the cursor is not yet positioned, it is moved to the last key in
|
|
2057
|
+
the database, otherwise iteration proceeds from the current position.
|
|
2058
|
+
|
|
2059
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2060
|
+
"""
|
|
2061
|
+
if not self._valid:
|
|
2062
|
+
self.last()
|
|
2063
|
+
return self._iter(_lib.MDB_PREV_NODUP, keys, values)
|
|
2064
|
+
|
|
2065
|
+
def _cursor_get(self, op):
|
|
2066
|
+
rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op)
|
|
2067
|
+
self._valid = v = not rc
|
|
2068
|
+
self._last_mutation = self.txn._mutations
|
|
2069
|
+
if rc:
|
|
2070
|
+
self._key.mv_size = 0
|
|
2071
|
+
self._val.mv_size = 0
|
|
2072
|
+
if rc != _lib.MDB_NOTFOUND:
|
|
2073
|
+
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
|
|
2074
|
+
raise _error("mdb_cursor_get", rc)
|
|
2075
|
+
return v
|
|
2076
|
+
|
|
2077
|
+
def _cursor_get_kv(self, op, k, v):
|
|
2078
|
+
rc = _lib.pymdb_cursor_get(
|
|
2079
|
+
self._cur, k, len(k), v, len(v), self._key, self._val, op
|
|
2080
|
+
)
|
|
2081
|
+
self._valid = v = not rc
|
|
2082
|
+
if rc:
|
|
2083
|
+
self._key.mv_size = 0
|
|
2084
|
+
self._val.mv_size = 0
|
|
2085
|
+
if rc != _lib.MDB_NOTFOUND:
|
|
2086
|
+
if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT):
|
|
2087
|
+
raise _error("mdb_cursor_get", rc)
|
|
2088
|
+
return v
|
|
2089
|
+
|
|
2090
|
+
def first(self):
|
|
2091
|
+
"""Move to the first key in the database, returning ``True`` on success
|
|
2092
|
+
or ``False`` if the database is empty.
|
|
2093
|
+
|
|
2094
|
+
If the database was opened with `dupsort=True` and the key contains
|
|
2095
|
+
duplicates, the cursor is positioned on the first value ("duplicate").
|
|
2096
|
+
|
|
2097
|
+
Equivalent to `mdb_cursor_get()
|
|
2098
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2099
|
+
with `MDB_FIRST
|
|
2100
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2101
|
+
"""
|
|
2102
|
+
return self._cursor_get(_lib.MDB_FIRST)
|
|
2103
|
+
|
|
2104
|
+
def first_dup(self):
|
|
2105
|
+
"""Move to the first value ("duplicate") for the current key, returning
|
|
2106
|
+
``True`` on success or ``False`` if the database is empty.
|
|
2107
|
+
|
|
2108
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2109
|
+
|
|
2110
|
+
Equivalent to `mdb_cursor_get()
|
|
2111
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2112
|
+
with `MDB_FIRST_DUP
|
|
2113
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2114
|
+
"""
|
|
2115
|
+
return self._cursor_get(_lib.MDB_FIRST_DUP)
|
|
2116
|
+
|
|
2117
|
+
def last(self):
|
|
2118
|
+
"""Move to the last key in the database, returning ``True`` on success
|
|
2119
|
+
or ``False`` if the database is empty.
|
|
2120
|
+
|
|
2121
|
+
If the database was opened with `dupsort=True` and the key contains
|
|
2122
|
+
duplicates, the cursor is positioned on the last value ("duplicate").
|
|
2123
|
+
|
|
2124
|
+
Equivalent to `mdb_cursor_get()
|
|
2125
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2126
|
+
with `MDB_LAST
|
|
2127
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2128
|
+
"""
|
|
2129
|
+
return self._cursor_get(_lib.MDB_LAST)
|
|
2130
|
+
|
|
2131
|
+
def last_dup(self):
|
|
2132
|
+
"""Move to the last value ("duplicate") for the current key, returning
|
|
2133
|
+
``True`` on success or ``False`` if the database is empty.
|
|
2134
|
+
|
|
2135
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2136
|
+
|
|
2137
|
+
Equivalent to `mdb_cursor_get()
|
|
2138
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2139
|
+
with `MDB_LAST_DUP
|
|
2140
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2141
|
+
"""
|
|
2142
|
+
return self._cursor_get(_lib.MDB_LAST_DUP)
|
|
2143
|
+
|
|
2144
|
+
def prev(self):
|
|
2145
|
+
"""Move to the previous element, returning ``True`` on success or
|
|
2146
|
+
``False`` if there is no previous item.
|
|
2147
|
+
|
|
2148
|
+
For databases opened with `dupsort=True`, moves to the previous data
|
|
2149
|
+
item ("duplicate") for the current key if one exists, otherwise moves
|
|
2150
|
+
to the previous key.
|
|
2151
|
+
|
|
2152
|
+
Equivalent to `mdb_cursor_get()
|
|
2153
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2154
|
+
with `MDB_PREV
|
|
2155
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2156
|
+
"""
|
|
2157
|
+
return self._cursor_get(_lib.MDB_PREV)
|
|
2158
|
+
|
|
2159
|
+
def prev_dup(self):
|
|
2160
|
+
"""Move to the previous value ("duplicate") of the current key,
|
|
2161
|
+
returning ``True`` on success or ``False`` if there is no previous
|
|
2162
|
+
value.
|
|
2163
|
+
|
|
2164
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2165
|
+
|
|
2166
|
+
Equivalent to `mdb_cursor_get()
|
|
2167
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2168
|
+
with `MDB_PREV_DUP
|
|
2169
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2170
|
+
"""
|
|
2171
|
+
return self._cursor_get(_lib.MDB_PREV_DUP)
|
|
2172
|
+
|
|
2173
|
+
def prev_nodup(self):
|
|
2174
|
+
"""Move to the last value ("duplicate") of the previous key, returning
|
|
2175
|
+
``True`` on success or ``False`` if there is no previous key.
|
|
2176
|
+
|
|
2177
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2178
|
+
|
|
2179
|
+
Equivalent to `mdb_cursor_get()
|
|
2180
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2181
|
+
with `MDB_PREV_NODUP
|
|
2182
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2183
|
+
"""
|
|
2184
|
+
return self._cursor_get(_lib.MDB_PREV_NODUP)
|
|
2185
|
+
|
|
2186
|
+
def next(self):
|
|
2187
|
+
"""Move to the next element, returning ``True`` on success or ``False``
|
|
2188
|
+
if there is no next element.
|
|
2189
|
+
|
|
2190
|
+
For databases opened with `dupsort=True`, moves to the next value
|
|
2191
|
+
("duplicate") for the current key if one exists, otherwise moves to the
|
|
2192
|
+
first value of the next key.
|
|
2193
|
+
|
|
2194
|
+
Equivalent to `mdb_cursor_get()
|
|
2195
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2196
|
+
with `MDB_NEXT
|
|
2197
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2198
|
+
"""
|
|
2199
|
+
return self._cursor_get(_lib.MDB_NEXT)
|
|
2200
|
+
|
|
2201
|
+
def next_dup(self):
|
|
2202
|
+
"""Move to the next value ("duplicate") of the current key, returning
|
|
2203
|
+
``True`` on success or ``False`` if there is no next value.
|
|
2204
|
+
|
|
2205
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2206
|
+
|
|
2207
|
+
Equivalent to `mdb_cursor_get()
|
|
2208
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2209
|
+
with `MDB_NEXT_DUP
|
|
2210
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2211
|
+
"""
|
|
2212
|
+
return self._cursor_get(_lib.MDB_NEXT_DUP)
|
|
2213
|
+
|
|
2214
|
+
def next_nodup(self):
|
|
2215
|
+
"""Move to the first value ("duplicate") of the next key, returning
|
|
2216
|
+
``True`` on success or ``False`` if there is no next key.
|
|
2217
|
+
|
|
2218
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2219
|
+
|
|
2220
|
+
Equivalent to `mdb_cursor_get()
|
|
2221
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2222
|
+
with `MDB_NEXT_NODUP
|
|
2223
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2224
|
+
"""
|
|
2225
|
+
return self._cursor_get(_lib.MDB_NEXT_NODUP)
|
|
2226
|
+
|
|
2227
|
+
def set_key(self, key):
|
|
2228
|
+
"""Seek exactly to `key`, returning ``True`` on success or ``False`` if
|
|
2229
|
+
the exact key was not found. It is an error to :py:meth:`set_key` the
|
|
2230
|
+
empty bytestring.
|
|
2231
|
+
|
|
2232
|
+
For databases opened with `dupsort=True`, moves to the first value
|
|
2233
|
+
("duplicate") for the key.
|
|
2234
|
+
|
|
2235
|
+
Equivalent to `mdb_cursor_get()
|
|
2236
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2237
|
+
with `MDB_SET_KEY
|
|
2238
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2239
|
+
"""
|
|
2240
|
+
return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES)
|
|
2241
|
+
|
|
2242
|
+
def set_key_dup(self, key, value):
|
|
2243
|
+
"""Seek exactly to `(key, value)`, returning ``True`` on success or
|
|
2244
|
+
``False`` if the exact key and value were not found. It is an error
|
|
2245
|
+
to :py:meth:`set_key` the empty bytestring.
|
|
2246
|
+
|
|
2247
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2248
|
+
|
|
2249
|
+
Equivalent to `mdb_cursor_get()
|
|
2250
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2251
|
+
with `MDB_GET_BOTH
|
|
2252
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2253
|
+
"""
|
|
2254
|
+
return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value)
|
|
2255
|
+
|
|
2256
|
+
def get(self, key, default=None):
|
|
2257
|
+
"""Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is
|
|
2258
|
+
returned when `key` is found, otherwise `default`.
|
|
2259
|
+
"""
|
|
2260
|
+
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
|
|
2261
|
+
return self.value()
|
|
2262
|
+
return default
|
|
2263
|
+
|
|
2264
|
+
def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False):
|
|
2265
|
+
"""Returns an iterable of `(key, value)` 2-tuples containing results
|
|
2266
|
+
for each key in the iterable `keys`.
|
|
2267
|
+
|
|
2268
|
+
`keys`:
|
|
2269
|
+
Iterable to read keys from.
|
|
2270
|
+
|
|
2271
|
+
`dupdata`:
|
|
2272
|
+
If ``True`` and database was opened with `dupsort=True`, read
|
|
2273
|
+
all duplicate values for each matching key.
|
|
2274
|
+
|
|
2275
|
+
`dupfixed_bytes`:
|
|
2276
|
+
If database was opened with `dupsort=True` and `dupfixed=True`,
|
|
2277
|
+
accepts the size of each value, in bytes, and applies an
|
|
2278
|
+
optimization reducing the number of database lookups.
|
|
2279
|
+
|
|
2280
|
+
`keyfixed`:
|
|
2281
|
+
If `dupfixed_bytes` is set and database key size is fixed,
|
|
2282
|
+
setting keyfixed=True will result in this function returning
|
|
2283
|
+
a memoryview to the results as a structured array of bytes.
|
|
2284
|
+
The structured array can be instantiated by passing the
|
|
2285
|
+
memoryview buffer to NumPy:
|
|
2286
|
+
|
|
2287
|
+
.. code-block:: python
|
|
2288
|
+
|
|
2289
|
+
key_bytes, val_bytes = 4, 8
|
|
2290
|
+
dtype = np.dtype([(f'S{key_bytes}', f'S{val_bytes}')])
|
|
2291
|
+
arr = np.frombuffer(
|
|
2292
|
+
cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True),
dtype=dtype,
|
|
2293
|
+
)
|
|
2294
|
+
|
|
2295
|
+
"""
|
|
2296
|
+
if dupfixed_bytes and dupfixed_bytes < 0:
|
|
2297
|
+
raise _error("dupfixed_bytes must be a positive integer.")
|
|
2298
|
+
elif (dupfixed_bytes or keyfixed) and not dupdata:
|
|
2299
|
+
raise _error("dupdata is required for dupfixed_bytes/key_bytes.")
|
|
2300
|
+
elif keyfixed and not dupfixed_bytes:
|
|
2301
|
+
raise _error("dupfixed_bytes is required for key_bytes.")
|
|
2302
|
+
|
|
2303
|
+
if dupfixed_bytes:
|
|
2304
|
+
get_op = _lib.MDB_GET_MULTIPLE
|
|
2305
|
+
next_op = _lib.MDB_NEXT_MULTIPLE
|
|
2306
|
+
else:
|
|
2307
|
+
get_op = _lib.MDB_GET_CURRENT
|
|
2308
|
+
next_op = _lib.MDB_NEXT_DUP
|
|
2309
|
+
|
|
2310
|
+
a = bytearray()
|
|
2311
|
+
lst = list()
|
|
2312
|
+
for key in keys:
|
|
2313
|
+
if self.set_key(key):
|
|
2314
|
+
while self._valid:
|
|
2315
|
+
self._cursor_get(get_op)
|
|
2316
|
+
preload(self._val)
|
|
2317
|
+
key = self._to_py(self._key)
|
|
2318
|
+
val = self._to_py(self._val)
|
|
2319
|
+
|
|
2320
|
+
if dupfixed_bytes:
|
|
2321
|
+
gen = (
|
|
2322
|
+
(key, val[i : i + dupfixed_bytes])
|
|
2323
|
+
for i in range(0, len(val), dupfixed_bytes)
|
|
2324
|
+
)
|
|
2325
|
+
if keyfixed:
|
|
2326
|
+
for k, v in gen:
|
|
2327
|
+
a.extend(k + v)
|
|
2328
|
+
else:
|
|
2329
|
+
for k, v in gen:
|
|
2330
|
+
lst.append((k, v))
|
|
2331
|
+
else:
|
|
2332
|
+
lst.append((key, val))
|
|
2333
|
+
|
|
2334
|
+
if dupdata:
|
|
2335
|
+
self._cursor_get(next_op)
|
|
2336
|
+
else:
|
|
2337
|
+
break
|
|
2338
|
+
|
|
2339
|
+
if keyfixed:
|
|
2340
|
+
return memoryview(a)
|
|
2341
|
+
else:
|
|
2342
|
+
return lst
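# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): without the dupfixed/keyfixed optimisations, getmulti() simply
# returns a list of (key, value) pairs for the keys that were found.
import lmdb

env = lmdb.open('/tmp/zlmdb-getmulti-demo')
with env.begin(write=True) as txn:
    txn.put(b'a', b'1')
    txn.put(b'c', b'3')
with env.begin() as txn:
    with txn.cursor() as cur:
        assert cur.getmulti([b'a', b'b', b'c']) == [(b'a', b'1'), (b'c', b'3')]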
|
|
2343
|
+
|
|
2344
|
+
def set_range(self, key):
|
|
2345
|
+
"""Seek to the first key greater than or equal to `key`, returning
|
|
2346
|
+
``True`` on success, or ``False`` to indicate key was past end of
|
|
2347
|
+
database. Behaves like :py:meth:`first` if `key` is the empty
|
|
2348
|
+
bytestring.
|
|
2349
|
+
|
|
2350
|
+
For databases opened with `dupsort=True`, moves to the first value
|
|
2351
|
+
("duplicate") for the key.
|
|
2352
|
+
|
|
2353
|
+
Equivalent to `mdb_cursor_get()
|
|
2354
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2355
|
+
with `MDB_SET_RANGE
|
|
2356
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2357
|
+
"""
|
|
2358
|
+
if not key:
|
|
2359
|
+
return self.first()
|
|
2360
|
+
return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES)
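# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): set_range() positions the cursor at the first key >= the
# argument, which is the usual building block for prefix scans.
import lmdb

env = lmdb.open('/tmp/zlmdb-range-demo')
with env.begin(write=True) as txn:
    for k in (b'user:1', b'user:2', b'zzz'):
        txn.put(k, b'')
with env.begin() as txn:
    cur = txn.cursor()
    found = cur.set_range(b'user:')
    while found and cur.key().startswith(b'user:'):
        print(cur.key())
        found = cur.next()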
|
|
2361
|
+
|
|
2362
|
+
def set_range_dup(self, key, value):
|
|
2363
|
+
"""Seek to the first key/value pair greater than or equal to `key`,
|
|
2364
|
+
returning ``True`` on success, or ``False`` to indicate that `value` was past the
|
|
2365
|
+
last value of `key` or that `(key, value)` was past the end of the database.
|
|
2366
|
+
|
|
2367
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2368
|
+
|
|
2369
|
+
Equivalent to `mdb_cursor_get()
|
|
2370
|
+
<http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_
|
|
2371
|
+
with `MDB_GET_BOTH_RANGE
|
|
2372
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_
|
|
2373
|
+
"""
|
|
2374
|
+
rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value)
|
|
2375
|
+
# issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation,
|
|
2376
|
+
# and fails to update `key` and `value` on success. Therefore
|
|
2377
|
+
# explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE.
|
|
2378
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2379
|
+
return rc
|
|
2380
|
+
|
|
2381
|
+
def delete(self, dupdata=False):
|
|
2382
|
+
"""Delete the current element and move to the next, returning ``True``
|
|
2383
|
+
on success or ``False`` if the database was empty.
|
|
2384
|
+
|
|
2385
|
+
If `dupdata` is ``True``, delete all values ("duplicates") for the
|
|
2386
|
+
current key, otherwise delete only the currently positioned value. Only
|
|
2387
|
+
meaningful for databases opened with `dupsort=True`.
|
|
2388
|
+
|
|
2389
|
+
Equivalent to `mdb_cursor_del()
|
|
2390
|
+
<http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_
|
|
2391
|
+
"""
|
|
2392
|
+
v = self._valid
|
|
2393
|
+
if v:
|
|
2394
|
+
flags = _lib.MDB_NODUPDATA if dupdata else 0
|
|
2395
|
+
rc = _lib.mdb_cursor_del(self._cur, flags)
|
|
2396
|
+
self.txn._mutations += 1
|
|
2397
|
+
if rc:
|
|
2398
|
+
raise _error("mdb_cursor_del", rc)
|
|
2399
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2400
|
+
v = rc == 0
|
|
2401
|
+
return v
|
|
2402
|
+
|
|
2403
|
+
def count(self):
|
|
2404
|
+
"""Return the number of values ("duplicates") for the current key.
|
|
2405
|
+
|
|
2406
|
+
Only meaningful for databases opened with `dupsort=True`.
|
|
2407
|
+
|
|
2408
|
+
Equivalent to `mdb_cursor_count()
|
|
2409
|
+
<http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_
|
|
2410
|
+
"""
|
|
2411
|
+
countp = _ffi.new("size_t *")
|
|
2412
|
+
rc = _lib.mdb_cursor_count(self._cur, countp)
|
|
2413
|
+
if rc:
|
|
2414
|
+
raise _error("mdb_cursor_count", rc)
|
|
2415
|
+
return countp[0]
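# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): count() reports how many duplicates the current key holds, so
# combined with iternext_nodup() it gives a per-key histogram of a
# dupsort=True database.
import lmdb

env = lmdb.open('/tmp/zlmdb-count-demo', max_dbs=1)
tags = env.open_db(b'tags', dupsort=True)
with env.begin(write=True, db=tags) as txn:
    txn.put(b'post:1', b'blue')
    txn.put(b'post:1', b'red')
    txn.put(b'post:2', b'red')
    cur = txn.cursor()
    for key in cur.iternext_nodup():
        print(key, cur.count())   # post:1 -> 2, post:2 -> 1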
|
|
2416
|
+
|
|
2417
|
+
def put(self, key, val, dupdata=True, overwrite=True, append=False):
|
|
2418
|
+
"""Store a record, returning ``True`` if it was written, or ``False``
|
|
2419
|
+
to indicate the key was already present and `overwrite=False`. On
|
|
2420
|
+
success, the cursor is positioned on the key.
|
|
2421
|
+
|
|
2422
|
+
Equivalent to `mdb_cursor_put()
|
|
2423
|
+
<http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_
|
|
2424
|
+
|
|
2425
|
+
`key`:
|
|
2426
|
+
Bytestring key to store.
|
|
2427
|
+
|
|
2428
|
+
`val`:
|
|
2429
|
+
Bytestring value to store.
|
|
2430
|
+
|
|
2431
|
+
`dupdata`:
|
|
2432
|
+
If ``False`` and database was opened with `dupsort=True`, will return
|
|
2433
|
+
``False`` if the key already has that value. In other words, this only
|
|
2434
|
+
affects the return value.
|
|
2435
|
+
|
|
2436
|
+
`overwrite`:
|
|
2437
|
+
If ``False``, do not overwrite the value for the key if it
|
|
2438
|
+
exists, just return ``False``. For databases opened with
|
|
2439
|
+
`dupsort=True`, ``False`` will always be returned if a
|
|
2440
|
+
duplicate key/value pair is inserted, regardless of the setting
|
|
2441
|
+
for `overwrite`.
|
|
2442
|
+
|
|
2443
|
+
`append`:
|
|
2444
|
+
If ``True``, append the pair to the end of the database without
|
|
2445
|
+
comparing its order first. Appending a key that is not greater
|
|
2446
|
+
than the highest existing key will fail and return ``False``.
|
|
2447
|
+
"""
|
|
2448
|
+
flags = 0
|
|
2449
|
+
if not dupdata:
|
|
2450
|
+
flags |= _lib.MDB_NODUPDATA
|
|
2451
|
+
if not overwrite:
|
|
2452
|
+
flags |= _lib.MDB_NOOVERWRITE
|
|
2453
|
+
if append:
|
|
2454
|
+
if self.txn._db._flags & _lib.MDB_DUPSORT:
|
|
2455
|
+
flags |= _lib.MDB_APPENDDUP
|
|
2456
|
+
else:
|
|
2457
|
+
flags |= _lib.MDB_APPEND
|
|
2458
|
+
|
|
2459
|
+
rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags)
|
|
2460
|
+
self.txn._mutations += 1
|
|
2461
|
+
if rc:
|
|
2462
|
+
if rc == _lib.MDB_KEYEXIST:
|
|
2463
|
+
return False
|
|
2464
|
+
raise _error("mdb_cursor_put", rc)
|
|
2465
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2466
|
+
return True
|
|
2467
|
+
|
|
2468
|
+
def putmulti(self, items, dupdata=True, overwrite=True, append=False):
|
|
2469
|
+
"""Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the
|
|
2470
|
+
iterable `items`. Elements must be exactly 2-tuples; they may not be of
|
|
2471
|
+
any other type, or tuple subclass.
|
|
2472
|
+
|
|
2473
|
+
Returns a tuple `(consumed, added)`, where `consumed` is the number of
|
|
2474
|
+
elements read from the iterable, and `added` is the number of new
|
|
2475
|
+
entries added to the database. `added` may be less than `consumed` when
|
|
2476
|
+
`overwrite=False`.
|
|
2477
|
+
|
|
2478
|
+
`items`:
|
|
2479
|
+
Iterable to read records from.
|
|
2480
|
+
|
|
2481
|
+
`dupdata`:
|
|
2482
|
+
If ``True`` and database was opened with `dupsort=True`, add
|
|
2483
|
+
pair as a duplicate if the given key already exists. Otherwise
|
|
2484
|
+
overwrite any existing matching key.
|
|
2485
|
+
|
|
2486
|
+
`overwrite`:
|
|
2487
|
+
If ``False``, do not overwrite the value for the key if it
|
|
2488
|
+
exists, just return ``False``. For databases opened with
|
|
2489
|
+
`dupsort=True`, ``False`` will always be returned if a
|
|
2490
|
+
duplicate key/value pair is inserted, regardless of the setting
|
|
2491
|
+
for `overwrite`.
|
|
2492
|
+
|
|
2493
|
+
`append`:
|
|
2494
|
+
If ``True``, append records to the end of the database without
|
|
2495
|
+
comparing their order first. Appending a key that is not
|
|
2496
|
+
greater than the highest existing key will cause corruption.
|
|
2497
|
+
"""
|
|
2498
|
+
flags = 0
|
|
2499
|
+
if not dupdata:
|
|
2500
|
+
flags |= _lib.MDB_NODUPDATA
|
|
2501
|
+
if not overwrite:
|
|
2502
|
+
flags |= _lib.MDB_NOOVERWRITE
|
|
2503
|
+
if append:
|
|
2504
|
+
if self.txn._db._flags & _lib.MDB_DUPSORT:
|
|
2505
|
+
flags |= _lib.MDB_APPENDDUP
|
|
2506
|
+
else:
|
|
2507
|
+
flags |= _lib.MDB_APPEND
|
|
2508
|
+
|
|
2509
|
+
added = 0
|
|
2510
|
+
skipped = 0
|
|
2511
|
+
for key, value in items:
|
|
2512
|
+
rc = _lib.pymdb_cursor_put(
|
|
2513
|
+
self._cur, key, len(key), value, len(value), flags
|
|
2514
|
+
)
|
|
2515
|
+
self.txn._mutations += 1
|
|
2516
|
+
added += 1
|
|
2517
|
+
if rc:
|
|
2518
|
+
if rc == _lib.MDB_KEYEXIST:
|
|
2519
|
+
skipped += 1
|
|
2520
|
+
else:
|
|
2521
|
+
raise _error("mdb_cursor_put", rc)
|
|
2522
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2523
|
+
return added, added - skipped
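# Illustrative sketch (hypothetical path and records; assumes the module
# imports as `lmdb`): putmulti() bulk-loads an iterable of (key, value)
# 2-tuples and reports how many items were consumed and how many were added.
import lmdb

env = lmdb.open('/tmp/zlmdb-putmulti-demo')
records = [(b'k1', b'v1'), (b'k2', b'v2'), (b'k1', b'v1-again')]
with env.begin(write=True) as txn:
    with txn.cursor() as cur:
        consumed, added = cur.putmulti(records, overwrite=False)
        assert (consumed, added) == (3, 2)   # second b'k1' was skipped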
|
|
2524
|
+
|
|
2525
|
+
def replace(self, key, val):
|
|
2526
|
+
"""Store a record, returning its previous value if one existed. Returns
|
|
2527
|
+
``None`` if no previous value existed. This uses the best available
|
|
2528
|
+
mechanism to minimize the cost of a `set-and-return-previous`
|
|
2529
|
+
operation.
|
|
2530
|
+
|
|
2531
|
+
For databases opened with `dupsort=True`, only the first data element
|
|
2532
|
+
("duplicate") is returned if it existed, all data elements are removed
|
|
2533
|
+
and the new `(key, data)` pair is inserted.
|
|
2534
|
+
|
|
2535
|
+
`key`:
|
|
2536
|
+
Bytestring key to store.
|
|
2537
|
+
|
|
2538
|
+
`value`:
|
|
2539
|
+
Bytestring value to store.
|
|
2540
|
+
"""
|
|
2541
|
+
if self.db._flags & _lib.MDB_DUPSORT:
|
|
2542
|
+
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
|
|
2543
|
+
preload(self._val)
|
|
2544
|
+
old = _mvstr(self._val)
|
|
2545
|
+
self.delete(True)
|
|
2546
|
+
else:
|
|
2547
|
+
old = None
|
|
2548
|
+
self.put(key, val)
|
|
2549
|
+
return old
|
|
2550
|
+
|
|
2551
|
+
flags = _lib.MDB_NOOVERWRITE
|
|
2552
|
+
keylen = len(key)
|
|
2553
|
+
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags)
|
|
2554
|
+
self.txn._mutations += 1
|
|
2555
|
+
if not rc:
|
|
2556
|
+
return
|
|
2557
|
+
if rc != _lib.MDB_KEYEXIST:
|
|
2558
|
+
raise _error("mdb_cursor_put", rc)
|
|
2559
|
+
|
|
2560
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2561
|
+
preload(self._val)
|
|
2562
|
+
old = _mvstr(self._val)
|
|
2563
|
+
rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0)
|
|
2564
|
+
self.txn._mutations += 1
|
|
2565
|
+
if rc:
|
|
2566
|
+
raise _error("mdb_cursor_put", rc)
|
|
2567
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2568
|
+
return old
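# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): replace() writes the new value and hands back whatever was
# stored before, in a single pass; Transaction.replace() wraps the same call.
import lmdb

env = lmdb.open('/tmp/zlmdb-replace-demo')
with env.begin(write=True) as txn:
    assert txn.replace(b'color', b'red') is None      # nothing stored before
    assert txn.replace(b'color', b'blue') == b'red'   # previous value returned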
|
|
2569
|
+
|
|
2570
|
+
def pop(self, key):
|
|
2571
|
+
"""Fetch a record's value then delete it. Returns ``None`` if no
|
|
2572
|
+
previous value existed. This uses the best available mechanism to
|
|
2573
|
+
minimize the cost of a `delete-and-return-previous` operation.
|
|
2574
|
+
|
|
2575
|
+
For databases opened with `dupsort=True`, the first data element
|
|
2576
|
+
("duplicate") for the key will be popped.
|
|
2577
|
+
|
|
2578
|
+
`key`:
|
|
2579
|
+
Bytestring key to delete.
|
|
2580
|
+
"""
|
|
2581
|
+
if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES):
|
|
2582
|
+
preload(self._val)
|
|
2583
|
+
old = _mvstr(self._val)
|
|
2584
|
+
rc = _lib.mdb_cursor_del(self._cur, 0)
|
|
2585
|
+
self.txn._mutations += 1
|
|
2586
|
+
if rc:
|
|
2587
|
+
raise _error("mdb_cursor_del", rc)
|
|
2588
|
+
self._cursor_get(_lib.MDB_GET_CURRENT)
|
|
2589
|
+
return old
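# Illustrative sketch (hypothetical path and keys; assumes the module imports
# as `lmdb`): pop() is the delete-and-return-previous counterpart of
# replace(): it fetches the stored value, deletes the record, and returns
# the value (or None when the key was absent).
import lmdb

env = lmdb.open('/tmp/zlmdb-pop-demo')
with env.begin(write=True) as txn:
    txn.put(b'job:1', b'pending')
    assert txn.pop(b'job:1') == b'pending'
    assert txn.pop(b'job:1') is None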
|
|
2590
|
+
|
|
2591
|
+
def _iter_from(self, k, reverse):
|
|
2592
|
+
"""Helper for centidb. Please do not rely on this interface, it may be
|
|
2593
|
+
removed in future.
|
|
2594
|
+
"""
|
|
2595
|
+
if not k and not reverse:
|
|
2596
|
+
found = self.first()
|
|
2597
|
+
else:
|
|
2598
|
+
found = self.set_range(k)
|
|
2599
|
+
if reverse:
|
|
2600
|
+
if not found:
|
|
2601
|
+
self.last()
|
|
2602
|
+
return self.iterprev()
|
|
2603
|
+
else:
|
|
2604
|
+
if not found:
|
|
2605
|
+
return iter(())
|
|
2606
|
+
return self.iternext()
|