vastdb 0.0.5.2-py3-none-any.whl → 0.0.5.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vastdb/api.py +4 -2
- vastdb/bench_scan.py +45 -0
- vastdb/tests/__init__.py +0 -0
- vastdb/tests/conftest.py +45 -0
- vastdb/tests/test_create_table_from_parquets.py +50 -0
- vastdb/tests/test_sanity.py +63 -0
- vastdb/tests/test_schemas.py +39 -0
- vastdb/tests/test_tables.py +40 -0
- vastdb/util.py +77 -0
- vastdb/v2.py +327 -75
- {vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/METADATA +1 -1
- {vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/RECORD +15 -7
- {vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/LICENSE +0 -0
- {vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/WHEEL +0 -0
- {vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/top_level.txt +0 -0
vastdb/api.py
CHANGED
@@ -757,6 +757,7 @@ class VastdbApi:
         if not port:
             port = 443 if secure else 80

+        self.default_max_list_columns_page_size = 1000
         self.session = requests.Session()
         self.session.verify = False
         self.session.headers['user-agent'] = "VastData Tabular API 1.0 - 2022 (c)"
@@ -1207,7 +1208,7 @@ class VastdbApi:
                            data=serialized_schema, headers=headers)
         return self._check_res(res, "drop_columns", expected_retvals)

-    def list_columns(self, bucket, schema, table, *, txid=0, client_tags=None, max_keys=
+    def list_columns(self, bucket, schema, table, *, txid=0, client_tags=None, max_keys=None, next_key=0,
                      count_only=False, name_prefix="", exact_match=False,
                      expected_retvals=None, bc_list_internals=False):
         """
@@ -1218,6 +1219,7 @@ class VastdbApi:
        tabular-max-keys: 1000
        tabular-next-key: NextColumnId
        """
+        max_keys = max_keys or self.default_max_list_columns_page_size
        client_tags = client_tags or []
        expected_retvals = expected_retvals or []

@@ -2007,7 +2009,7 @@ class VastdbApi:
        txid, created_txid = self._begin_tx_if_necessary(txid)

        if rows:
-            columns = self._list_table_columns(bucket, schema, table, field_names=rows.keys())
+            columns = self._list_table_columns(bucket, schema, table, field_names=rows.keys(), txid=txid)
            columns_dict = dict([(column[0], column[1]) for column in columns])
            arrow_schema = pa.schema([])
            arrays = []
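
As an illustration of the new behaviour, a minimal sketch assuming a reachable VAST endpoint; the endpoint, credentials and override value below are placeholders, not part of the package:

from vastdb.api import VastdbApi

# Hypothetical client; replace endpoint and keys with real values.
client = VastdbApi(endpoint="http://localhost:9090", access_key="<ACCESS-KEY>", secret_key="<SECRET-KEY>")

# list_columns(...) called without max_keys now pages by the per-client default (1000);
# the default can be tuned per instance before issuing listing calls.
client.default_max_list_columns_page_size = 500
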
vastdb/bench_scan.py
ADDED
from vastdb import api

from logbook import Logger, StreamHandler
import sys
import time
import pprint

StreamHandler(sys.stdout).push_application()
log = Logger('Logbook')

# access_key_id=F3YUMQZDQB60ZZJ1PBAZ
# secret_access_key=9a9Q3if6IC5LjUexly/nXFv1UCANBnhGxi++Sw6p

a = api.VastdbApi(
    access_key='F3YUMQZDQB60ZZJ1PBAZ',
    secret_key='9a9Q3if6IC5LjUexly/nXFv1UCANBnhGxi++Sw6p',
    host='172.19.111.1:172.19.111.16')

kwargs = dict(
    bucket='tabular-slothful-jocular-jack',
    schema='tpcds_schema_create_as_select',
    table='store_sales',
    field_names=['ss_sold_date_sk', 'ss_sold_time_sk', 'ss_item_sk'],
    filters={'ss_item_sk': ['le 1']},
    num_sub_splits=8)

pprint.pprint(kwargs)

res = a.query_iterator(**kwargs)

total_bytes = 0
total_rows = 0
start = time.time()
last_log = None

for b in res:
    total_bytes += b.get_total_buffer_size()
    total_rows += len(b)
    dt = time.time() - start
    if last_log != int(dt):
        log.info("{:.3f} Mrow/s, {:.3f} MB/s", (total_rows/dt) / 1e6, (total_bytes/dt) / 1e6)
        last_log = int(dt)

dt = time.time() - start
log.info("Done after {:.3f} seconds, {:.3f} Mrows, {:.3f} MB", dt, total_rows / 1e6, total_bytes / 1e6)
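
Roughly the same scan can be expressed through the new v2 layer introduced in this release; a sketch with placeholder endpoint, credentials and object names (the benchmark's ss_item_sk filter is omitted because select() in this version builds the request with an empty filter list):

import pyarrow as pa
from vastdb import v2
from vastdb.v2 import QueryConfig

rpc = v2.connect(access="<ACCESS-KEY>", secret="<SECRET-KEY>", endpoint="http://localhost:9090")
with rpc.transaction() as tx:
    # Placeholder bucket/schema/table names.
    table = tx.bucket("tabular-bucket").schema("tpcds").table("store_sales")
    config = QueryConfig(num_sub_splits=8)  # mirrors num_sub_splits=8 used by the benchmark
    total_rows = 0
    for batch in table.select(columns=['ss_sold_date_sk', 'ss_sold_time_sk', 'ss_item_sk'], config=config):
        total_rows += len(batch)
    print(total_rows)
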
vastdb/tests/__init__.py
ADDED
File without changes
vastdb/tests/conftest.py
ADDED
import pytest
import boto3

from vastdb import v2


def pytest_addoption(parser):
    parser.addoption("--tabular-bucket-name", help="Name of the S3 bucket with Tabular enabled")
    parser.addoption("--tabular-access-key", help="Access key with Tabular permissions")
    parser.addoption("--tabular-secret-key", help="Secret key with Tabular permissions")
    parser.addoption("--tabular-endpoint-url", help="Tabular server endpoint")


@pytest.fixture(scope="module")
def rpc(request):
    return v2.connect(
        access=request.config.getoption("--tabular-access-key"),
        secret=request.config.getoption("--tabular-secret-key"),
        endpoint=request.config.getoption("--tabular-endpoint-url"),
    )


@pytest.fixture(scope="module")
def test_bucket_name(request):
    return request.config.getoption("--tabular-bucket-name")


@pytest.fixture(scope="module")
def clean_bucket_name(request, test_bucket_name, rpc):
    with rpc.transaction() as tx:
        b = tx.bucket(test_bucket_name)
        for s in b.schemas():
            for t in s.tables():
                t.drop()
            s.drop()
    return test_bucket_name


@pytest.fixture(scope="module")
def s3(request):
    return boto3.client(
        's3',
        aws_access_key_id=request.config.getoption("--tabular-access-key"),
        aws_secret_access_key=request.config.getoption("--tabular-secret-key"),
        endpoint_url=request.config.getoption("--tabular-endpoint-url"))
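
For reference, a sketch of how these options might be supplied when running the suite (all values are placeholders); the same flags can be passed directly on the pytest command line:

import pytest

# Hypothetical invocation against a Tabular-enabled bucket on a reachable VAST endpoint.
pytest.main([
    "vastdb/tests",
    "--tabular-endpoint-url", "http://localhost:9090",
    "--tabular-access-key", "<ACCESS-KEY>",
    "--tabular-secret-key", "<SECRET-KEY>",
    "--tabular-bucket-name", "my-tabular-bucket",
])
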
vastdb/tests/test_create_table_from_parquets.py
ADDED

import pytest
import os

import pyarrow as pa
import pyarrow.parquet as pq

from vastdb.v2 import InvalidArgumentError
from vastdb import util


def test_create_table_from_files(rpc, clean_bucket_name, s3):
    datasets = [
        {'num': [0],
         'varch': ['z']},
        {'num': [1, 2, 3, 4, 5],
         'varch': ['a', 'b', 'c', 'd', 'e']},
        {'num': [1, 2, 3, 4, 5],
         'bool': [True, False, None, None, False],
         'varch': ['a', 'b', 'c', 'd', 'e']},
        {'num': [1, 2],
         'bool': [True, True]},
        {'varch': ['a', 'b', 'c'],
         'mismatch': [1, 2, 3]}
    ]
    for i, ds in enumerate(datasets):
        table = pa.Table.from_pydict(ds)
        pq.write_table(table, f'prq{i}')
        with open(f'prq{i}', 'rb') as f:
            s3.put_object(Bucket=clean_bucket_name, Key=f'prq{i}', Body=f)
        os.remove(f'prq{i}')

    same_schema_files = [f'/{clean_bucket_name}/prq{i}' for i in range(2)]
    contained_schema_files = [f'/{clean_bucket_name}/prq{i}' for i in range(4)]
    different_schema_files = [f'/{clean_bucket_name}/prq{i}' for i in range(5)]

    with rpc.transaction() as tx:
        b = tx.bucket(clean_bucket_name)
        s = b.create_schema('s1')
        t = util.create_table_from_files(s, 't1', contained_schema_files)
        assert len(t.arrow_schema) == 3
        assert t.arrow_schema == pa.schema([('num', pa.int64()), ('bool', pa.bool_()), ('varch', pa.string())])

        with pytest.raises(InvalidArgumentError):
            util.create_table_from_files(s, 't2', different_schema_files)

        with pytest.raises(InvalidArgumentError):
            util.create_table_from_files(s, 't2', contained_schema_files, schema_merge_func=util.strict_schema_merge)

        util.create_table_from_files(s, 't2', different_schema_files, schema_merge_func=util.union_schema_merge)
        util.create_table_from_files(s, 't3', same_schema_files, schema_merge_func=util.strict_schema_merge)
vastdb/tests/test_sanity.py
ADDED

import logging

import threading
from http.server import HTTPServer, BaseHTTPRequestHandler
from vastdb import api
from itertools import cycle

log = logging.getLogger(__name__)

def test_hello_world(rpc):
    with rpc.transaction() as tx:
        assert tx.txid is not None

def test_version_extraction():
    # A list of version and expected version parsed by API
    TEST_CASES = [
        (None, None),  # vast server without version in header
        ("5", None),  # major only is not supported
        ("5.2", "5.2"),  # major.minor
        ("5.2.0", "5.2.0"),  # major.minor.patch
        ("5.2.0.0", "5.2.0.0"),  # major.minor.patch.protocol
        ("5.2.0.0 some other things", "5.2.0.0"),  # Test forward comptibility 1
        ("5.2.0.0.20 some other things", "5.2.0.0"),  # Test forward comptibility 2
    ]

    # Mock OPTIONS handle that cycles through the test cases response
    class MockOptionsHandler(BaseHTTPRequestHandler):
        versions_iterator = cycle(TEST_CASES)

        def __init__(self, *args) -> None:
            super().__init__(*args)

        def do_OPTIONS(self):
            self.send_response(204)
            self.end_headers()

        def version_string(self):
            version = next(self.versions_iterator)[0]
            return f"vast {version}" if version else "vast"

        def log_message(self, format, *args):
            log.debug(format, *args)

    # start the server on localhost on some available port
    server_address = ('localhost', 0)
    httpd = HTTPServer(server_address, MockOptionsHandler)

    def start_http_server_in_thread():
        log.info(f"Mock HTTP server is running on port {httpd.server_port}")
        httpd.serve_forever()
        log.info("Mock HTTP server killed")

    # start the server in a thread so we have the main thread to operate the API
    server_thread = threading.Thread(target=start_http_server_in_thread)
    server_thread.start()

    try:
        for test_case in TEST_CASES:
            tester = api.VastdbApi(endpoint=f"http://localhost:{httpd.server_port}", access_key="abc", secret_key="abc")
            assert tester.vast_version == test_case[1]
    finally:
        # make sure we shut the server down no matter what
        httpd.shutdown()
vastdb/tests/test_schemas.py
ADDED

import pytest


def test_schemas(rpc, clean_bucket_name):
    with rpc.transaction() as tx:
        b = tx.bucket(clean_bucket_name)
        assert b.schemas() == []

        s = b.create_schema('s1')
        assert s.bucket == b
        assert b.schemas() == [s]

        s.rename('s2')
        assert s.bucket == b
        assert s.name == 's2'
        assert b.schemas()[0].name == 's2'

        s.drop()
        assert b.schemas() == []


def test_commits_and_rollbacks(rpc, clean_bucket_name):
    with rpc.transaction() as tx:
        b = tx.bucket(clean_bucket_name)
        assert b.schemas() == []
        b.create_schema("s3")
        assert b.schemas() != []
        # implicit commit

    with pytest.raises(ZeroDivisionError):
        with rpc.transaction() as tx:
            b = tx.bucket(clean_bucket_name)
            b.schema("s3").drop()
            assert b.schemas() == []
            1/0  # rollback schema dropping

    with rpc.transaction() as tx:
        b = tx.bucket(clean_bucket_name)
        assert b.schemas() != []
vastdb/tests/test_tables.py
ADDED

import pyarrow as pa


def test_tables(rpc, clean_bucket_name):
    with rpc.transaction() as tx:
        s = tx.bucket(clean_bucket_name).create_schema('s1')
        columns = pa.schema([
            ('a', pa.int16()),
            ('b', pa.float32()),
            ('s', pa.utf8()),
        ])
        assert s.tables() == []
        t = s.create_table('t1', columns)
        assert s.tables() == [t]

        rb = pa.record_batch(schema=columns, data=[
            [111, 222],
            [0.5, 1.5],
            ['a', 'b'],
        ])
        expected = pa.Table.from_batches([rb])
        t.insert(rb)

        actual = pa.Table.from_batches(t.select(columns=['a', 'b', 's']))
        assert actual == expected

        actual = pa.Table.from_batches(t.select(columns=['a', 'b']))
        assert actual == expected.select(['a', 'b'])

        actual = pa.Table.from_batches(t.select(columns=['b', 's', 'a']))
        assert actual == expected.select(['b', 's', 'a'])

        actual = pa.Table.from_batches(t.select(columns=['s']))
        assert actual == expected.select(['s'])

        actual = pa.Table.from_batches(t.select(columns=[]))
        assert actual == expected.select([])

        t.drop()
        s.drop()
vastdb/util.py
ADDED
import logging
from typing import Callable

import pyarrow as pa
import pyarrow.parquet as pq

from vastdb.v2 import InvalidArgumentError, Table, Schema


log = logging.getLogger(__name__)


def create_table_from_files(
        schema: Schema, table_name: str, parquet_files: [str], schema_merge_func: Callable = None) -> Table:
    if not schema_merge_func:
        schema_merge_func = default_schema_merge
    else:
        assert schema_merge_func in [default_schema_merge, strict_schema_merge, union_schema_merge]
    tx = schema.tx
    current_schema = pa.schema([])
    s3fs = pa.fs.S3FileSystem(
        access_key=tx._rpc.api.access_key, secret_key=tx._rpc.api.secret_key, endpoint_override=tx._rpc.api.url)
    for prq_file in parquet_files:
        if not prq_file.startswith('/'):
            raise InvalidArgumentError(f"Path {prq_file} must start with a '/'")
        parquet_ds = pq.ParquetDataset(prq_file.lstrip('/'), filesystem=s3fs)
        current_schema = schema_merge_func(current_schema, parquet_ds.schema)

    log.info("Creating table %s from %d Parquet files, with columns: %s",
             table_name, len(parquet_files), list(current_schema))
    table = schema.create_table(table_name, current_schema)

    log.info("Starting import of %d files to table: %s", len(parquet_files), table)
    table.import_files(parquet_files)
    log.info("Finished import of %d files to table: %s", len(parquet_files), table)
    return table


def default_schema_merge(current_schema: pa.Schema, new_schema: pa.Schema) -> pa.Schema:
    """
    This function validates a schema is contained in another schema
    Raises an InvalidArgumentError if a certain field does not exist in the target schema
    """
    if not current_schema.names:
        return new_schema
    s1 = set(current_schema)
    s2 = set(new_schema)

    if len(s1) > len(s2):
        s1, s2 = s2, s1
        result = current_schema  # We need this variable in order to preserve the original fields order
    else:
        result = new_schema

    if not s1.issubset(s2):
        log.error("Schema mismatch. schema: %s isn't contained in schema: %s.", s1, s2)
        raise InvalidArgumentError("Found mismatch in parquet files schemas.")
    return result


def strict_schema_merge(current_schema: pa.Schema, new_schema: pa.Schema) -> pa.Schema:
    """
    This function validates two Schemas are identical.
    Raises an InvalidArgumentError if schemas aren't identical.
    """
    if current_schema.names and current_schema != new_schema:
        raise InvalidArgumentError(f"Schemas are not identical. \n {current_schema} \n vs \n {new_schema}")

    return new_schema


def union_schema_merge(current_schema: pa.Schema, new_schema: pa.Schema) -> pa.Schema:
    """
    This function returns a unified schema from potentially two different schemas.
    """
    return pa.unify_schemas([current_schema, new_schema])
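
A sketch of the intended call pattern for create_table_from_files, mirroring test_create_table_from_parquets.py (credentials, bucket and file names are placeholders; paths must be of the form /<bucket>/<key>):

from vastdb import util, v2

rpc = v2.connect(access="<ACCESS-KEY>", secret="<SECRET-KEY>", endpoint="http://localhost:9090")
with rpc.transaction() as tx:
    schema = tx.bucket("my-tabular-bucket").create_schema("staging")
    # Union-merge the Parquet schemas instead of requiring containment (the default).
    table = util.create_table_from_files(
        schema, "events",
        ["/my-tabular-bucket/events-0.parquet", "/my-tabular-bucket/events-1.parquet"],
        schema_merge_func=util.union_schema_merge)
    print(table.arrow_schema)
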
vastdb/v2.py
CHANGED
@@ -1,108 +1,360 @@
-from
+from dataclasses import dataclass, field
+import logging
+import os

+import boto3
+import botocore
+import ibis
+import pyarrow as pa
+import requests

-    tx: int
-    _rpc: RPC
+from vastdb.api import VastdbApi, serialize_record_batch, build_query_data_request, parse_query_data_response, TABULAR_INVALID_ROW_ID

-    def bucket(name: str) -> Bucket

+log = logging.getLogger(__name__)
+
+
+class VastException(Exception):
+    pass
+
+
+class NotFoundError(VastException):
+    pass
+
+
+class AccessDeniedError(VastException):
+    pass
+
+
+class ImportFilesError(VastException):
+    pass
+
+
+class InvalidArgumentError(VastException):
+    pass
+
+
+class RPC:
+    def __init__(self, access=None, secret=None, endpoint=None):
+        if access is None:
+            access = os.environ['AWS_ACCESS_KEY_ID']
+        if secret is None:
+            secret = os.environ['AWS_SECRET_ACCESS_KEY']
+        if endpoint is None:
+            endpoint = os.environ['AWS_S3_ENDPOINT_URL']
+
+        self.api = VastdbApi(endpoint, access, secret)
+        self.s3 = boto3.client('s3',
+                               aws_access_key_id=access,
+                               aws_secret_access_key=secret,
+                               endpoint_url=endpoint)
+
+    def __repr__(self):
+        return f'RPC(endpoint={self.api.url}, access={self.api.access_key})'
+
+    def transaction(self):
+        return Transaction(self)
+
+
+def connect(*args, **kw):
+    return RPC(*args, **kw)
+
+
+@dataclass
+class Transaction:
+    _rpc: RPC
+    txid: int = None
+
+    def __enter__(self):
+        response = self._rpc.api.begin_transaction()
+        self.txid = int(response.headers['tabular-txid'])
+        log.debug("opened txid=%016x", self.txid)
+        return self
+
+    def __exit__(self, *args):
+        if args == (None, None, None):
+            log.debug("committing txid=%016x", self.txid)
+            self._rpc.api.commit_transaction(self.txid)
+        else:
+            log.exception("rolling back txid=%016x", self.txid)
+            self._rpc.api.rollback_transaction(self.txid)
+
+    def __repr__(self):
+        return f'Transaction(id=0x{self.txid:016x})'
+
+    def bucket(self, name: str) -> "Bucket":
+        try:
+            self._rpc.s3.head_bucket(Bucket=name)
+            return Bucket(name, self)
+        except botocore.exceptions.ClientError as e:
+            if e.response['Error']['Code'] == 403:
+                raise AccessDeniedError(f"Access is denied to bucket: {name}") from e
+            else:
+                raise NotFoundError(f"Bucket {name} does not exist") from e
+
+
+@dataclass
 class Bucket:
+    name: str
+    tx: Transaction
+
+    def create_schema(self, path: str) -> "Schema":
+        self.tx._rpc.api.create_schema(self.name, path, txid=self.tx.txid)
+        log.info("Created schema: %s", path)
+        return self.schema(path)

+    def schema(self, path: str) -> "Schema":
+        schema = self.schemas(path)
+        log.debug("schema: %s", schema)
+        if not schema:
+            raise NotFoundError(f"Schema '{path}' was not found in bucket: {self.name}")
+        assert len(schema) == 1, f"Expected to receive only a single schema, but got: {len(schema)}. ({schema})"
+        log.debug("Found schema: %s", schema[0].name)
+        return schema[0]

+    def schemas(self, schema: str = None) -> ["Schema"]:
+        schemas = []
+        next_key = 0
+        exact_match = bool(schema)
+        log.debug("list schemas param: schema=%s, exact_match=%s", schema, exact_match)
+        while True:
+            bucket_name, curr_schemas, next_key, is_truncated, _ = \
+                self.tx._rpc.api.list_schemas(bucket=self.name, next_key=next_key, txid=self.tx.txid,
+                                              name_prefix=schema, exact_match=exact_match)
+            if not curr_schemas:
+                break
+            schemas.extend(curr_schemas)
+            if not is_truncated:
+                break
+
+        return [Schema(name=name, bucket=self) for name, *_ in schemas]
+
+
+@dataclass
 class Schema:
+    name: str
+    bucket: Bucket
+
+    @property
+    def tx(self):
+        return self.bucket.tx
+
+    def create_table(self, table_name: str, columns: pa.Schema) -> "Table":
+        self.tx._rpc.api.create_table(self.bucket.name, self.name, table_name, columns, txid=self.tx.txid)
+        log.info("Created table: %s", table_name)
+        return self.table(table_name)
+
+    def table(self, name: str) -> "Table":
+        t = self.tables(table_name=name)
+        if not t:
+            raise NotFoundError(f"Table '{name}' was not found under schema: {self.name}")
+        assert len(t) == 1, f"Expected to receive only a single table, but got: {len(t)}. tables: {t}"
+        log.debug("Found table: %s", t[0])
+        return t[0]
+
+    def tables(self, table_name=None) -> ["Table"]:
+        tables = []
+        next_key = 0
+        name_prefix = table_name if table_name else ""
+        exact_match = bool(table_name)
+        while True:
+            bucket_name, schema_name, curr_tables, next_key, is_truncated, _ = \
+                self.tx._rpc.api.list_tables(
+                    bucket=self.bucket.name, schema=self.name, next_key=next_key, txid=self.tx.txid,
+                    exact_match=exact_match, name_prefix=name_prefix)
+            if not curr_tables:
+                break
+            tables.extend(curr_tables)
+            if not is_truncated:
+                break
+
+        return [_parse_table_info(table, self) for table in tables]
+
+    def drop(self) -> None:
+        self.tx._rpc.api.drop_schema(self.bucket.name, self.name, txid=self.tx.txid)
+        log.info("Dropped schema: %s", self.name)
+
+    def rename(self, new_name) -> None:
+        self.tx._rpc.api.alter_schema(self.bucket.name, self.name, txid=self.tx.txid, new_name=new_name)
+        log.info("Renamed schema: %s to %s", self.name, new_name)
+        self.name = new_name
+
+
+@dataclass
+class TableStats:
+    num_rows: int
+    size: int
+
+
+@dataclass
+class QueryConfig:
+    num_sub_splits: int = 4
+    num_splits: int = 1
+    data_endpoints: [str] = None
+    limit_per_sub_split: int = 128 * 1024
+    num_row_groups_per_sub_split: int = 8

-    def schema(name: str) -> Schema
-    def table(name: str) -> Table

+@dataclass
 class Table:
+    name: str
+    schema: pa.Schema
+    handle: int
+    stats: TableStats
+    properties: dict = None
+    arrow_schema: pa.Schema = field(init=False, compare=False)
+    _ibis_table: ibis.Schema = field(init=False, compare=False)

+    def __post_init__(self):
+        self.properties = self.properties or {}
+        self.arrow_schema = self.columns()
+        self._ibis_table = ibis.Schema.from_pyarrow(self.arrow_schema)

+    @property
+    def tx(self):
+        return self.schema.tx

-    Cannot do pagination
-    """
+    @property
+    def bucket(self):
+        return self.schema.bucket

+    def __repr__(self):
+        return f"{type(self).__name__}(name={self.name})"

+    def columns(self) -> pa.Schema:
+        cols = self.tx._rpc.api._list_table_columns(self.bucket.name, self.schema.name, self.name, txid=self.tx.txid)
+        self.arrow_schema = pa.schema([(col[0], col[1]) for col in cols])
+        return self.arrow_schema

-        yield Context(rpc, tx)
-    finally:
-        rpc.close_transaction(tx)
+    def import_files(self, files_to_import: [str]) -> None:
+        source_files = {}
+        for f in files_to_import:
+            bucket_name, object_path = _parse_bucket_and_object_names(f)
+            source_files[(bucket_name, object_path)] = b''

+        self._execute_import(source_files)

+    def import_partitioned_files(self, files_and_partitions: {str: pa.RecordBatch}) -> None:
+        source_files = {}
+        for f, record_batch in files_and_partitions.items():
+            bucket_name, object_path = _parse_bucket_and_object_names(f)
+            serialized_batch = _serialize_record_batch(record_batch)
+            source_files = {(bucket_name, object_path): serialized_batch.to_pybytes()}

+        self._execute_import(source_files)

+    def _execute_import(self, source_files):
+        try:
+            self.tx._rpc.api.import_data(
+                self.bucket.name, self.schema.name, self.name, source_files, txid=self.tx.txid)
+        except requests.HTTPError as e:
+            raise ImportFilesError(f"import_files failed with status: {e.response.status_code}, reason: {e.response.reason}")
+        except Exception as e:
+            # TODO: investigate and raise proper error in case of failure mid import.
+            raise ImportFilesError("import_files failed") from e

+    def select(self, columns: [str], predicate: ibis.expr.types.BooleanColumn = None,
+               config: "QueryConfig" = None):
+        if config is None:
+            config = QueryConfig()

+        api = self.tx._rpc.api
+        field_names = columns
+        filters = []
+        bucket = self.bucket.name
+        schema = self.schema.name
+        table = self.name
+        query_data_request = build_query_data_request(
+            schema=self.arrow_schema, filters=filters, field_names=field_names)

+        start_row_ids = {i: 0 for i in range(config.num_sub_splits)}
+        assert config.num_splits == 1  # TODO()
+        split = (0, 1, config.num_row_groups_per_sub_split)
+        response_row_id = False

+        while not all(row_id == TABULAR_INVALID_ROW_ID for row_id in start_row_ids.values()):
+            response = api.query_data(
+                bucket=bucket,
+                schema=schema,
+                table=table,
+                params=query_data_request.serialized,
+                split=split,
+                num_sub_splits=config.num_sub_splits,
+                response_row_id=response_row_id,
+                txid=self.tx.txid,
+                limit_rows=config.limit_per_sub_split,
+                sub_split_start_row_ids=start_row_ids.items())

+            pages_iter = parse_query_data_response(
+                conn=response.raw,
+                schema=query_data_request.response_schema,
+                start_row_ids=start_row_ids)

+            for page in pages_iter:
+                for batch in page.to_batches():
+                    if len(batch) > 0:
+                        yield batch

-    t.import_files(["/buck1/file1", ... "/buck3/file3"])
-    t.import_partitioned_files({"/buck1/file1": pa.RecordBatch, ... "/buck3/file3": pa.RecordBatch})
+    def insert(self, rows: pa.RecordBatch) -> None:
+        blob = serialize_record_batch(rows)
+        self.tx._rpc.api.insert_rows(self.bucket.name, self.schema.name, self.name, record_batch=blob, txid=self.tx.txid)

-        predicate: ibis.BooleanColumn???,
-        limit: int = None,
-        config: QueryConfig = None
-    )
+    def drop(self) -> None:
+        self.tx._rpc.api.drop_table(self.bucket.name, self.schema.name, self.name, txid=self.tx.txid)
+        log.info("Dropped table: %s", self.name)

+    def rename(self, new_name) -> None:
+        self.tx._rpc.api.alter_table(
+            self.bucket.name, self.schema.name, self.name, txid=self.tx.txid, new_name=new_name)
+        log.info("Renamed table from %s to %s ", self.name, new_name)
+        self.name = new_name

-    ...
+    def add_column(self, new_column: pa.Schema) -> None:
+        self.tx._rpc.api.add_columns(self.bucket.name, self.schema.name, self.name, new_column, txid=self.tx.txid)
+        log.info("Added column(s): %s", new_column)
+        self.arrow_schema = self.columns()

+    def drop_column(self, column_to_drop: pa.Schema) -> None:
+        self.tx._rpc.api.drop_columns(self.bucket.name, self.schema.name, self.name, column_to_drop, txid=self.tx.txid)
+        log.info("Dropped column(s): %s", column_to_drop)
+        self.arrow_schema = self.columns()

+    def rename_column(self, current_column_name: str, new_column_name: str) -> None:
+        self.tx._rpc.api.alter_column(self.bucket.name, self.schema.name, self.name, name=current_column_name,
+                                      new_name=new_column_name, txid=self.tx.txid)
+        log.info("Renamed column: %s to %s", current_column_name, new_column_name)
+        self.arrow_schema = self.columns()
+
+    def __getitem__(self, col_name):
+        return self._ibis_table[col_name]
+
+
+def _parse_table_info(table_info, schema: "Schema"):
+    stats = TableStats(num_rows=table_info.num_rows, size=table_info.size_in_bytes)
+    return Table(name=table_info.name, schema=schema, handle=int(table_info.handle), stats=stats)
+
+
+def _parse_bucket_and_object_names(path: str) -> (str, str):
+    if not path.startswith('/'):
+        raise InvalidArgumentError(f"Path {path} must start with a '/'")
+    components = path.split(os.path.sep)
+    bucket_name = components[1]
+    object_path = os.path.sep.join(components[2:])
+    return bucket_name, object_path
+
+
+def _serialize_record_batch(record_batch: pa.RecordBatch) -> pa.lib.Buffer:
+    sink = pa.BufferOutputStream()
+    with pa.ipc.new_stream(sink, record_batch.schema) as writer:
+        writer.write(record_batch)
+    return sink.getvalue()
+
+
+def _parse_endpoint(endpoint):
+    if ":" in endpoint:
+        endpoint, port = endpoint.split(":")
+        port = int(port)
+    else:
+        port = 80
+    log.debug("endpoint: %s, port: %d", endpoint, port)
+    return endpoint, port
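
Taken together, the rewritten module gives a transactional, end-to-end flow; a sketch with placeholder credentials and names, mirroring what vastdb/tests/test_tables.py exercises:

import pyarrow as pa
from vastdb import v2

rpc = v2.connect(access="<ACCESS-KEY>", secret="<SECRET-KEY>", endpoint="http://localhost:9090")
with rpc.transaction() as tx:            # commits on success, rolls back if the block raises
    schema = tx.bucket("my-tabular-bucket").create_schema("demo")
    columns = pa.schema([('a', pa.int16()), ('s', pa.utf8())])
    table = schema.create_table("t1", columns)
    table.insert(pa.record_batch(schema=columns, data=[[1, 2], ['x', 'y']]))
    result = pa.Table.from_batches(table.select(columns=['a', 's']))
    print(result.num_rows)               # 2
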
{vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/RECORD
CHANGED

@@ -163,10 +163,18 @@ vast_protobuf/substrait/extensions/extensions_pb2.py,sha256=I_6c6nMmMaYvVtzF-5yc
 vast_protobuf/tabular/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vast_protobuf/tabular/rpc_pb2.py,sha256=7kW2WrA2sGk6WVbD83mc_cKkZ2MxoImSO5GOVz6NbbE,23776
 vastdb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vastdb/api.py,sha256=
-vastdb/
-vastdb
-vastdb
-vastdb
-vastdb
-vastdb
+vastdb/api.py,sha256=u5Cf01LeHGN7x_pcjnzfLV-lU485FGFCv7eTIKpSaB0,124883
+vastdb/bench_scan.py,sha256=95O34oHS0UehX2ad4T2mok87CKszCFLCDZASMnZp77M,1208
+vastdb/util.py,sha256=EF892Gbs08BxHVgG3FZ6QvhpKI2-eIL5bPzzrYE_Qd8,2905
+vastdb/v2.py,sha256=gWZUnhSLEvtrXPxoTpTAwNuzU9qxrCaWKXmeNBpMrGY,12601
+vastdb/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vastdb/tests/conftest.py,sha256=dcYFJO0Riyn687qZTwcwKbwGieg6s4yZrVFrJAX-ylU,1461
+vastdb/tests/test_create_table_from_parquets.py,sha256=dxykmvUR-vui6Z3qUvXPYJ9Nw6V_qcxKl4NDNQK4kiY,1963
+vastdb/tests/test_sanity.py,sha256=7HmCjuOmtoYnuWiPjMP6m7sYQYop1_qRCzq2ZX0rKlc,2404
+vastdb/tests/test_schemas.py,sha256=-nntn3ltBaaqSTsUvi-i9J0yr4TYvOTRyTNY039vEIk,1047
+vastdb/tests/test_tables.py,sha256=KPe0ESVGWixecTSwQ8whzSF-NZrNVZ-Kv-C4Gz-OQnQ,1225
+vastdb-0.0.5.3.dist-info/LICENSE,sha256=obffan7LYrq7hLHNrY7vHcn2pKUTBUYXMKu-VOAvDxU,11333
+vastdb-0.0.5.3.dist-info/METADATA,sha256=Yd93AoZE5ZUhJUr0MhtfhcMaQUtSFZ1wbzc6vvEvclQ,1369
+vastdb-0.0.5.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+vastdb-0.0.5.3.dist-info/top_level.txt,sha256=Vsj2MKtlhPg0J4so64slQtnwjhgoPmJgcG-6YcVAwVc,20
+vastdb-0.0.5.3.dist-info/RECORD,,
{vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/LICENSE
File without changes

{vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/WHEEL
File without changes

{vastdb-0.0.5.2.dist-info → vastdb-0.0.5.3.dist-info}/top_level.txt
File without changes