vastdb 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vastdb/tests/test_tables.py CHANGED
@@ -6,7 +6,6 @@ import threading
  from contextlib import closing
  from tempfile import NamedTemporaryFile
 
- import duckdb
  import pyarrow as pa
  import pyarrow.compute as pc
  import pyarrow.parquet as pq
@@ -91,7 +90,6 @@ def test_exists(session, clean_bucket_name):
          assert s.tables() == [t]
 
 
-
  def test_update_table(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.int64()),
@@ -147,12 +145,13 @@ def test_update_table(session, clean_bucket_name):
          'b': [0.5, 1.5, 2.5]
      }
 
+
  def test_select_with_multisplits(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.int32())
      ])
 
-     data = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+     data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      data = data * 1000
      expected = pa.table(schema=columns, data=[data])
 
@@ -179,15 +178,15 @@ def test_types(session, clean_bucket_name):
          ('t3', pa.time32('ms')),
          ('t6', pa.time64('us')),
          ('t9', pa.time64('ns')),
-         ('ts0' ,pa.timestamp('s')),
-         ('ts3' ,pa.timestamp('ms')),
-         ('ts6' ,pa.timestamp('us')),
-         ('ts9' ,pa.timestamp('ns')),
+         ('ts0', pa.timestamp('s')),
+         ('ts3', pa.timestamp('ms')),
+         ('ts6', pa.timestamp('us')),
+         ('ts9', pa.timestamp('ns')),
      ])
 
      expected = pa.table(schema=columns, data=[
          [True, True, False],
-         [1 , 2, 4],
+         [1, 2, 4],
          [1999, 2000, 2001],
          [11122221, 222111122, 333333],
          [0.5, 1.5, 2.5],
@@ -295,7 +294,7 @@ def test_filters(session, clean_bucket_name):
          assert select(((t['a'] > 111) | (t['a'] < 333)) & (t['b'] < 2.5)) == expected.filter(((pc.field('a') > 111) | (pc.field('a') < 333)) & (pc.field('b') < 2.5))
          with pytest.raises(NotImplementedError):
              assert select((t['a'] > 111) | (t['b'] > 0) | (t['s'] < 'ccc')) == expected.filter((pc.field('a') > 111) | (pc.field('b') > 0) | (pc.field('s') < 'ccc'))
-         assert select((t['a'] > 111) | (t['a'] < 333) | (t['a'] == 777) ) == expected.filter((pc.field('a') > 111) | (pc.field('a') < 333) | (pc.field('a') == 777))
+         assert select((t['a'] > 111) | (t['a'] < 333) | (t['a'] == 777)) == expected.filter((pc.field('a') > 111) | (pc.field('a') < 333) | (pc.field('a') == 777))
 
          assert select(t['s'].isnull()) == expected.filter(pc.field('s').is_null())
          assert select((t['s'].isnull()) | (t['s'] == 'bb')) == expected.filter((pc.field('s').is_null()) | (pc.field('s') == 'bb'))
@@ -306,26 +305,6 @@ def test_filters(session, clean_bucket_name):
          assert select(t['s'].contains('y')) == expected.filter(pc.field('s') == 'xyz')
 
 
- def test_duckdb(session, clean_bucket_name):
-     columns = pa.schema([
-         ('a', pa.int32()),
-         ('b', pa.float64()),
-     ])
-     data = pa.table(schema=columns, data=[
-         [111, 222, 333],
-         [0.5, 1.5, 2.5],
-     ])
-     with prepare_data(session, clean_bucket_name, 's', 't', data) as t:
-         conn = duckdb.connect()
-         batches = t.select(columns=['a'], predicate=(t['b'] < 2)) # noqa: F841
-         actual = conn.execute('SELECT max(a) as "a_max" FROM batches').arrow()
-         expected = (data
-             .filter(pc.field('b') < 2)
-             .group_by([])
-             .aggregate([('a', 'max')]))
-         assert actual == expected
-
-
  def test_parquet_export(session, clean_bucket_name):
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).create_schema('s1')
@@ -344,8 +323,7 @@ def test_parquet_export(session, clean_bucket_name):
              ['a', 'b'],
          ])
          expected = pa.Table.from_batches([rb])
-         rb = t.insert(rb)
-         assert rb.to_pylist() == [0, 1]
+         t.insert(rb)
          actual = pa.Table.from_batches(t.select())
          assert actual == expected
 
@@ -359,6 +337,7 @@ def test_parquet_export(session, clean_bucket_name):
 
          assert expected == pq.read_table(parquet_file.name)
 
+
  def test_errors(session, clean_bucket_name):
      with pytest.raises(errors.MissingSchema):
          with session.transaction() as tx:
@@ -378,7 +357,8 @@ def test_errors(session, clean_bucket_name):
              ('s', pa.utf8()),
          ])
          s.create_table('t1', columns)
-         s.drop() # cannot drop schema without dropping its tables first
+         s.drop()  # cannot drop schema without dropping its tables first
+
 
  def test_rename_schema(session, clean_bucket_name):
 
@@ -436,20 +416,21 @@ def test_rename_table(session, clean_bucket_name):
              s.table('t')
          t = s.table('t2')
 
-         #assert that other transactions are isolated
+         # assert that other transactions are isolated
          with pytest.raises(errors.MissingTable):
              tx2.bucket(clean_bucket_name).schema('s').table('t2')
          tx2.bucket(clean_bucket_name).schema('s').table('t')
 
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).schema('s')
-         #assert that new transactions see the change
+         # assert that new transactions see the change
          with pytest.raises(errors.MissingTable):
              s.table('t')
          t = s.table('t2')
          t.drop()
          s.drop()
 
+
  def test_add_column(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.int16()),
@@ -472,18 +453,18 @@ def test_add_column(session, clean_bucket_name):
          # in which it was added
          assert t.arrow_schema == new_schema
 
-         #assert that other transactions are isolated
+         # assert that other transactions are isolated
          assert tx2.bucket(clean_bucket_name).schema('s').table('t').arrow_schema == columns
 
-
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).schema('s')
          t = s.table('t')
-         #assert that new transactions see the change
+         # assert that new transactions see the change
          assert t.arrow_schema == new_schema
          t.drop()
          s.drop()
 
+
  def test_drop_column(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.int16()),
@@ -507,31 +488,32 @@ def test_drop_column(session, clean_bucket_name):
          # in which it was added
          assert t.arrow_schema == new_schema
 
-         #assert that other transactions are isolated
+         # assert that other transactions are isolated
          assert tx2.bucket(clean_bucket_name).schema('s').table('t').arrow_schema == columns
 
-
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).schema('s')
          t = s.table('t')
-         #assert that new transactions see the change
+         # assert that new transactions see the change
          assert t.arrow_schema == new_schema
          t.drop()
          s.drop()
 
+
  def test_rename_column(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.int16()),
          ('b', pa.float32()),
          ('s', pa.utf8()),
      ])
-     def prepare_rename_column(schema : pa.Schema, old_name : str, new_name : str) -> pa.Schema:
+
+     def prepare_rename_column(schema: pa.Schema, old_name: str, new_name: str) -> pa.Schema:
          field_idx = schema.get_field_index(old_name)
          column_to_rename = schema.field(field_idx)
          renamed_column = column_to_rename.with_name(new_name)
          return schema.set(field_idx, renamed_column)
 
-     new_schema = prepare_rename_column(columns,'a','aaa')
+     new_schema = prepare_rename_column(columns, 'a', 'aaa')
 
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).create_schema('s')
@@ -546,10 +528,10 @@ def test_rename_column(session, clean_bucket_name):
          # in which it was added
          assert t.arrow_schema == new_schema
 
-         #assert that other transactions are isolated
+         # assert that other transactions are isolated
          assert tx2.bucket(clean_bucket_name).schema('s').table('t').arrow_schema == columns
 
-     #assert that new transactions see the change
+     # assert that new transactions see the change
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).schema('s')
          t = s.table('t')
@@ -564,7 +546,7 @@ def test_rename_column(session, clean_bucket_name):
          t1 = tx1.bucket(clean_bucket_name).schema('s').table('t')
          t2 = tx2.bucket(clean_bucket_name).schema('s').table('t')
          t1.rename_column('b', 'bb')
-         with pytest.raises(HTTPError, match = '409 Client Error: Conflict'):
+         with pytest.raises(HTTPError, match='409 Client Error: Conflict'):
              t2.rename_column('b', 'bbb')
 
      with session.transaction() as tx:
@@ -580,6 +562,7 @@ def test_rename_column(session, clean_bucket_name):
          t.drop()
          s.drop()
 
+
  def test_select_stop(session, clean_bucket_name):
      columns = pa.schema([
          ('a', pa.uint8()),
@@ -602,15 +585,16 @@ def test_select_stop(session, clean_bucket_name):
      qc = QueryConfig(num_sub_splits=2, num_splits=4, num_row_groups_per_sub_split=1)
      with session.transaction() as tx:
          t = tx.bucket(clean_bucket_name).schema('s').table('t')
-         t.refresh_stats()
-         qc.data_endpoints = list(t.stats.endpoints) * 2
+         qc.data_endpoints = list(t.get_stats().endpoints) * 2
 
      # Duplicate the table until it is large enough to generate enough batches
      while num_rows < (qc.num_sub_splits * qc.num_splits) * ROWS_PER_GROUP:
+         # We need two separate transactions to prevent an infinite loop that may happen
+         # while appending and reading the same table using a single transaction.
          with session.transaction() as tx_read, session.transaction() as tx_write:
              t_read = tx_read.bucket(clean_bucket_name).schema('s').table('t')
              t_write = tx_write.bucket(clean_bucket_name).schema('s').table('t')
-             for batch in t_read.select(['a'],config=qc):
+             for batch in t_read.select(['a'], config=qc):
                  t_write.insert(batch)
          num_rows = num_rows * 2
          log.info("Num rows: %d", num_rows)
@@ -627,11 +611,12 @@ def test_select_stop(session, clean_bucket_name):
      # If this assert triggers it just means that the test assumptions about how
      # the tabular server splits the batches is not true anymore and we need to
      # rewrite the test.
-     assert read_batches == qc.num_splits*qc.num_sub_splits
-     qc.query_id = str(random.randint(0,2**32))
+     assert read_batches == qc.num_splits * qc.num_sub_splits
+     qc.query_id = str(random.randint(0, 2**32))
      log.info("query id is: %s", qc.query_id)
+
      def active_threads():
-         log.debug("%s",[t.getName() for t in threading.enumerate() if t.is_alive()])
+         log.debug("%s", [t.getName() for t in threading.enumerate() if t.is_alive()])
          return sum([1 if t.is_alive() and qc.query_id in t.getName() else 0 for t in threading.enumerate()])
 
      assert active_threads() == 0
vastdb/tests/util.py CHANGED
@@ -9,10 +9,7 @@ def prepare_data(session, clean_bucket_name, schema_name, table_name, arrow_tabl
      with session.transaction() as tx:
          s = tx.bucket(clean_bucket_name).create_schema(schema_name)
          t = s.create_table(table_name, arrow_table.schema)
-         row_ids_array = t.insert(arrow_table)
-         row_ids = row_ids_array.to_pylist()
-         log.debug("row_ids=%s" % row_ids)
-         assert row_ids == list(range(arrow_table.num_rows))
+         t.insert(arrow_table)
          yield t
          t.drop()
          s.drop()
vastdb/transaction.py CHANGED
@@ -8,19 +8,21 @@ A transcation is used as a context manager, since every Database-related operati
 
  import logging
  from dataclasses import dataclass
+ from typing import Optional
 
  import botocore
 
- from . import bucket, errors, session
+ from . import bucket, errors, schema, session, table
 
  log = logging.getLogger(__name__)
 
+
  @dataclass
  class Transaction:
      """A holder of a single VAST transaction."""
 
      _rpc: "session.Session"
-     txid: int = None
+     txid: Optional[int] = None
 
      def __enter__(self):
          """Create a transaction and store its ID."""
@@ -31,12 +33,14 @@ class Transaction:
 
      def __exit__(self, exc_type, exc_value, exc_traceback):
          """On success, the transaction is committed. Otherwise, it is rolled back."""
+         txid = self.txid
+         self.txid = None
          if (exc_type, exc_value, exc_traceback) == (None, None, None):
-             log.debug("committing txid=%016x", self.txid)
-             self._rpc.api.commit_transaction(self.txid)
+             log.debug("committing txid=%016x", txid)
+             self._rpc.api.commit_transaction(txid)
          else:
-             log.exception("rolling back txid=%016x due to:", self.txid)
-             self._rpc.api.rollback_transaction(self.txid)
+             log.exception("rolling back txid=%016x due to:", txid)
+             self._rpc.api.rollback_transaction(txid)
 
      def __repr__(self):
          """Don't show the session details."""
@@ -52,3 +56,9 @@ class Transaction:
                  raise errors.MissingBucket(name)
              raise
          return bucket.Bucket(name, self)
+
+     def catalog(self, fail_if_missing=True) -> Optional["table.Table"]:
+         """Return VAST Catalog table."""
+         b = bucket.Bucket("vast-big-catalog-bucket", self)
+         s = schema.Schema("vast_big_catalog_schema", b)
+         return s.table(name="vast_big_catalog_table", fail_if_missing=fail_if_missing)
vastdb/util.py CHANGED
@@ -1,5 +1,5 @@
  import logging
- from typing import Callable
+ from typing import Callable, List, Optional
 
  import pyarrow as pa
  import pyarrow.parquet as pq
@@ -12,8 +12,9 @@ log = logging.getLogger(__name__)
 
 
  def create_table_from_files(
-         schema: Schema, table_name: str, parquet_files: [str], schema_merge_func: Callable = None,
-         config: ImportConfig = None) -> Table:
+         schema: Schema, table_name: str, parquet_files: List[str],
+         schema_merge_func: Optional[Callable] = None,
+         config: Optional[ImportConfig] = None) -> Table:
      if not schema_merge_func:
          schema_merge_func = default_schema_merge
      else:
{vastdb-0.1.1.dist-info → vastdb-0.1.2.dist-info}/METADATA RENAMED
@@ -1,12 +1,11 @@
  Metadata-Version: 2.1
  Name: vastdb
- Version: 0.1.1
+ Version: 0.1.2
  Summary: VAST Data SDK
  Home-page: https://github.com/vast-data/vastdb_sdk
  Author: VAST DATA
  Author-email: hello@vastdata.com
  License: Copyright (C) VAST Data Ltd.
- Platform: UNKNOWN
  Classifier: Development Status :: 4 - Beta
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python :: 3
@@ -34,5 +33,3 @@ and [VAST Catalog](https://vastdata.com/blog/vast-catalog-treat-your-file-system
  enabling schema and table management, efficient ingest, query and modification of columnar data.
 
  For more details, see [our whitepaper](https://vastdata.com/whitepaper/#TheVASTDataBase).
-
-
{vastdb-0.1.1.dist-info → vastdb-0.1.2.dist-info}/RECORD RENAMED
@@ -148,28 +148,29 @@ vast_flatbuf/tabular/ObjectDetails.py,sha256=qW0WtbkCYYE_L-Kw6VNRDCLYaRm5lKvTbLN
  vast_flatbuf/tabular/S3File.py,sha256=KC9c2oS5-JXwTTriUVFdjOvRG0B54Cq9kviSDZY3NI0,4450
  vast_flatbuf/tabular/VipRange.py,sha256=_BJd1RRZAcK76T9vlsHzXKYVsPVaz6WTEAqStMQCAUQ,2069
  vast_flatbuf/tabular/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vastdb/__init__.py,sha256=GY30IfZQApfl7HfcFmfTzFpx48oHgQIrDcUQCiTnxpo,206
- vastdb/bucket.py,sha256=5J8KBdRViaz5bZ8WEPggQj7DfJaIhY7CqpoWP6reaDo,2854
+ vastdb/__init__.py,sha256=cMJtZuJ0IL9aKyM3DUWqTCzuP1H1MXXVivKKE1-q0DY,292
+ vastdb/bucket.py,sha256=xtKs7S4w0jmI4MujDWH3HDI-iEgbq5Xqqsod-tw4zSo,2991
  vastdb/conftest.py,sha256=pKpo_46Vq4QHzTDQAFxasrVhnZ2V2L-y6IMLxojxaFM,2132
- vastdb/errors.py,sha256=wCJp70QyBW8US2KMwhB6e5ZnKRft4GiN8jyJ36f1Yuo,3315
- vastdb/internal_commands.py,sha256=rmxOjIq229gsxFFZ4nKXwVIFJcu8WR1DVsE-91w4-BY,101564
- vastdb/schema.py,sha256=x9Yn4tFTFkSpJbQqpqlecKUSOK214XsRLdOUrNW0jzM,3192
+ vastdb/errors.py,sha256=fxpKSxjEgoJZuBtEGWzTW9lpDlEjuzgpgXwAQc1W6BQ,3436
+ vastdb/internal_commands.py,sha256=3F6FiYu-Ama1zBO7hENPxCaQYJT8mcZP6rSQvtI7Sks,101273
+ vastdb/schema.py,sha256=MrQr-WIrES8KcQ0V6cJkRRp_-9jj9FboyrBnkNBsw-8,3324
  vastdb/session.py,sha256=VZOFGZbAdr5Tl4cp88VRQYnR4Q16UNuYjSmX_QPW1II,1718
- vastdb/table.py,sha256=eALN5YpUfDFqZNF_lp6lZD5RJkBKqp5Mlc6hpwGI8Rg,20443
- vastdb/transaction.py,sha256=2I5k81YvcgDsp07BrAWkmXf41qUP6m88Y40rFfuIKvI,1796
- vastdb/util.py,sha256=VR0UJ1D0WUpqS5edG_mkxDZYZJ_qqce8y7iJOvqeyWE,2974
+ vastdb/table.py,sha256=bdx3C1iWiFivKmtifH7MyG7TMqnVVIU91as-_hMn1rE,20532
+ vastdb/transaction.py,sha256=1uCSHXqWcwsMJv6DuNx4WyQMGUm8P-RCCqYdBdUGusI,2196
+ vastdb/util.py,sha256=Tjj6p4gqabK5G21uWuCiuYM9FaaR04_Zk5X8NWtcdj8,3022
  vastdb/bench/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vastdb/bench/test_perf.py,sha256=X7BIo60L5Oj7H-56e8pDFtXY9rNLerkywKexXWiqvrY,1111
+ vastdb/bench/test_perf.py,sha256=iHE3E60fvyU5SBDHPi4h03Dj6QcY6VI9l9mMhgNMtPc,1117
  vastdb/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ vastdb/tests/test_duckdb.py,sha256=KDuv4PrjGEwChCGHG36xNT2JiFlBOt6K3DQ3L06Kq-A,1913
  vastdb/tests/test_imports.py,sha256=fDUjO5U-5i4QTIMoNnSSW4X_ZnOStLbx0mJkNq2pj9Q,5033
  vastdb/tests/test_nested.py,sha256=3kejEvtSqV0LrUgb1QglRjrlxnKI4_AXTFw2nE7Q520,951
- vastdb/tests/test_projections.py,sha256=0ZiFya6rzGvnKOrdb1xxxv-BEerNmiK_ymfZM6eIvvw,1254
- vastdb/tests/test_sanity.py,sha256=kaOmZWDGBc-XhZ8eFQ3sks2Mo9De8q41Z5pqYWzJsHM,2958
- vastdb/tests/test_schemas.py,sha256=8ZlEvnU7Fyg-TDQDxD65GAql4rU8R2_SFWVGrdv564o,1721
- vastdb/tests/test_tables.py,sha256=o_JPqr2GX1DDpPB4Zq4E1YPFgmlsiXyVe1S3TcCjF-w,26226
- vastdb/tests/util.py,sha256=_euE3fKJqgNssT9gVxlcHjdE61mnsNQcwDPzn1tTe9g,597
- vastdb-0.1.1.dist-info/LICENSE,sha256=obffan7LYrq7hLHNrY7vHcn2pKUTBUYXMKu-VOAvDxU,11333
- vastdb-0.1.1.dist-info/METADATA,sha256=e84OEOXS09DEXniHJAU2aeK80-1h2rIZmYNBCMLa1AM,1331
- vastdb-0.1.1.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
- vastdb-0.1.1.dist-info/top_level.txt,sha256=Vsj2MKtlhPg0J4so64slQtnwjhgoPmJgcG-6YcVAwVc,20
- vastdb-0.1.1.dist-info/RECORD,,
+ vastdb/tests/test_projections.py,sha256=_cDNfD5zTwbCXLk6uGpPUWGN0P-4HElu5OjubWu-Jg0,1255
+ vastdb/tests/test_sanity.py,sha256=ixx0QPo73hLHjAa7bByFXjS1XST0WvmSwLEpgnHh_JY,2960
+ vastdb/tests/test_schemas.py,sha256=b-JpYHOFYVTdE570_La7O2RWf8BGN-q8KDXNXeC8CSg,1724
+ vastdb/tests/test_tables.py,sha256=TXM4LSBvPb3EEu7XScZ5iEiu_zhHClq61W18EQodxw8,25667
+ vastdb/tests/util.py,sha256=NaCzKymEGy1xuiyMxyt2_0frKVfVk9iGrFwLf3GHjTI,435
+ vastdb-0.1.2.dist-info/LICENSE,sha256=obffan7LYrq7hLHNrY7vHcn2pKUTBUYXMKu-VOAvDxU,11333
+ vastdb-0.1.2.dist-info/METADATA,sha256=edJPdDWmHj6tRHRR97eSppfN9_4ARfIr0jS9HMjHfSQ,1311
+ vastdb-0.1.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ vastdb-0.1.2.dist-info/top_level.txt,sha256=Vsj2MKtlhPg0J4so64slQtnwjhgoPmJgcG-6YcVAwVc,20
+ vastdb-0.1.2.dist-info/RECORD,,
{vastdb-0.1.1.dist-info → vastdb-0.1.2.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.37.0)
+ Generator: bdist_wheel (0.43.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 