vastdb 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
vastdb/errors.py CHANGED
@@ -71,6 +71,10 @@ class ServiceUnavailable(HttpError):
     pass
 
 
+class Slowdown(ServiceUnavailable):
+    pass
+
+
 class UnexpectedError(HttpError):
     pass
 
@@ -97,6 +101,10 @@ class MissingTransaction(Missing):
     pass
 
 
+class MissingRowIdColumn(Missing):
+    pass
+
+
 class NotSupported(Exception):
     pass
 
@@ -163,6 +171,12 @@ class NotSupportedVersion(NotSupported):
     version: str
 
 
+def handle_unavailable(**kwargs):
+    if kwargs['code'] == 'SlowDown':
+        raise Slowdown(**kwargs)
+    raise ServiceUnavailable(**kwargs)
+
+
 ERROR_TYPES_MAP = {
     HttpStatus.BAD_REQUEST: BadRequest,
     HttpStatus.FOBIDDEN: Forbidden,
@@ -172,7 +186,7 @@ ERROR_TYPES_MAP = {
     HttpStatus.CONFLICT: Conflict,
     HttpStatus.INTERNAL_SERVER_ERROR: InternalServerError,
     HttpStatus.NOT_IMPLEMENTED: NotImplemented,
-    HttpStatus.SERVICE_UNAVAILABLE: ServiceUnavailable,
+    HttpStatus.SERVICE_UNAVAILABLE: handle_unavailable,
 }
 
 
@@ -205,4 +219,4 @@ def from_response(res: requests.Response):
     log.warning("RPC failed: %s", kwargs)
     status = HttpStatus(res.status_code)
     error_type = ERROR_TYPES_MAP.get(status, UnexpectedError)
-    return error_type(**kwargs)
+    return error_type(**kwargs)  # type: ignore
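Note on the dispatch: `ERROR_TYPES_MAP` values are now a mix of exception classes and the `handle_unavailable` function, and `from_response` calls whichever it finds with the same keyword arguments (hence the added `# type: ignore`). A minimal, self-contained sketch of this pattern, using stand-in classes rather than the SDK's real ones:

```python
# Sketch of the map-dispatch pattern above; all names here are stand-ins.
class ServiceUnavailable(Exception):
    def __init__(self, **kwargs):
        super().__init__(kwargs)

class Slowdown(ServiceUnavailable):
    pass

def handle_unavailable(**kwargs):
    # S3-style 503 responses carry a 'SlowDown' code when the server throttles
    if kwargs['code'] == 'SlowDown':
        raise Slowdown(**kwargs)
    raise ServiceUnavailable(**kwargs)

ERROR_TYPES_MAP = {503: handle_unavailable, 500: ServiceUnavailable}

def raise_for(status, **kwargs):
    error_type = ERROR_TYPES_MAP[status]
    # classes construct an instance to raise; handle_unavailable raises directly
    raise error_type(**kwargs)

try:
    raise_for(503, code='SlowDown')
except Slowdown:
    print('throttled - safe to retry with backoff')
```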
vastdb/internal_commands.py CHANGED
@@ -178,6 +178,7 @@ class Predicate:
         )
         from ibis.expr.operations.logical import (
             And,
+            Between,
             Equals,
             Greater,
             GreaterEqual,
@@ -200,6 +201,7 @@ class Predicate:
             IsNull: self.build_is_null,
             Not: self.build_is_not_null,
             StringContains: self.build_match_substring,
+            Between: self.build_between,
         }
 
         positions_map = dict((f.name, index) for index, f in enumerate(self.schema))  # TODO: BFS
@@ -237,6 +239,9 @@ class Predicate:
                     raise NotImplementedError(self.expr)
                 column, = not_arg.args
                 literals = (None,)
+            elif builder_func == self.build_between:
+                column, lower, upper = inner_op.args
+                literals = (None,)
             else:
                 column, arg = inner_op.args
                 if isinstance(arg, tuple):
@@ -262,6 +267,9 @@ class Predicate:
                 args_offsets = [column_offset]
                 if literal is not None:
                     args_offsets.append(self.build_literal(field=field, value=literal.value))
+                if builder_func == self.build_between:
+                    args_offsets.append(self.build_literal(field=field, value=lower.value))
+                    args_offsets.append(self.build_literal(field=field, value=upper.value))
 
                 inner_offsets.append(builder_func(*args_offsets))
 
@@ -556,6 +564,13 @@ class Predicate:
     def build_match_substring(self, column: int, literal: int):
         return self.build_function('match_substring', column, literal)
 
+    def build_between(self, column: int, lower: int, upper: int):
+        offsets = [
+            self.build_greater_equal(column, lower),
+            self.build_less_equal(column, upper),
+        ]
+        return self.build_and(offsets)
+
 
 class FieldNodesState:
     def __init__(self) -> None:
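`build_between` needs no new server-side primitive: it lowers `x.between(lo, hi)` into two existing comparisons joined by `and`. The equivalence (with both bounds inclusive) is easy to verify locally with plain PyArrow, independent of the SDK:

```python
import pyarrow as pa
import pyarrow.compute as pc

arr = pa.array([100, 222, 333, 444, 555])

# between(lo, hi) == (x >= lo) AND (x <= hi); both bounds are inclusive
between = pc.and_(pc.greater_equal(arr, 222), pc.less_equal(arr, 444))
print(between.to_pylist())  # [False, True, True, True, False]
```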
@@ -1578,8 +1593,7 @@ class VastdbApi:
         headers['Content-Length'] = str(len(record_batch))
         res = self.session.post(self._api_prefix(bucket=bucket, schema=schema, table=table, command="rows"),
                                 data=record_batch, headers=headers, stream=True)
-        self._check_res(res, "insert_rows", expected_retvals)
-        res.raw.read()  # flush the response
+        return self._check_res(res, "insert_rows", expected_retvals)
 
     def update_rows(self, bucket, schema, table, record_batch, txid=0, client_tags=[], expected_retvals=[]):
         """
@@ -2107,6 +2121,13 @@ class QueryDataRequest:
         self.response_parser = response_parser
 
 
+def get_response_schema(schema: 'pa.Schema' = pa.schema([]), field_names: Optional[List[str]] = None):
+    if field_names is None:
+        field_names = [field.name for field in schema]
+
+    return pa.schema([schema.field(name) for name in field_names])
+
+
 def build_query_data_request(schema: 'pa.Schema' = pa.schema([]), predicate: ibis.expr.types.BooleanColumn = None, field_names: Optional[List[str]] = None):
     builder = flatbuffers.Builder(1024)
 
@@ -2127,13 +2148,11 @@ def build_query_data_request(schema: 'pa.Schema' = pa.schema([]), predicate: ibi
         filter_obj = predicate.serialize(builder)
 
     parser = QueryDataParser(schema)
-    fields_map = {node.field.name: node.field for node in parser.nodes}
     leaves_map = {node.field.name: [leaf.index for leaf in node._iter_leaves()] for node in parser.nodes}
 
-    if field_names is None:
-        field_names = [field.name for field in schema]
+    response_schema = get_response_schema(schema, field_names)
+    field_names = [field.name for field in response_schema]
 
-    response_schema = pa.schema([fields_map[name] for name in field_names])
    projection_fields = []
    for field_name in field_names:
        # TODO: only root-level projection pushdown is supported (i.e. no support for SELECT s.x FROM t)
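Extracting `get_response_schema` lets callers (such as `Table.select` below) compute the response schema without building a whole query-data request. Its core is ordinary PyArrow field selection by name, as this standalone snippet illustrates:

```python
import pyarrow as pa

schema = pa.schema([('a', pa.int64()), ('b', pa.float64()), ('c', pa.utf8())])

# Selecting fields by name preserves each field's type (and metadata, if any),
# and the requested order becomes the response order
subset = pa.schema([schema.field(name) for name in ['c', 'a']])
print(subset.names)  # ['c', 'a']
```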
vastdb/session.py CHANGED
@@ -26,6 +26,11 @@ class Features:
         if self.vast_version < (5, 2):
             raise errors.NotSupportedVersion("import_table requires 5.2+", self.vast_version)
 
+    def check_return_row_ids(self):
+        """Check if insert/update/delete can return the row_ids."""
+        if self.vast_version < (5, 1):
+            raise errors.NotSupportedVersion("return_row_ids requires 5.1+", self.vast_version)
+
 
 class Session:
     """VAST database session."""
vastdb/table.py CHANGED
@@ -7,8 +7,9 @@ import queue
 from dataclasses import dataclass, field
 from math import ceil
 from threading import Event
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 
+import backoff
 import ibis
 import pyarrow as pa
 
@@ -18,10 +19,14 @@ log = logging.getLogger(__name__)
 
 
 INTERNAL_ROW_ID = "$row_id"
+INTERNAL_ROW_ID_FIELD = pa.field(INTERNAL_ROW_ID, pa.uint64())
+
 MAX_ROWS_PER_BATCH = 512 * 1024
 # for insert we need a smaller limit due to response amplification
 # for example insert of 512k uint8 result in 512k*8bytes response since row_ids are uint64
 MAX_INSERT_ROWS_PER_PATCH = 512 * 1024
+# in case insert has TooWideRow - need to insert in smaller batches - each cell could contain up to 128K, and our wire is limited to 5MB
+MAX_COLUMN_IN_BATCH = int(5 * 1024 / 128)
 
 
 @dataclass
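The new constant works out to 40 columns per RPC: a cell may hold up to 128 KB, the wire is limited to about 5 MB, and the KB units cancel in the division. A quick sanity check:

```python
# 5 MB wire limit expressed in KB, divided by the 128 KB max cell size
MAX_COLUMN_IN_BATCH = int(5 * 1024 / 128)  # 5120 KB / 128 KB
print(MAX_COLUMN_IN_BATCH)  # 40 columns per insert/update RPC
```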
@@ -46,6 +51,8 @@ class QueryConfig:
     use_semi_sorted_projections: bool = True
     rows_per_split: int = 4000000
     query_id: str = ""
+    max_slowdown_retry: int = 10
+    backoff_func: Any = field(default=backoff.on_exception(backoff.expo, errors.Slowdown, max_tries=max_slowdown_retry))
 
 
 @dataclass
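`backoff.on_exception(backoff.expo, errors.Slowdown, max_tries=...)` evaluates to a decorator, so the `backoff_func` field can be applied to any callable at the call site (as `SelectSplitState` does below). A self-contained sketch of the retry behavior, with a toy `Slowdown` stand-in (note that `backoff.expo` really sleeps between tries):

```python
import backoff

class Slowdown(Exception):  # stand-in for vastdb.errors.Slowdown
    pass

attempts = {'n': 0}

@backoff.on_exception(backoff.expo, Slowdown, max_tries=3)
def flaky_query():
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise Slowdown('server asked us to slow down')
    return 'ok'

print(flaky_query(), 'after', attempts['n'], 'attempts')  # ok after 3 attempts
```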
@@ -72,7 +79,8 @@ class SelectSplitState:
         Can be called repeatedly, to allow pagination.
         """
         while not self.done:
-            response = api.query_data(
+            query_with_backoff = self.config.backoff_func(api.query_data)
+            response = query_with_backoff(
                 bucket=self.table.bucket.name,
                 schema=self.table.schema.name,
                 table=self.table.name,
@@ -291,11 +299,17 @@ class Table:
 
         query_schema = self.arrow_schema
         if internal_row_id:
-            queried_fields = [pa.field(INTERNAL_ROW_ID, pa.uint64())]
+            queried_fields = [INTERNAL_ROW_ID_FIELD]
             queried_fields.extend(column for column in self.arrow_schema)
             query_schema = pa.schema(queried_fields)
             columns.append(INTERNAL_ROW_ID)
 
+        if predicate is True:
+            predicate = None
+        if predicate is False:
+            response_schema = internal_commands.get_response_schema(schema=query_schema, field_names=columns)
+            return pa.RecordBatchReader.from_batches(response_schema, [])
+
         query_data_request = internal_commands.build_query_data_request(
             schema=query_schema,
             predicate=predicate,
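The two literal-predicate shortcuts: `True` is equivalent to no filter at all, while `False` can skip the RPC entirely and hand back an empty reader that still carries the right response schema. The empty-reader construction is standard PyArrow:

```python
import pyarrow as pa

schema = pa.schema([('a', pa.int64()), ('b', pa.float64())])

# An empty RecordBatchReader still reports the response schema
reader = pa.RecordBatchReader.from_batches(schema, [])
table = reader.read_all()
print(table.num_rows, table.schema.names)  # 0 ['a', 'b']
```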
@@ -385,27 +399,68 @@ class Table:
 
         return pa.RecordBatchReader.from_batches(query_data_request.response_schema, batches_iterator())
 
-    def insert(self, rows: pa.RecordBatch) -> pa.RecordBatch:
+    def insert_in_column_batches(self, rows: pa.RecordBatch):
+        """Split the RecordBatch into max_columns that can be inserted in single RPC.
+
+        Insert first MAX_COLUMN_IN_BATCH columns and get the row_ids. Then loop on the rest of the columns and
+        update in groups of MAX_COLUMN_IN_BATCH.
+        """
+        column_record_batch = pa.RecordBatch.from_arrays([_combine_chunks(rows.column(i)) for i in range(0, MAX_COLUMN_IN_BATCH)],
+                                                         schema=pa.schema([rows.schema.field(i) for i in range(0, MAX_COLUMN_IN_BATCH)]))
+        row_ids = self.insert(rows=column_record_batch)  # type: ignore
+
+        columns_names = [field.name for field in rows.schema]
+        columns = list(rows.schema)
+        arrays = [_combine_chunks(rows.column(i)) for i in range(len(rows.schema))]
+        for start in range(MAX_COLUMN_IN_BATCH, len(rows.schema), MAX_COLUMN_IN_BATCH):
+            end = start + MAX_COLUMN_IN_BATCH if start + MAX_COLUMN_IN_BATCH < len(rows.schema) else len(rows.schema)
+            columns_name_chunk = columns_names[start:end]
+            columns_chunks = columns[start:end]
+            arrays_chunks = arrays[start:end]
+            columns_chunks.append(INTERNAL_ROW_ID_FIELD)
+            arrays_chunks.append(row_ids.to_pylist())
+            column_record_batch = pa.RecordBatch.from_arrays(arrays_chunks, schema=pa.schema(columns_chunks))
+            self.update(rows=column_record_batch, columns=columns_name_chunk)
+        return row_ids
+
+    def insert(self, rows: pa.RecordBatch):
         """Insert a RecordBatch into this table."""
         if self._imports_table:
             raise errors.NotSupportedCommand(self.bucket.name, self.schema.name, self.name)
-        serialized_slices = util.iter_serialized_slices(rows, MAX_INSERT_ROWS_PER_PATCH)
-        for slice in serialized_slices:
-            self.tx._rpc.api.insert_rows(self.bucket.name, self.schema.name, self.name, record_batch=slice,
-                                         txid=self.tx.txid)
+        try:
+            row_ids = []
+            serialized_slices = util.iter_serialized_slices(rows, MAX_INSERT_ROWS_PER_PATCH)
+            for slice in serialized_slices:
+                res = self.tx._rpc.api.insert_rows(self.bucket.name, self.schema.name, self.name, record_batch=slice,
+                                                   txid=self.tx.txid)
+                (batch,) = pa.RecordBatchStreamReader(res.raw)
+                row_ids.append(batch[INTERNAL_ROW_ID])
+            try:
+                self.tx._rpc.features.check_return_row_ids()
+            except errors.NotSupportedVersion:
+                return  # type: ignore
+            return pa.chunked_array(row_ids)
+        except errors.TooWideRow:
+            self.tx._rpc.features.check_return_row_ids()
+            return self.insert_in_column_batches(rows)
 
     def update(self, rows: Union[pa.RecordBatch, pa.Table], columns: Optional[List[str]] = None) -> None:
         """Update a subset of cells in this table.
 
-        Row IDs are specified using a special field (named "$row_id" of uint64 type).
+        Row IDs are specified using a special field (named "$row_id" of uint64 type) - this function assume that this
+        special field is part of arguments.
 
         A subset of columns to be updated can be specified via the `columns` argument.
         """
         if self._imports_table:
             raise errors.NotSupportedCommand(self.bucket.name, self.schema.name, self.name)
+        try:
+            rows_chunk = rows[INTERNAL_ROW_ID]
+        except KeyError:
+            raise errors.MissingRowIdColumn
         if columns is not None:
             update_fields = [(INTERNAL_ROW_ID, pa.uint64())]
-            update_values = [_combine_chunks(rows[INTERNAL_ROW_ID])]
+            update_values = [_combine_chunks(rows_chunk)]
             for col in columns:
                 update_fields.append(rows.field(col))
                 update_values.append(_combine_chunks(rows[col]))
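The wide-row fallback inserts the first `MAX_COLUMN_IN_BATCH` columns to obtain row IDs, then updates the remaining columns in equally sized slices, attaching `$row_id` to each slice so `update` can address the rows just inserted. The slicing arithmetic in isolation, for a hypothetical 500-column batch:

```python
# Column-chunking arithmetic used by insert_in_column_batches, in isolation
n_cols, chunk = 500, 40  # 500-column batch, MAX_COLUMN_IN_BATCH == 40

slices = []
for start in range(chunk, n_cols, chunk):
    end = start + chunk if start + chunk < n_cols else n_cols
    slices.append((start, end))

print(slices[0], slices[-1], len(slices))  # (40, 80) (480, 500) 12
```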
@@ -424,8 +479,14 @@ class Table:
 
         Row IDs are specified using a special field (named "$row_id" of uint64 type).
         """
+        if self._imports_table:
+            raise errors.NotSupportedCommand(self.bucket.name, self.schema.name, self.name)
+        try:
+            rows_chunk = rows[INTERNAL_ROW_ID]
+        except KeyError:
+            raise errors.MissingRowIdColumn
         delete_rows_rb = pa.record_batch(schema=pa.schema([(INTERNAL_ROW_ID, pa.uint64())]),
-                                         data=[_combine_chunks(rows[INTERNAL_ROW_ID])])
+                                         data=[_combine_chunks(rows_chunk)])
 
         serialized_slices = util.iter_serialized_slices(delete_rows_rb, MAX_ROWS_PER_BATCH)
         for slice in serialized_slices:
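Both `update` and `delete` now fail fast with `errors.MissingRowIdColumn` when the `$row_id` column is absent, rather than leaking PyArrow's raw `KeyError` from the column lookup. The behavior being wrapped:

```python
import pyarrow as pa

batch = pa.record_batch({'x': pa.array([1, 2], type=pa.int64())})

# RecordBatch column lookup by a missing name raises KeyError,
# which the SDK now translates into errors.MissingRowIdColumn
try:
    batch['$row_id']
except KeyError:
    print('no $row_id column in this batch')
```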
vastdb/tests/test_tables.py CHANGED
@@ -71,6 +71,16 @@ def test_tables(session, clean_bucket_name):
     }
 
 
+def test_insert_wide_row(session, clean_bucket_name):
+    columns = pa.schema([pa.field(f's{i}', pa.utf8()) for i in range(500)])
+    data = [['a' * 10**4] for i in range(500)]
+    expected = pa.table(schema=columns, data=data)
+
+    with prepare_data(session, clean_bucket_name, 's', 't', expected) as t:
+        actual = pa.Table.from_batches(t.select())
+        assert actual == expected
+
+
 def test_exists(session, clean_bucket_name):
     with session.transaction() as tx:
         s = tx.bucket(clean_bucket_name).create_schema('s1')
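Why this test hits the wide-row path: its single row carries 500 utf8 cells of 10,000 characters each, i.e. roughly 5 MB in one row, which is at the wire limit discussed in table.py. A back-of-the-envelope check:

```python
# Approximate payload of the single row in test_insert_wide_row
row_bytes = 500 * 10**4                # 500 columns x 10,000 one-byte chars
print(f"{row_bytes / 2**20:.2f} MiB")  # ~4.77 MiB, near the ~5 MB wire cap
```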
@@ -265,6 +275,11 @@ def test_filters(session, clean_bucket_name):
         return pa.Table.from_batches(t.select(predicate=predicate), t.arrow_schema)
 
     assert select(None) == expected
+    assert select(True) == expected
+    assert select(False) == pa.Table.from_batches([], schema=columns)
+
+    assert select(t['a'].between(222, 444)) == expected.filter((pc.field('a') >= 222) & (pc.field('a') <= 444))
+    assert select((t['a'].between(222, 444)) & (t['b'] > 2.5)) == expected.filter((pc.field('a') >= 222) & (pc.field('a') <= 444) & (pc.field('b') > 2.5))
 
     assert select(t['a'] > 222) == expected.filter(pc.field('a') > 222)
     assert select(t['a'] < 222) == expected.filter(pc.field('a') < 222)
@@ -331,7 +346,8 @@ def test_parquet_export(session, clean_bucket_name):
         ['a', 'b'],
     ])
     expected = pa.Table.from_batches([rb])
-    t.insert(rb)
+    rb = t.insert(rb)
+    assert rb.to_pylist() == [0, 1]
     actual = pa.Table.from_batches(t.select())
     assert actual == expected
 
vastdb/tests/util.py CHANGED
@@ -9,7 +9,9 @@ def prepare_data(session, clean_bucket_name, schema_name, table_name, arrow_tabl
     with session.transaction() as tx:
         s = tx.bucket(clean_bucket_name).create_schema(schema_name)
         t = s.create_table(table_name, arrow_table.schema)
-        t.insert(arrow_table)
+        row_ids_array = t.insert(arrow_table)
+        row_ids = row_ids_array.to_pylist()
+        assert row_ids == list(range(arrow_table.num_rows))
         yield t
         t.drop()
         s.drop()
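`prepare_data` now asserts that a fresh table assigns sequential row IDs starting at zero. `insert` returns them as a `pa.ChunkedArray` (one chunk per serialized slice), so `to_pylist()` yields plain integers, as this small standalone example shows:

```python
import pyarrow as pa

# insert() collects one uint64 chunk per RPC slice and returns a ChunkedArray
row_ids = pa.chunked_array([pa.array([0, 1], type=pa.uint64())])
print(row_ids.to_pylist())  # [0, 1]
```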
{vastdb-0.1.3.dist-info → vastdb-0.1.4.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vastdb
-Version: 0.1.3
+Version: 0.1.4
 Summary: VAST Data SDK
 Home-page: https://github.com/vast-data/vastdb_sdk
 Author: VAST DATA
@@ -25,6 +25,7 @@ Requires-Dist: ibis-framework ==8.0.0
 Requires-Dist: pyarrow
 Requires-Dist: requests
 Requires-Dist: xmltodict
+Requires-Dist: backoff ==2.2.1
 
 
 `vastdb` is a Python-based SDK designed for interacting
{vastdb-0.1.3.dist-info → vastdb-0.1.4.dist-info}/RECORD RENAMED
@@ -151,11 +151,11 @@ vast_flatbuf/tabular/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 vastdb/__init__.py,sha256=cMJtZuJ0IL9aKyM3DUWqTCzuP1H1MXXVivKKE1-q0DY,292
 vastdb/bucket.py,sha256=4rPEm9qlPTg7ccWO6VGmd4LKb8w-BDhJYwzXGjn03sc,3566
 vastdb/conftest.py,sha256=pKpo_46Vq4QHzTDQAFxasrVhnZ2V2L-y6IMLxojxaFM,2132
-vastdb/errors.py,sha256=vKWoq1yXrHyafMWwJgW_sQkSxQYxlI1JbTVCLz5Xi9Y,3793
-vastdb/internal_commands.py,sha256=ZD2YXYvZ3lJWYzZU0oHtv8G3lNtDQUF0e8yg8813Xt4,99575
+vastdb/errors.py,sha256=fj8IlPnGi1lbJWIl1-8MSjLavL9bYQ-YUoboWbXCo54,4047
+vastdb/internal_commands.py,sha256=yS6ylyuJjaAwAm4OqVGX4tq-Un5cvM-LXp7F4eYOUDw,100414
 vastdb/schema.py,sha256=ql4TPB1W_FQ_BHov3CKHI8JX3krXMlcKWz7dTrjpQ1w,3346
-vastdb/session.py,sha256=ciYS8Je2cRpuaAEE6Wjk79VsW0KAPdnRB2cqfxFCjis,2323
-vastdb/table.py,sha256=xnSTWUUa0QHzXC5MUQWsGT1fsG8yAgMLy3nrgSH4j5Q,25661
+vastdb/session.py,sha256=2tu5cp7xG28ynyQfEl9_HM2dtNcLM2AoJmm3bfNLC0o,2563
+vastdb/table.py,sha256=apRXCrglg6_glozJXu8D7q6du5seP7NMi42PNjyGcTM,28891
 vastdb/transaction.py,sha256=g8YTcYnsNPIhB2udbHyT5RIFB5kHnBLJcvV2CWRICwI,2845
 vastdb/util.py,sha256=rs7nLL2Qz-OVEZDSVIqAvS-uETMq-zxQs5jBksB5-JA,4276
 vastdb/bench/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -167,11 +167,11 @@ vastdb/tests/test_nested.py,sha256=3kejEvtSqV0LrUgb1QglRjrlxnKI4_AXTFw2nE7Q520,9
 vastdb/tests/test_projections.py,sha256=_cDNfD5zTwbCXLk6uGpPUWGN0P-4HElu5OjubWu-Jg0,1255
 vastdb/tests/test_sanity.py,sha256=ixx0QPo73hLHjAa7bByFXjS1XST0WvmSwLEpgnHh_JY,2960
 vastdb/tests/test_schemas.py,sha256=qoHTLX51D-0S4bMxdCpRh9gaYQd-BkZdT_agGOwFwTM,1739
-vastdb/tests/test_tables.py,sha256=joeEQ30TwKBQc-2N_qGIdviZVnQr4rs6thlNsy5s_og,26672
+vastdb/tests/test_tables.py,sha256=pfQx0OZm6oVJj1-CziPWUoEn3l2-OET4Bpl8M9Z4mws,27499
 vastdb/tests/test_util.py,sha256=owRAU3TCKMq-kz54NRdA5wX2O_bZIHqG5ucUR77jm5k,1046
-vastdb/tests/util.py,sha256=NaCzKymEGy1xuiyMxyt2_0frKVfVk9iGrFwLf3GHjTI,435
-vastdb-0.1.3.dist-info/LICENSE,sha256=obffan7LYrq7hLHNrY7vHcn2pKUTBUYXMKu-VOAvDxU,11333
-vastdb-0.1.3.dist-info/METADATA,sha256=3h3JttUxw9oMMsxV_CVG_LMYwhgegsS9-b4gZkihrM0,1319
-vastdb-0.1.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-vastdb-0.1.3.dist-info/top_level.txt,sha256=Vsj2MKtlhPg0J4so64slQtnwjhgoPmJgcG-6YcVAwVc,20
-vastdb-0.1.3.dist-info/RECORD,,
+vastdb/tests/util.py,sha256=dpRJYbboDnlqL4qIdvScpp8--5fxRUBIcIYitrfcj9o,555
+vastdb-0.1.4.dist-info/LICENSE,sha256=obffan7LYrq7hLHNrY7vHcn2pKUTBUYXMKu-VOAvDxU,11333
+vastdb-0.1.4.dist-info/METADATA,sha256=SyZkyjQSwklzsq3oub8m8w9lY-HuI4XOG72y8trKvf4,1350
+vastdb-0.1.4.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+vastdb-0.1.4.dist-info/top_level.txt,sha256=Vsj2MKtlhPg0J4so64slQtnwjhgoPmJgcG-6YcVAwVc,20
+vastdb-0.1.4.dist-info/RECORD,,