vastdb 0.0.5.3__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- vast_flatbuf/tabular/GetTableStatsResponse.py +45 -1
- vast_flatbuf/tabular/VipRange.py +56 -0
- vastdb/__init__.py +7 -0
- vastdb/bench/test_perf.py +29 -0
- vastdb/bucket.py +85 -0
- vastdb/{tests/conftest.py → conftest.py} +29 -14
- vastdb/errors.py +175 -0
- vastdb/{api.py → internal_commands.py} +373 -875
- vastdb/schema.py +85 -0
- vastdb/session.py +47 -0
- vastdb/table.py +483 -0
- vastdb/tests/test_imports.py +123 -0
- vastdb/tests/test_nested.py +28 -0
- vastdb/tests/test_projections.py +42 -0
- vastdb/tests/test_sanity.py +34 -15
- vastdb/tests/test_schemas.py +30 -6
- vastdb/tests/test_tables.py +628 -13
- vastdb/tests/util.py +18 -0
- vastdb/transaction.py +54 -0
- vastdb/util.py +11 -10
- vastdb-0.1.1.dist-info/METADATA +38 -0
- {vastdb-0.0.5.3.dist-info → vastdb-0.1.1.dist-info}/RECORD +26 -31
- vast_protobuf/substrait/__init__.py +0 -0
- vast_protobuf/substrait/algebra_pb2.py +0 -1344
- vast_protobuf/substrait/capabilities_pb2.py +0 -46
- vast_protobuf/substrait/ddl_pb2.py +0 -57
- vast_protobuf/substrait/extended_expression_pb2.py +0 -49
- vast_protobuf/substrait/extensions/__init__.py +0 -0
- vast_protobuf/substrait/extensions/extensions_pb2.py +0 -89
- vast_protobuf/substrait/function_pb2.py +0 -168
- vast_protobuf/substrait/parameterized_types_pb2.py +0 -181
- vast_protobuf/substrait/plan_pb2.py +0 -67
- vast_protobuf/substrait/type_expressions_pb2.py +0 -198
- vast_protobuf/substrait/type_pb2.py +0 -350
- vast_protobuf/tabular/__init__.py +0 -0
- vast_protobuf/tabular/rpc_pb2.py +0 -344
- vastdb/bench_scan.py +0 -45
- vastdb/tests/test_create_table_from_parquets.py +0 -50
- vastdb/v2.py +0 -360
- vastdb-0.0.5.3.dist-info/METADATA +0 -47
- {vast_protobuf → vastdb/bench}/__init__.py +0 -0
- {vastdb-0.0.5.3.dist-info → vastdb-0.1.1.dist-info}/LICENSE +0 -0
- {vastdb-0.0.5.3.dist-info → vastdb-0.1.1.dist-info}/WHEEL +0 -0
- {vastdb-0.0.5.3.dist-info → vastdb-0.1.1.dist-info}/top_level.txt +0 -0
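
Beyond the regenerated FlatBuffers/protobuf plumbing, the headline change in 0.1.1 is that the monolithic vastdb/api.py becomes internal_commands.py and the public surface is split into session/transaction/bucket/schema/table objects plus a new errors module. The sketch below pieces the new flow together from the conftest fixtures and the benchmark test included in this diff; the endpoint and bucket name mirror the conftest defaults, while credentials, file paths and column names are placeholders.

# Hedged sketch of the new object model, assembled from the tests in this diff.
# Credentials, parquet paths and the 'sid' column are placeholders, not package defaults.
import pyarrow as pa
import vastdb
from vastdb import util
from vastdb.table import ImportConfig, QueryConfig

session = vastdb.connect(
    access="<AWS_ACCESS_KEY_ID>",
    secret="<AWS_SECRET_ACCESS_KEY>",
    endpoint="http://localhost:9090",
)

with session.transaction() as tx:
    bucket = tx.bucket("vastdb")                     # Tabular-enabled S3 bucket
    schema = bucket.create_schema("s1")              # container of tables
    table = util.create_table_from_files(            # import existing Parquet files
        schema, "t1", ["/path/to/file.pq"],
        config=ImportConfig(import_concurrency=8),
    )
    batches = table.select(columns=["sid"],
                           predicate=table["sid"] == 10033007,
                           config=QueryConfig(num_splits=8, num_sub_splits=4))
    result = pa.Table.from_batches(batches)          # materialize the record batches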
vast_flatbuf/tabular/GetTableStatsResponse.py
CHANGED

@@ -45,7 +45,39 @@ class GetTableStatsResponse(object):
             return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
         return False

-def Start(builder): builder.StartObject(3)
+    # GetTableStatsResponse
+    def AddressType(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # GetTableStatsResponse
+    def Vips(self, j):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            x = self._tab.Vector(o)
+            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
+            x = self._tab.Indirect(x)
+            from vast_flatbuf.tabular.VipRange import VipRange
+            obj = VipRange()
+            obj.Init(self._tab.Bytes, x)
+            return obj
+        return None
+
+    # GetTableStatsResponse
+    def VipsLength(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        if o != 0:
+            return self._tab.VectorLen(o)
+        return 0
+
+    # GetTableStatsResponse
+    def VipsIsNone(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
+        return o == 0
+
+def Start(builder): builder.StartObject(5)
 def GetTableStatsResponseStart(builder):
     """This method is deprecated. Please switch to Start."""
     return Start(builder)

@@ -61,6 +93,18 @@ def AddIsExternalRowidAlloc(builder, isExternalRowidAlloc): builder.PrependBoolS
 def GetTableStatsResponseAddIsExternalRowidAlloc(builder, isExternalRowidAlloc):
     """This method is deprecated. Please switch to AddIsExternalRowidAlloc."""
     return AddIsExternalRowidAlloc(builder, isExternalRowidAlloc)
+def AddAddressType(builder, addressType): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(addressType), 0)
+def GetTableStatsResponseAddAddressType(builder, addressType):
+    """This method is deprecated. Please switch to AddAddressType."""
+    return AddAddressType(builder, addressType)
+def AddVips(builder, vips): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(vips), 0)
+def GetTableStatsResponseAddVips(builder, vips):
+    """This method is deprecated. Please switch to AddVips."""
+    return AddVips(builder, vips)
+def StartVipsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
+def GetTableStatsResponseStartVipsVector(builder, numElems):
+    """This method is deprecated. Please switch to Start."""
+    return StartVipsVector(builder, numElems)
 def End(builder): return builder.EndObject()
 def GetTableStatsResponseEnd(builder):
     """This method is deprecated. Please switch to End."""
vast_flatbuf/tabular/VipRange.py
ADDED

@@ -0,0 +1,56 @@
+# automatically generated by the FlatBuffers compiler, do not modify
+
+# namespace: tabular
+
+import flatbuffers
+from flatbuffers.compat import import_numpy
+np = import_numpy()
+
+class VipRange(object):
+    __slots__ = ['_tab']
+
+    @classmethod
+    def GetRootAs(cls, buf, offset=0):
+        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
+        x = VipRange()
+        x.Init(buf, n + offset)
+        return x
+
+    @classmethod
+    def GetRootAsVipRange(cls, buf, offset=0):
+        """This method is deprecated. Please switch to GetRootAs."""
+        return cls.GetRootAs(buf, offset)
+    # VipRange
+    def Init(self, buf, pos):
+        self._tab = flatbuffers.table.Table(buf, pos)
+
+    # VipRange
+    def StartAddress(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
+        if o != 0:
+            return self._tab.String(o + self._tab.Pos)
+        return None
+
+    # VipRange
+    def AddressCount(self):
+        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
+        if o != 0:
+            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
+        return 0
+
+def Start(builder): builder.StartObject(2)
+def VipRangeStart(builder):
+    """This method is deprecated. Please switch to Start."""
+    return Start(builder)
+def AddStartAddress(builder, startAddress): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(startAddress), 0)
+def VipRangeAddStartAddress(builder, startAddress):
+    """This method is deprecated. Please switch to AddStartAddress."""
+    return AddStartAddress(builder, startAddress)
+def AddAddressCount(builder, addressCount): builder.PrependUint16Slot(1, addressCount, 0)
+def VipRangeAddAddressCount(builder, addressCount):
+    """This method is deprecated. Please switch to AddAddressCount."""
+    return AddAddressCount(builder, addressCount)
+def End(builder): return builder.EndObject()
+def VipRangeEnd(builder):
+    """This method is deprecated. Please switch to End."""
+    return End(builder)
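
Together these two generated files add an address type and a vector of VIP ranges to the table-stats response. A hedged sketch of reading the new fields from a serialized response buffer, using only the generated accessors above; GetRootAs is the standard FlatBuffers root accessor and is assumed to be present on the generated GetTableStatsResponse class, and `buf` is assumed to hold a server response.

# Sketch only: `buf` is assumed to be a serialized GetTableStatsResponse payload.
from vast_flatbuf.tabular.GetTableStatsResponse import GetTableStatsResponse

def read_vip_ranges(buf: bytes):
    stats = GetTableStatsResponse.GetRootAs(buf, 0)
    ranges = []
    if not stats.VipsIsNone():
        for i in range(stats.VipsLength()):
            vip = stats.Vips(i)  # a VipRange table
            ranges.append((vip.StartAddress(), vip.AddressCount()))
    return stats.AddressType(), ranges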
vastdb/bench/test_perf.py
ADDED

@@ -0,0 +1,29 @@
+import logging
+import time
+
+import pyarrow as pa
+import pytest
+
+from vastdb import util
+from vastdb.table import ImportConfig, QueryConfig
+
+log = logging.getLogger(__name__)
+
+
+@pytest.mark.benchmark
+def test_bench(session, clean_bucket_name, parquets_path, crater_path):
+    files = [str(parquets_path/f) for f in (parquets_path.glob('**/*.pq'))]
+
+    with session.transaction() as tx:
+        b = tx.bucket(clean_bucket_name)
+        s = b.create_schema('s1')
+        t = util.create_table_from_files(s, 't1', files, config=ImportConfig(import_concurrency=8))
+        config = QueryConfig(num_splits=8, num_sub_splits=4)
+        s = time.time()
+        pa_table = pa.Table.from_batches(t.select(columns=['sid'], predicate=t['sid'] == 10033007, config=config))
+        e = time.time()
+        log.info("'SELECT sid from TABLE WHERE sid = 10033007' returned in %s seconds.", e-s)
+        if crater_path:
+            with open(f'{crater_path}/bench_results', 'a') as f:
+                f.write(f"'SELECT sid FROM TABLE WHERE sid = 10033007' returned in {e-s} seconds")
+        assert pa_table.num_rows == 255_075

vastdb/bucket.py
ADDED
vastdb/bucket.py
ADDED

@@ -0,0 +1,85 @@
+"""VAST Database bucket.
+
+VAST S3 buckets can be used to create Database schemas and tables.
+It is possible to list and access VAST snapshots generated over a bucket.
+"""
+
+import logging
+from dataclasses import dataclass
+
+from . import errors, schema, transaction
+
+log = logging.getLogger(__name__)
+
+
+@dataclass
+class Snapshot:
+    """VAST bucket-level snapshot."""
+
+    name: str
+    bucket: "Bucket"
+
+
+@dataclass
+class Bucket:
+    """VAST bucket."""
+
+    name: str
+    tx: "transaction.Transaction"
+
+    def create_schema(self, path: str, fail_if_exists=True) -> "schema.Schema":
+        """Create a new schema (a container of tables) under this bucket."""
+        if current := self.schema(path, fail_if_missing=False):
+            if fail_if_exists:
+                raise errors.SchemaExists(self.name, path)
+            else:
+                return current
+        self.tx._rpc.api.create_schema(self.name, path, txid=self.tx.txid)
+        log.info("Created schema: %s", path)
+        return self.schema(path)
+
+    def schema(self, path: str, fail_if_missing=True) -> "schema.Schema":
+        """Get a specific schema (a container of tables) under this bucket."""
+        s = self.schemas(path)
+        log.debug("schema: %s", s)
+        if not s:
+            if fail_if_missing:
+                raise errors.MissingSchema(self.name, path)
+            else:
+                return None
+        assert len(s) == 1, f"Expected to receive only a single schema, but got: {len(s)}. ({s})"
+        log.debug("Found schema: %s", s[0].name)
+        return s[0]
+
+    def schemas(self, name: str = None) -> ["schema.Schema"]:
+        """List bucket's schemas."""
+        schemas = []
+        next_key = 0
+        exact_match = bool(name)
+        log.debug("list schemas param: schema=%s, exact_match=%s", name, exact_match)
+        while True:
+            bucket_name, curr_schemas, next_key, is_truncated, _ = \
+                self.tx._rpc.api.list_schemas(bucket=self.name, next_key=next_key, txid=self.tx.txid,
+                                              name_prefix=name, exact_match=exact_match)
+            if not curr_schemas:
+                break
+            schemas.extend(curr_schemas)
+            if not is_truncated:
+                break
+
+        return [schema.Schema(name=name, bucket=self) for name, *_ in schemas]
+
+    def snapshots(self) -> [Snapshot]:
+        """List bucket's snapshots."""
+        snapshots = []
+        next_key = 0
+        while True:
+            curr_snapshots, is_truncated, next_key = \
+                self.tx._rpc.api.list_snapshots(bucket=self.name, next_token=next_key)
+            if not curr_snapshots:
+                break
+            snapshots.extend(curr_snapshots)
+            if not is_truncated:
+                break
+
+        return [Snapshot(name=snapshot, bucket=self) for snapshot in snapshots]
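
Bucket objects are only reachable through a transaction. A hedged sketch of the intended usage, relying solely on the methods defined above; credentials, endpoint and bucket name are placeholders.

# Sketch under assumptions: credentials/endpoint/bucket name are placeholders.
import vastdb

session = vastdb.connect(access="<key>", secret="<secret>", endpoint="http://localhost:9090")
with session.transaction() as tx:
    bucket = tx.bucket("vastdb")
    schema = bucket.create_schema("s1", fail_if_exists=False)  # returns the existing schema if present
    for s in bucket.schemas():
        print("schema:", s.name)
    for snap in bucket.snapshots():
        print("snapshot:", snap.name)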
vastdb/{tests/conftest.py → conftest.py}
CHANGED

@@ -1,33 +1,38 @@
-import
+import os
+from pathlib import Path
+
 import boto3
+import pytest

-
+import vastdb


 def pytest_addoption(parser):
-    parser.addoption("--tabular-bucket-name", help="Name of the S3 bucket with Tabular enabled")
-    parser.addoption("--tabular-access-key", help="Access key with Tabular permissions")
-    parser.addoption("--tabular-secret-key", help="Secret key with Tabular permissions")
-    parser.addoption("--tabular-endpoint-url", help="Tabular server endpoint")
+    parser.addoption("--tabular-bucket-name", help="Name of the S3 bucket with Tabular enabled", default="vastdb")
+    parser.addoption("--tabular-access-key", help="Access key with Tabular permissions (AWS_ACCESS_KEY_ID)", default=os.environ.get("AWS_ACCESS_KEY_ID", None))
+    parser.addoption("--tabular-secret-key", help="Secret key with Tabular permissions (AWS_SECRET_ACCESS_KEY)", default=os.environ.get("AWS_SECRET_ACCESS_KEY", None))
+    parser.addoption("--tabular-endpoint-url", help="Tabular server endpoint", default="http://localhost:9090")
+    parser.addoption("--data-path", help="Data files location", default=None)
+    parser.addoption("--crater-path", help="Save benchmark results in a dedicated location", default=None)


-@pytest.fixture(scope="
-def
-    return
+@pytest.fixture(scope="session")
+def session(request):
+    return vastdb.connect(
         access=request.config.getoption("--tabular-access-key"),
         secret=request.config.getoption("--tabular-secret-key"),
         endpoint=request.config.getoption("--tabular-endpoint-url"),
     )


-@pytest.fixture(scope="
+@pytest.fixture(scope="session")
 def test_bucket_name(request):
     return request.config.getoption("--tabular-bucket-name")


-@pytest.fixture(scope="
-def clean_bucket_name(request, test_bucket_name,
-    with
+@pytest.fixture(scope="function")
+def clean_bucket_name(request, test_bucket_name, session):
+    with session.transaction() as tx:
         b = tx.bucket(test_bucket_name)
         for s in b.schemas():
             for t in s.tables():

@@ -36,10 +41,20 @@ def clean_bucket_name(request, test_bucket_name, rpc):
     return test_bucket_name


-@pytest.fixture(scope="
+@pytest.fixture(scope="session")
 def s3(request):
     return boto3.client(
         's3',
         aws_access_key_id=request.config.getoption("--tabular-access-key"),
         aws_secret_access_key=request.config.getoption("--tabular-secret-key"),
         endpoint_url=request.config.getoption("--tabular-endpoint-url"))
+
+
+@pytest.fixture(scope="function")
+def parquets_path(request):
+    return Path(request.config.getoption("--data-path"))
+
+
+@pytest.fixture(scope="function")
+def crater_path(request):
+    return request.config.getoption("--crater-path")
vastdb/errors.py
ADDED

@@ -0,0 +1,175 @@
+import logging
+import xml.etree.ElementTree
+from dataclasses import dataclass
+from enum import Enum
+
+import requests
+
+
+class HttpStatus(Enum):
+    SUCCESS = 200
+    BAD_REQUEST = 400
+    FOBIDDEN = 403
+    NOT_FOUND = 404
+    METHOD_NOT_ALLOWED = 405
+    REQUEST_TIMEOUT = 408
+    CONFLICT = 409
+    INTERNAL_SERVER_ERROR = 500
+    NOT_IMPLEMENTED = 501
+    SERVICE_UNAVAILABLE = 503
+
+
+log = logging.getLogger(__name__)
+
+
+@dataclass
+class HttpError(Exception):
+    code: str
+    message: str
+    url: str
+    status: int  # HTTP status
+    headers: requests.structures.CaseInsensitiveDict  # HTTP response headers
+
+    def __post_init__(self):
+        self.args = [vars(self)]
+
+
+class NotFound(HttpError):
+    pass
+
+
+class Forbidden(HttpError):
+    pass
+
+
+class BadRequest(HttpError):
+    pass
+
+
+class MethodNotAllowed(HttpError):
+    pass
+
+
+class RequestTimeout(HttpError):
+    pass
+
+
+class Conflict(HttpError):
+    pass
+
+
+class InternalServerError(HttpError):
+    pass
+
+
+class NotImplemented(HttpError):
+    pass
+
+
+class ServiceUnavailable(HttpError):
+    pass
+
+
+class UnexpectedError(HttpError):
+    pass
+
+
+@dataclass
+class ImportFilesError(Exception):
+    message: str
+    error_dict: dict
+
+
+class InvalidArgument(Exception):
+    pass
+
+
+class Missing(Exception):
+    pass
+
+
+@dataclass
+class MissingBucket(Missing):
+    bucket: str
+
+
+@dataclass
+class MissingSchema(Missing):
+    bucket: str
+    schema: str
+
+
+@dataclass
+class MissingTable(Missing):
+    bucket: str
+    schema: str
+    table: str
+
+
+@dataclass
+class MissingProjection(Missing):
+    bucket: str
+    schema: str
+    table: str
+    projection: str
+
+
+class Exists(Exception):
+    pass
+
+
+@dataclass
+class SchemaExists(Exists):
+    bucket: str
+    schema: str
+
+
+@dataclass
+class TableExists(Exists):
+    bucket: str
+    schema: str
+    table: str
+
+
+ERROR_TYPES_MAP = {
+    HttpStatus.BAD_REQUEST: BadRequest,
+    HttpStatus.FOBIDDEN: Forbidden,
+    HttpStatus.NOT_FOUND: NotFound,
+    HttpStatus.METHOD_NOT_ALLOWED: MethodNotAllowed,
+    HttpStatus.REQUEST_TIMEOUT: RequestTimeout,
+    HttpStatus.CONFLICT: Conflict,
+    HttpStatus.INTERNAL_SERVER_ERROR: InternalServerError,
+    HttpStatus.NOT_IMPLEMENTED: NotImplemented,
+    HttpStatus.SERVICE_UNAVAILABLE: ServiceUnavailable,
+}
+
+
+def from_response(res: requests.Response):
+    if res.status_code == HttpStatus.SUCCESS.value:
+        return None
+
+    log.debug("response: url='%s', code=%s, headers=%s, body='%s'", res.request.url, res.status_code, res.headers, res.text)
+    # try to parse S3 XML response for the error details:
+    code = None
+    message = None
+    if res.text:
+        try:
+            root = xml.etree.ElementTree.fromstring(res.text)
+            code = root.find('Code')
+            code = code.text if code is not None else None
+            message = root.find('Message')
+            message = message.text if message is not None else None
+        except xml.etree.ElementTree.ParseError:
+            log.debug("invalid XML: %r", res.text)
+
+    kwargs = dict(
+        code=code,
+        message=message,
+        url=res.request.url,
+        status=res.status_code,
+        headers=res.headers,
+    )
+    log.warning("RPC failed: %s", kwargs)
+    status = HttpStatus(res.status_code)
+    error_type = ERROR_TYPES_MAP.get(status, UnexpectedError)
+    raise error_type(**kwargs)
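
from_response() maps non-2xx S3/Tabular responses to the typed HttpError subclasses above, while the SDK objects raise the Missing*/Exists* variants directly (as seen in bucket.py). A hedged sketch of how a caller might handle both kinds; credentials, bucket and schema names are placeholders.

# Sketch under assumptions: credentials/endpoint/bucket/schema names are placeholders.
import vastdb
from vastdb import errors

session = vastdb.connect(access="<key>", secret="<secret>", endpoint="http://localhost:9090")
with session.transaction() as tx:
    bucket = tx.bucket("vastdb")
    try:
        schema = bucket.schema("does-not-exist")
    except errors.MissingSchema as e:
        print("missing schema:", e.bucket, e.schema)
    except errors.HttpError as e:
        # raised via errors.from_response() for non-2xx RPC responses
        print("RPC failed:", e.status, e.code, e.message)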