clickhouse-driver 0.2.4-cp38-cp38-musllinux_1_1_i686.whl → 0.2.8-cp38-cp38-musllinux_1_1_i686.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. clickhouse_driver/__init__.py +1 -1
  2. clickhouse_driver/block.py +3 -2
  3. clickhouse_driver/bufferedreader.cpython-38-i386-linux-gnu.so +0 -0
  4. clickhouse_driver/bufferedwriter.cpython-38-i386-linux-gnu.so +0 -0
  5. clickhouse_driver/client.py +209 -19
  6. clickhouse_driver/clientinfo.py +2 -2
  7. clickhouse_driver/columns/arraycolumn.py +16 -7
  8. clickhouse_driver/columns/base.py +71 -7
  9. clickhouse_driver/columns/datecolumn.py +52 -13
  10. clickhouse_driver/columns/jsoncolumn.py +37 -0
  11. clickhouse_driver/columns/largeint.cpython-38-i386-linux-gnu.so +0 -0
  12. clickhouse_driver/columns/lowcardinalitycolumn.py +23 -4
  13. clickhouse_driver/columns/mapcolumn.py +15 -2
  14. clickhouse_driver/columns/nestedcolumn.py +2 -13
  15. clickhouse_driver/columns/numpy/boolcolumn.py +8 -0
  16. clickhouse_driver/columns/numpy/datetimecolumn.py +18 -18
  17. clickhouse_driver/columns/numpy/lowcardinalitycolumn.py +2 -2
  18. clickhouse_driver/columns/numpy/service.py +3 -1
  19. clickhouse_driver/columns/service.py +12 -2
  20. clickhouse_driver/columns/tuplecolumn.py +31 -5
  21. clickhouse_driver/columns/uuidcolumn.py +1 -1
  22. clickhouse_driver/connection.py +123 -17
  23. clickhouse_driver/defines.py +9 -1
  24. clickhouse_driver/log.py +7 -3
  25. clickhouse_driver/progress.py +8 -2
  26. clickhouse_driver/settings/writer.py +7 -2
  27. clickhouse_driver/streams/native.py +18 -6
  28. clickhouse_driver/util/compat.py +12 -0
  29. clickhouse_driver/util/escape.py +35 -7
  30. clickhouse_driver/varint.cpython-38-i386-linux-gnu.so +0 -0
  31. {clickhouse_driver-0.2.4.dist-info → clickhouse_driver-0.2.8.dist-info}/METADATA +8 -13
  32. {clickhouse_driver-0.2.4.dist-info → clickhouse_driver-0.2.8.dist-info}/RECORD +71 -69
  33. {clickhouse_driver-0.2.4.dist-info → clickhouse_driver-0.2.8.dist-info}/WHEEL +1 -1
  34. {clickhouse_driver-0.2.4.dist-info → clickhouse_driver-0.2.8.dist-info}/LICENSE +0 -0
  35. {clickhouse_driver-0.2.4.dist-info → clickhouse_driver-0.2.8.dist-info}/top_level.txt +0 -0

clickhouse_driver/columns/datecolumn.py
@@ -1,3 +1,4 @@
+from os import getenv
 from datetime import date, timedelta
 
 from .base import FormatColumn
@@ -6,8 +7,54 @@ from .base import FormatColumn
 epoch_start = date(1970, 1, 1)
 epoch_end = date(2149, 6, 6)
 
-epoch_start_date32 = date(1925, 1, 1)
-epoch_end_date32 = date(2283, 11, 11)
+epoch_start_date32 = date(1900, 1, 1)
+epoch_end_date32 = date(2299, 12, 31)
+
+
+class LazyLUT(dict):
+    def __init__(self, *args, _factory, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._default_factory = _factory
+
+    def __missing__(self, key):
+        return self.setdefault(key, self._default_factory(key))
+
+
+def make_date_lut_range(date_start, date_end):
+    return range(
+        (date_start - epoch_start).days,
+        (date_end - epoch_start).days + 1,
+    )
+
+
+enable_lazy_date_lut = getenv('CLICKHOUSE_DRIVER_LASY_DATE_LUT', False)
+if enable_lazy_date_lut:
+    try:
+        start, end = enable_lazy_date_lut.split(':')
+        start_date = date.fromisoformat(start)
+        end_date = date.fromisoformat(end)
+
+        date_range = make_date_lut_range(start_date, end_date)
+    except ValueError:
+        date_range = ()
+
+    # Since we initialize lazy lut with some initially warmed values,
+    # we use iterator and not dict comprehension for memory & time optimization
+    _date_lut = LazyLUT(
+        ((x, epoch_start + timedelta(days=x)) for x in date_range),
+        _factory=lambda x: epoch_start + timedelta(days=x),
+    )
+    _date_lut_reverse = LazyLUT(
+        ((value, key) for key, value in _date_lut.items()),
+        _factory=lambda x: (x - epoch_start).days,
+    )
+else:
+    # If lazy lut is not enabled, we fallback to static dict initialization
+    # In both cases, we use same lut for both data types,
+    # since one encompasses the other and we can avoid duplicating overlap
+    date_range = make_date_lut_range(epoch_start_date32, epoch_end_date32)
+    _date_lut = {x: epoch_start + timedelta(days=x) for x in date_range}
+    _date_lut_reverse = {value: key for key, value in _date_lut.items()}
 
 
 class DateColumn(FormatColumn):
@@ -18,9 +65,8 @@ class DateColumn(FormatColumn):
     min_value = epoch_start
     max_value = epoch_end
 
-    date_lut_days = (epoch_end - epoch_start).days + 1
-    date_lut = {x: epoch_start + timedelta(x) for x in range(date_lut_days)}
-    date_lut_reverse = {value: key for key, value in date_lut.items()}
+    date_lut = _date_lut
+    date_lut_reverse = _date_lut_reverse
 
     def before_write_items(self, items, nulls_map=None):
         null_value = self.null_value
@@ -34,7 +80,7 @@ class DateColumn(FormatColumn):
                 items[i] = null_value
                 continue
 
-            if type(item) != date:
+            if type(item) is not date:
                 item = date(item.year, item.month, item.day)
 
             if min_value <= item <= max_value:
@@ -60,10 +106,3 @@ class Date32Column(DateColumn):
 
     min_value = epoch_start_date32
     max_value = epoch_end_date32
-
-    date_lut_days = (epoch_end_date32 - epoch_start).days + 1
-    date_lut = {
-        x: epoch_start + timedelta(x)
-        for x in range((epoch_start_date32 - epoch_start).days, date_lut_days)
-    }
-    date_lut_reverse = {value: key for key, value in date_lut.items()}
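
How the new lazy LUT is switched on, as far as the added code shows: an environment variable holding a 'start:end' pair of ISO dates, read once at import time. A minimal sketch (the variable name is copied verbatim from the diff, including its spelling; the warm range below is invented):

    import os

    # Must be set before clickhouse_driver.columns.datecolumn is imported,
    # since getenv() runs at module import time.
    os.environ['CLICKHOUSE_DRIVER_LASY_DATE_LUT'] = '2000-01-01:2030-12-31'

    from clickhouse_driver.columns import datecolumn

    # Keys in the warmed range are precomputed; any other day number is
    # computed and cached on first access via LazyLUT.__missing__.
    print(datecolumn._date_lut[0])      # date(1970, 1, 1), filled lazily here
    print(datecolumn._date_lut[20000])  # date(2024, 10, 4), pre-warmed
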
clickhouse_driver/columns/jsoncolumn.py
@@ -0,0 +1,37 @@
+from .base import Column
+from .stringcolumn import String
+from ..reader import read_binary_uint8, read_binary_str
+from ..util.compat import json
+from ..writer import write_binary_uint8
+
+
+class JsonColumn(Column):
+    py_types = (dict, )
+
+    # No NULL value actually
+    null_value = {}
+
+    def __init__(self, column_by_spec_getter, **kwargs):
+        self.column_by_spec_getter = column_by_spec_getter
+        self.string_column = String(**kwargs)
+        super(JsonColumn, self).__init__(**kwargs)
+
+    def write_state_prefix(self, buf):
+        # Read in binary format.
+        # Write in text format.
+        write_binary_uint8(1, buf)
+
+    def read_items(self, n_items, buf):
+        read_binary_uint8(buf)
+        spec = read_binary_str(buf)
+        col = self.column_by_spec_getter(spec)
+        col.read_state_prefix(buf)
+        return col.read_data(n_items, buf)
+
+    def write_items(self, items, buf):
+        items = [x if isinstance(x, str) else json.dumps(x) for x in items]
+        self.string_column.write_items(items, buf)
+
+
+def create_json_column(spec, column_by_spec_getter, column_options):
+    return JsonColumn(column_by_spec_getter, **column_options)
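
A hedged usage sketch for the new column (not part of the diff; the table is invented, and Object('json') requires the experimental server setting):

    from clickhouse_driver import Client

    client = Client('localhost', settings={'allow_experimental_object_type': 1})

    client.execute("CREATE TABLE t (obj Object('json')) ENGINE = Memory")
    # write_items above JSON-dumps anything that is not already a string.
    client.execute('INSERT INTO t (obj) VALUES', [{'obj': {'a': 1, 'b': [2, 3]}}])
    # read_items re-dispatches on the spec the server sends back, so rows
    # should come out as plain Python dicts.
    print(client.execute('SELECT obj FROM t'))
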
clickhouse_driver/columns/lowcardinalitycolumn.py
@@ -35,25 +35,32 @@ class LowCardinalityColumn(Column):
     serialization_type = has_additional_keys_bit | need_update_dictionary
 
     def __init__(self, nested_column, **kwargs):
+        self.init_kwargs = kwargs
         self.nested_column = nested_column
         super(LowCardinalityColumn, self).__init__(**kwargs)
 
     def read_state_prefix(self, buf):
-        return read_binary_uint64(buf)
+        super(LowCardinalityColumn, self).read_state_prefix(buf)
+
+        read_binary_uint64(buf)
 
     def write_state_prefix(self, buf):
+        super(LowCardinalityColumn, self).write_state_prefix(buf)
+
         # KeysSerializationVersion. See ClickHouse docs.
         write_binary_int64(1, buf)
 
     def _write_data(self, items, buf):
         index, keys = [], []
         key_by_index_element = {}
+        nested_is_nullable = False
 
         if self.nested_column.nullable:
             # First element represents NULL if column is nullable.
             index.append(self.nested_column.null_value)
             # Prevent null map writing. Reset nested column nullable flag.
             self.nested_column.nullable = False
+            nested_is_nullable = True
 
         for x in items:
             if x is None:
@@ -87,14 +94,26 @@
             return
 
         int_type = int(log(len(index), 2) / 8)
-        int_column = self.int_types[int_type]()
+        int_column = self.int_types[int_type](**self.init_kwargs)
 
         serialization_type = self.serialization_type | int_type
 
         write_binary_int64(serialization_type, buf)
         write_binary_int64(len(index), buf)
 
-        self.nested_column.write_data(index, buf)
+        if nested_is_nullable:
+            # Given we reset nested column nullable flag above,
+            # we need to write null map manually. If to invoke
+            # write_data method, it will cause an exception,
+            # because `prepare_data` may not be able to handle
+            # null value correctly.
+            self.nested_column.write_items(
+                [self.nested_column.null_value], buf)
+            # Remove null map from index, because it is already written.
+            index_to_write = index[1:]
+            self.nested_column.write_data(index_to_write, buf)
+        else:
+            self.nested_column.write_data(index, buf)
         write_binary_int64(len(items), buf)
         int_column.write_items(keys, buf)
 
@@ -106,7 +125,7 @@
 
         # Lowest byte contains info about key type.
         key_type = serialization_type & 0xf
-        keys_column = self.int_types[key_type]()
+        keys_column = self.int_types[key_type](**self.init_kwargs)
 
         nullable = self.nested_column.nullable
         # Prevent null map reading. Reset nested column nullable flag.
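
The nested_is_nullable handling above changes how the dictionary of a LowCardinality(Nullable(...)) column is written: the null sentinel goes through write_items once, and the rest of the index is written without a null map. A hedged round-trip sketch (invented table):

    from clickhouse_driver import Client

    client = Client('localhost')
    client.execute(
        'CREATE TABLE lc (s LowCardinality(Nullable(String))) ENGINE = Memory'
    )
    # Mixing None with real values is the case the branch above targets.
    client.execute('INSERT INTO lc (s) VALUES', [('a',), (None,), ('a',)])
    print(client.execute('SELECT s FROM lc'))  # expected: [('a',), (None,), ('a',)]
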
clickhouse_driver/columns/mapcolumn.py
@@ -1,28 +1,39 @@
+import re
 from .base import Column
 from .intcolumn import UInt64Column
 from ..util.helpers import pairwise
 
 
+comma_re = re.compile(r',(?![^()]*\))')
+
+
 class MapColumn(Column):
     py_types = (dict, )
 
     null_value = {}
 
     def __init__(self, key_column, value_column, **kwargs):
-        self.offset_column = UInt64Column()
+        self.offset_column = UInt64Column(**kwargs)
         self.key_column = key_column
         self.value_column = value_column
         super(MapColumn, self).__init__(**kwargs)
 
     def read_state_prefix(self, buf):
+        super(MapColumn, self).read_state_prefix(buf)
+
         self.key_column.read_state_prefix(buf)
         self.value_column.read_state_prefix(buf)
 
     def write_state_prefix(self, buf):
+        super(MapColumn, self).write_state_prefix(buf)
+
         self.key_column.write_state_prefix(buf)
         self.value_column.write_state_prefix(buf)
 
     def read_items(self, n_items, buf):
+        if not n_items:
+            return [{}]
+
         offsets = list(self.offset_column.read_items(n_items, buf))
         last_offset = offsets[-1]
         keys = self.key_column.read_data(last_offset, buf)
@@ -53,7 +64,9 @@
 
 
 def create_map_column(spec, column_by_spec_getter, column_options):
-    key, value = spec[4:-1].split(',')
+    # Match commas outside of parentheses, so we don't match the comma in
+    # Decimal types.
+    key, value = comma_re.split(spec[4:-1])
     key_column = column_by_spec_getter(key.strip())
     value_column = column_by_spec_getter(value.strip())
 
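What comma_re buys over a plain split, checked standalone (the spec below is invented): the lookahead rejects commas that sit inside parentheses, so a parametrized key or value type stays in one piece.

    import re

    comma_re = re.compile(r',(?![^()]*\))')

    spec = 'Map(String, Decimal(18, 3))'
    key, value = comma_re.split(spec[4:-1])
    print(key.strip())    # String
    print(value.strip())  # Decimal(18, 3)

    # A naive spec[4:-1].split(',') yields three pieces here, and the
    # two-way unpacking in create_map_column would raise ValueError.
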
clickhouse_driver/columns/nestedcolumn.py
@@ -1,21 +1,10 @@
 
 from .arraycolumn import create_array_column
-from .util import get_inner_spec, get_inner_columns, \
-    get_inner_columns_with_types
+from .util import get_inner_spec
 
 
 def create_nested_column(spec, column_by_spec_getter, column_options):
     return create_array_column(
-        'Array(Tuple({}))'.format(','.join(get_nested_columns(spec))),
+        'Array(Tuple({}))'.format(get_inner_spec('Nested', spec)),
         column_by_spec_getter, column_options
     )
-
-
-def get_nested_columns(spec):
-    inner_spec = get_inner_spec('Nested', spec)
-    return get_inner_columns(inner_spec)
-
-
-def get_columns_with_types(spec):
-    inner_spec = get_inner_spec('Nested', spec)
-    return get_inner_columns_with_types(inner_spec)
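
The simplification above forwards the inner spec, field names included, straight into an Array(Tuple(...)) spec; the helpers that split it per column are gone. An illustration with an invented spec, assuming get_inner_spec returns the text between the outer parentheses:

    spec = 'Nested(id UInt32, name String)'

    # Stand-in for get_inner_spec('Nested', spec):
    inner_spec = spec[len('Nested('):-1]

    print('Array(Tuple({}))'.format(inner_spec))
    # Array(Tuple(id UInt32, name String))
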
clickhouse_driver/columns/numpy/boolcolumn.py
@@ -0,0 +1,8 @@
+import numpy as np
+
+from .base import NumpyColumn
+
+
+class NumpyBoolColumn(NumpyColumn):
+    dtype = np.dtype(np.bool_)
+    ch_type = 'Bool'
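
With NumpyBoolColumn registered in the numpy column table (see the numpy/service.py hunks below), Bool values should come back as np.bool_ arrays when the client runs in numpy mode. A hedged sketch:

    from clickhouse_driver import Client

    client = Client('localhost', settings={'use_numpy': True})

    # columnar=True returns one numpy array per column.
    (col,) = client.execute("SELECT CAST(1, 'Bool')", columnar=True)
    print(col.dtype)  # expected: bool
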
clickhouse_driver/columns/numpy/datetimecolumn.py
@@ -21,12 +21,12 @@ class NumpyDateTimeColumnBase(NumpyColumn):
     def apply_timezones_after_read(self, dt):
         timezone = self.timezone if self.timezone else self.local_timezone
 
-        ts = pd.to_datetime(dt, utc=True).tz_convert(timezone)
-
-        if self.offset_naive:
+        if self.offset_naive and timezone.zone != 'UTC':
+            ts = pd.to_datetime(dt, utc=True).tz_convert(timezone)
             ts = ts.tz_localize(None)
+            return ts.to_numpy(self.datetime_dtype)
 
-        return ts.to_numpy(self.datetime_dtype)
+        return dt
 
     def apply_timezones_before_write(self, items):
         if isinstance(items, pd.DatetimeIndex):
@@ -65,25 +65,25 @@ class NumpyDateTimeColumn(NumpyDateTimeColumnBase):
 
 
 class NumpyDateTime64Column(NumpyDateTimeColumnBase):
-    dtype = np.dtype(np.uint64)
+    dtype = np.dtype(np.int64)
     datetime_dtype = 'datetime64[ns]'
 
-    max_scale = 6
+    max_scale = 9
 
     def __init__(self, scale=0, **kwargs):
         self.scale = scale
         super(NumpyDateTime64Column, self).__init__(**kwargs)
 
     def read_items(self, n_items, buf):
-        scale = 10 ** self.scale
-        frac_scale = 10 ** (self.max_scale - self.scale)
-
+        # Clickhouse: t seconds is represented as t * 10^scale.
+        # datetime64[ns]: t seconds is represented as t * 10^9.
+        # Since 0 <= scale <= 9, multiply by the integer 10^(9 - scale).
         items = super(NumpyDateTime64Column, self).read_items(n_items, buf)
 
-        seconds = (items // scale).astype('datetime64[s]')
-        microseconds = ((items % scale) * frac_scale).astype('timedelta64[us]')
+        tmp = np.copy(items)
+        tmp *= 10 ** (9 - self.scale)
+        dt = tmp.view(dtype='datetime64[ns]')
 
-        dt = seconds + microseconds
         return self.apply_timezones_after_read(dt)
 
     def write_items(self, items, buf):
@@ -98,7 +98,7 @@ class NumpyDateTime64Column(NumpyDateTimeColumnBase):
         items = self.apply_timezones_before_write(items)
 
         seconds = items.astype('datetime64[s]')
-        microseconds = (items - seconds).astype(dtype='timedelta64[us]') \
+        microseconds = (items - seconds).astype(dtype='timedelta64[ns]') \
             .astype(np.uint32) // frac_scale
 
         items = seconds.astype(self.dtype) * scale + microseconds
@@ -120,12 +120,12 @@ def create_numpy_datetime_column(spec, column_options):
 
     context = column_options['context']
 
-    tz_name = timezone = None
+    tz_name = None
     offset_naive = True
 
     # As Numpy do not use local timezone for converting timestamp to
     # datetime we need always detect local timezone for manual converting.
-    local_timezone = get_localzone_name_compat()
+    local_tz_name = get_localzone_name_compat()
 
     # Use column's timezone if it's specified.
     if spec and spec[-1] == ')':
@@ -133,11 +133,11 @@
         offset_naive = False
     else:
         if not context.settings.get('use_client_time_zone', False):
-            if local_timezone != context.server_info.timezone:
+            if local_tz_name != context.server_info.timezone:
                 tz_name = context.server_info.timezone
 
-    if tz_name:
-        timezone = get_timezone(tz_name)
+    timezone = get_timezone(tz_name) if tz_name else None
+    local_timezone = get_timezone(local_tz_name) if local_tz_name else None
 
     return cls(timezone=timezone, offset_naive=offset_naive,
                local_timezone=local_timezone, **column_options)
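
The rewritten DateTime64 read path above is pure integer scaling: a DateTime64(scale) tick count times 10**(9 - scale) is exactly the datetime64[ns] representation, and the uint64 to int64 dtype switch keeps pre-epoch values signed. The arithmetic, checked standalone (not driver code):

    import numpy as np

    scale = 3  # DateTime64(3), i.e. millisecond ticks
    items = np.array([1_600_000_000_123], dtype=np.int64)

    tmp = np.copy(items)
    tmp *= 10 ** (9 - scale)               # millisecond ticks -> nanosecond ticks
    dt = tmp.view(dtype='datetime64[ns]')  # reinterpret in place, no conversion
    print(dt[0])  # 2020-09-13T12:26:40.123000000
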
clickhouse_driver/columns/numpy/lowcardinalitycolumn.py
@@ -37,7 +37,7 @@ class NumpyLowCardinalityColumn(LowCardinalityColumn):
         c = pd.Categorical(items)
 
         int_type = int(log(len(c.codes), 2) / 8)
-        int_column = self.int_types[int_type]()
+        int_column = self.int_types[int_type](**self.init_kwargs)
 
         serialization_type = self.serialization_type | int_type
 
@@ -66,7 +66,7 @@ class NumpyLowCardinalityColumn(LowCardinalityColumn):
 
         # Lowest byte contains info about key type.
         key_type = serialization_type & 0xf
-        keys_column = self.int_types[key_type]()
+        keys_column = self.int_types[key_type](**self.init_kwargs)
 
         nullable = self.nested_column.nullable
         # Prevent null map reading. Reset nested column nullable flag.
clickhouse_driver/columns/numpy/service.py
@@ -7,6 +7,7 @@ from .intcolumn import (
     NumpyInt8Column, NumpyInt16Column, NumpyInt32Column, NumpyInt64Column,
     NumpyUInt8Column, NumpyUInt16Column, NumpyUInt32Column, NumpyUInt64Column
 )
+from .boolcolumn import NumpyBoolColumn
 from .lowcardinalitycolumn import create_numpy_low_cardinality_column
 from .stringcolumn import create_string_column
 from .tuplecolumn import create_tuple_column
@@ -16,7 +17,8 @@ column_by_type = {c.ch_type: c for c in [
     NumpyDateColumn,
     NumpyFloat32Column, NumpyFloat64Column,
     NumpyInt8Column, NumpyInt16Column, NumpyInt32Column, NumpyInt64Column,
-    NumpyUInt8Column, NumpyUInt16Column, NumpyUInt32Column, NumpyUInt64Column
+    NumpyUInt8Column, NumpyUInt16Column, NumpyUInt32Column, NumpyUInt64Column,
+    NumpyBoolColumn
 ]}
 
 
clickhouse_driver/columns/service.py
@@ -15,6 +15,7 @@ from .intcolumn import (
     UInt8Column, UInt16Column, UInt32Column, UInt64Column
 )
 from .lowcardinalitycolumn import create_low_cardinality_column
+from .jsoncolumn import create_json_column
 from .mapcolumn import create_map_column
 from .nothingcolumn import NothingColumn
 from .nullcolumn import NullColumn
@@ -122,6 +123,11 @@ def get_column_by_spec(spec, column_options, use_numpy=None):
             spec, create_column_with_options, column_options
         )
 
+    elif spec.startswith("Object('json')"):
+        return create_json_column(
+            spec, create_column_with_options, column_options
+        )
+
     else:
         for alias, primitive in aliases:
             if spec.startswith(alias):
@@ -137,8 +143,12 @@
     raise errors.UnknownTypeError('Unknown type {}'.format(spec))
 
 
-def read_column(context, column_spec, n_items, buf, use_numpy=None):
-    column_options = {'context': context}
+def read_column(context, column_spec, n_items, buf, use_numpy=None,
+                has_custom_serialization=False):
+    column_options = {
+        'context': context,
+        'has_custom_serialization': has_custom_serialization
+    }
     col = get_column_by_spec(column_spec, column_options, use_numpy=use_numpy)
     col.read_state_prefix(buf)
     return col.read_data(n_items, buf)
clickhouse_driver/columns/tuplecolumn.py
@@ -1,13 +1,21 @@
 
 from .base import Column
-from .util import get_inner_spec, get_inner_columns
+from .util import get_inner_spec, get_inner_columns_with_types
 
 
 class TupleColumn(Column):
     py_types = (list, tuple)
 
-    def __init__(self, nested_columns, **kwargs):
+    def __init__(self, names, nested_columns, **kwargs):
+        self.names = names
         self.nested_columns = nested_columns
+        client_settings = kwargs['context'].client_settings
+        settings = kwargs['context'].settings
+        self.namedtuple_as_json = (
+            settings.get('allow_experimental_object_type', False) and
+            client_settings.get('namedtuple_as_json', True)
+        )
+
         super(TupleColumn, self).__init__(**kwargs)
         self.null_value = tuple(x.null_value for x in nested_columns)
@@ -23,15 +31,33 @@ class TupleColumn(Column):
 
     def read_data(self, n_items, buf):
         rv = [x.read_data(n_items, buf) for x in self.nested_columns]
-        return list(zip(*rv))
+        rv = list(zip(*rv))
+
+        if self.names[0] and self.namedtuple_as_json:
+            return [dict(zip(self.names, x)) for x in rv]
+        else:
+            return rv
 
     def read_items(self, n_items, buf):
         return self.read_data(n_items, buf)
 
+    def read_state_prefix(self, buf):
+        super(TupleColumn, self).read_state_prefix(buf)
+
+        for x in self.nested_columns:
+            x.read_state_prefix(buf)
+
+    def write_state_prefix(self, buf):
+        super(TupleColumn, self).write_state_prefix(buf)
+
+        for x in self.nested_columns:
+            x.write_state_prefix(buf)
+
 
 def create_tuple_column(spec, column_by_spec_getter, column_options):
     inner_spec = get_inner_spec('Tuple', spec)
-    columns = get_inner_columns(inner_spec)
+    columns_with_types = get_inner_columns_with_types(inner_spec)
+    names, types = zip(*columns_with_types)
 
-    return TupleColumn([column_by_spec_getter(x) for x in columns],
+    return TupleColumn(names, [column_by_spec_getter(x) for x in types],
                        **column_options)
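
Effect of the namedtuple_as_json flag wired up above, as a hedged sketch: when the inner Tuple spec carries field names and the experimental Object type is allowed, read_data zips each row tuple with those names into a dict.

    from clickhouse_driver import Client

    client = Client('localhost', settings={'allow_experimental_object_type': 1})

    rows = client.execute(
        "SELECT CAST((1, 'a'), 'Tuple(id UInt8, s String)') AS t"
    )
    print(rows)  # expected: [({'id': 1, 's': 'a'},)]

    # Per __init__ above, disabling the client-side namedtuple_as_json
    # setting should restore the old plain-tuple result.
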
clickhouse_driver/columns/uuidcolumn.py
@@ -54,7 +54,7 @@ class UUIDColumn(FormatColumn):
 
         try:
             if not isinstance(item, UUID):
-                item = UUID(item)
+                item = UUID(int=item) if item is null_value else UUID(item)
 
         except ValueError:
             raise errors.CannotParseUuidError(
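
The UUID change covers the column's numeric null placeholder: when item is the sentinel (an int, presumably 0), it is wrapped with UUID(int=item) instead of being parsed as a string. A standalone illustration of the two constructors:

    from uuid import UUID

    null_value = 0
    for item in (null_value, 'c6cc1837-1fe1-4e32-9dfd-51f2efac9b46'):
        u = UUID(int=item) if item is null_value else UUID(item)
        print(u)
    # 00000000-0000-0000-0000-000000000000
    # c6cc1837-1fe1-4e32-9dfd-51f2efac9b46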