kumoai 2.13.0.dev202512091732__cp311-cp311-macosx_11_0_arm64.whl → 2.14.0.dev202601051732__cp311-cp311-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kumoai/__init__.py +23 -26
- kumoai/_version.py +1 -1
- kumoai/client/client.py +6 -0
- kumoai/client/jobs.py +24 -0
- kumoai/client/pquery.py +6 -2
- kumoai/connector/utils.py +21 -7
- kumoai/experimental/rfm/__init__.py +51 -24
- kumoai/experimental/rfm/authenticate.py +3 -4
- kumoai/experimental/rfm/backend/local/graph_store.py +52 -104
- kumoai/experimental/rfm/backend/local/sampler.py +125 -55
- kumoai/experimental/rfm/backend/local/table.py +35 -31
- kumoai/experimental/rfm/backend/snow/__init__.py +2 -0
- kumoai/experimental/rfm/backend/snow/sampler.py +297 -0
- kumoai/experimental/rfm/backend/snow/table.py +174 -49
- kumoai/experimental/rfm/backend/sqlite/__init__.py +4 -2
- kumoai/experimental/rfm/backend/sqlite/sampler.py +398 -0
- kumoai/experimental/rfm/backend/sqlite/table.py +131 -48
- kumoai/experimental/rfm/base/__init__.py +21 -5
- kumoai/experimental/rfm/base/column.py +96 -10
- kumoai/experimental/rfm/base/expression.py +44 -0
- kumoai/experimental/rfm/base/sampler.py +422 -35
- kumoai/experimental/rfm/base/source.py +2 -1
- kumoai/experimental/rfm/base/sql_sampler.py +144 -0
- kumoai/experimental/rfm/base/table.py +386 -195
- kumoai/experimental/rfm/graph.py +350 -178
- kumoai/experimental/rfm/infer/__init__.py +6 -4
- kumoai/experimental/rfm/infer/dtype.py +7 -4
- kumoai/experimental/rfm/infer/multicategorical.py +1 -1
- kumoai/experimental/rfm/infer/pkey.py +4 -2
- kumoai/experimental/rfm/infer/stype.py +35 -0
- kumoai/experimental/rfm/infer/time_col.py +1 -2
- kumoai/experimental/rfm/pquery/executor.py +27 -27
- kumoai/experimental/rfm/pquery/pandas_executor.py +29 -31
- kumoai/experimental/rfm/relbench.py +76 -0
- kumoai/experimental/rfm/rfm.py +630 -408
- kumoai/experimental/rfm/sagemaker.py +4 -4
- kumoai/experimental/rfm/task_table.py +290 -0
- kumoai/pquery/predictive_query.py +10 -6
- kumoai/testing/snow.py +50 -0
- kumoai/trainer/distilled_trainer.py +175 -0
- kumoai/utils/__init__.py +3 -2
- kumoai/utils/display.py +51 -0
- kumoai/utils/progress_logger.py +190 -12
- kumoai/utils/sql.py +3 -0
- {kumoai-2.13.0.dev202512091732.dist-info → kumoai-2.14.0.dev202601051732.dist-info}/METADATA +3 -2
- {kumoai-2.13.0.dev202512091732.dist-info → kumoai-2.14.0.dev202601051732.dist-info}/RECORD +49 -40
- kumoai/experimental/rfm/local_graph_sampler.py +0 -223
- kumoai/experimental/rfm/local_pquery_driver.py +0 -689
- {kumoai-2.13.0.dev202512091732.dist-info → kumoai-2.14.0.dev202601051732.dist-info}/WHEEL +0 -0
- {kumoai-2.13.0.dev202512091732.dist-info → kumoai-2.14.0.dev202601051732.dist-info}/licenses/LICENSE +0 -0
- {kumoai-2.13.0.dev202512091732.dist-info → kumoai-2.14.0.dev202601051732.dist-info}/top_level.txt +0 -0
kumoai/experimental/rfm/base/table.py

@@ -1,25 +1,32 @@
+import warnings
 from abc import ABC, abstractmethod
-from collections import
+from collections.abc import Sequence
 from functools import cached_property
-from typing import Dict, List, Optional, Sequence, Set

+import numpy as np
 import pandas as pd
+from kumoapi.model_plan import MissingType
 from kumoapi.source_table import UnavailableSourceTable
 from kumoapi.table import Column as ColumnDefinition
 from kumoapi.table import TableDefinition
-from kumoapi.typing import Stype
+from kumoapi.typing import Dtype, Stype
 from typing_extensions import Self

-from kumoai import
-
+from kumoai.experimental.rfm.base import (
+    Column,
+    ColumnSpec,
+    ColumnSpecType,
+    DataBackend,
+    SourceColumn,
+    SourceForeignKey,
+)
 from kumoai.experimental.rfm.infer import (
-
-    contains_id,
-    contains_multicategorical,
-    contains_timestamp,
+    infer_dtype,
     infer_primary_key,
+    infer_stype,
     infer_time_column,
 )
+from kumoai.utils import display, quote_ident


 class Table(ABC):
@@ -29,53 +36,48 @@ class Table(ABC):

     Args:
         name: The name of this table.
+        source_name: The source name of this table. If set to ``None``,
+            ``name`` is being used.
         columns: The selected columns of this table.
         primary_key: The name of the primary key of this table, if it exists.
         time_column: The name of the time column of this table, if it exists.
         end_time_column: The name of the end time column of this table, if it
             exists.
     """
+    _NUM_SAMPLE_ROWS = 1_000
+
     def __init__(
         self,
         name: str,
-
-
-
-
+        source_name: str | None = None,
+        columns: Sequence[ColumnSpecType] | None = None,
+        primary_key: MissingType | str | None = MissingType.VALUE,
+        time_column: str | None = None,
+        end_time_column: str | None = None,
     ) -> None:

         self._name = name
-        self.
-        self.
-        self.
-
-
-
-
-
-
-
-
-
-        if
-
-
-
-
-
-
-
-
-            if column.is_unique_key
-        ]
-        if primary_key is None and len(unique_keys) == 1:
-            primary_key = unique_keys[0]
-
-        self._columns: Dict[str, Column] = {}
-        for column_name in columns or list(self._source_column_dict.keys()):
-            self.add_column(column_name)
-
-        if primary_key is not None:
+        self._source_name = source_name or name
+        self._column_dict: dict[str, Column] = {}
+        self._primary_key: str | None = None
+        self._time_column: str | None = None
+        self._end_time_column: str | None = None
+        self._expr_sample_df = pd.DataFrame(index=range(self._NUM_SAMPLE_ROWS))
+
+        if columns is None:
+            columns = list(self._source_column_dict.keys())
+
+        self.add_columns(columns)
+
+        if isinstance(primary_key, MissingType):
+            # Infer primary key from source metadata, but only set it in case
+            # it is already part of the column set (don't magically add it):
+            if any(column.is_source for column in self.columns):
+                primary_key = self._source_primary_key
+            if (primary_key is not None and primary_key in self
+                    and self[primary_key].is_source):
+                self.primary_key = primary_key
+        elif primary_key is not None:
             if primary_key not in self:
                 self.add_column(primary_key)
             self.primary_key = primary_key
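The constructor rework above replaces the old positional arguments with column specs, an optional source name, and an inferable primary key. A minimal usage sketch, assuming a concrete backend subclass (the class name `LocalTable` used here is a placeholder; the real entry points live in the backend modules listed in the file summary):

```python
# Hypothetical concrete subclass of the abstract Table shown in this diff;
# only the keyword arguments mirror the new `Table.__init__` signature.
table = LocalTable(
    name="users",
    source_name="PUBLIC.USERS",        # new: defaults to `name` when omitted
    columns=["user_id", "age", "signup_date"],
    time_column="signup_date",
    # `primary_key` defaults to `MissingType.VALUE`, so the key is now
    # inferred from source metadata instead of defaulting to "no primary key".
)
```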
@@ -95,13 +97,22 @@ class Table(ABC):
         r"""The name of this table."""
         return self._name

-
+    @property
+    def source_name(self) -> str:
+        r"""The source name of this table."""
+        return self._source_name
+
+    @property
+    def _quoted_source_name(self) -> str:
+        return quote_ident(self._source_name)
+
+    # Column ##################################################################

     def has_column(self, name: str) -> bool:
         r"""Returns ``True`` if this table holds a column with name ``name``;
         ``False`` otherwise.
         """
-        return name in self.
+        return name in self._column_dict

     def column(self, name: str) -> Column:
         r"""Returns the data column named with name ``name`` in this table.
@@ -114,65 +125,113 @@ class Table(ABC):
         """
         if not self.has_column(name):
             raise KeyError(f"Column '{name}' not found in table '{self.name}'")
-        return self.
+        return self._column_dict[name]

     @property
-    def columns(self) ->
+    def columns(self) -> list[Column]:
         r"""Returns a list of :class:`Column` objects that represent the
         columns in this table.
         """
-        return list(self.
+        return list(self._column_dict.values())

-    def
-        r"""Adds a
+    def add_columns(self, columns: Sequence[ColumnSpecType]) -> None:
+        r"""Adds a set of columns to this table.

         Args:
-
+            columns: The columns to add.

         Raises:
-            KeyError: If
+            KeyError: If any of the column names already exist in this table.
         """
-        if
-
-            f"'{self.name}'")
-
-        if name not in self._source_column_dict:
-            raise KeyError(f"Column '{name}' does not exist in the underlying "
-                           f"source table")
-
-        try:
-            dtype = self._source_column_dict[name].dtype
-        except Exception as e:
-            raise RuntimeError(f"Could not obtain data type for column "
-                               f"'{name}' in table '{self.name}'. Change "
-                               f"the data type of the column in the source "
-                               f"table or remove it from the table.") from e
-
-        try:
-            ser = self._sample_df[name]
-            if contains_id(ser, name, dtype):
-                stype = Stype.ID
-            elif contains_timestamp(ser, name, dtype):
-                stype = Stype.timestamp
-            elif contains_multicategorical(ser, name, dtype):
-                stype = Stype.multicategorical
-            elif contains_categorical(ser, name, dtype):
-                stype = Stype.categorical
-            else:
-                stype = dtype.default_stype
-        except Exception as e:
-            raise RuntimeError(f"Could not obtain semantic type for column "
-                               f"'{name}' in table '{self.name}'. Change "
-                               f"the data type of the column in the source "
-                               f"table or remove it from the table.") from e
-
-        self._columns[name] = Column(
-            name=name,
-            dtype=dtype,
-            stype=stype,
-        )
+        if len(columns) == 0:
+            return

-
+        column_specs = [ColumnSpec.coerce(column) for column in columns]
+
+        # Obtain a batch-wise sample for all column expressions:
+        expr_specs = [spec for spec in column_specs if not spec.is_source]
+        if len(expr_specs) > 0:
+            dfs = [
+                self._expr_sample_df,
+                self._get_expr_sample_df(expr_specs).reset_index(drop=True),
+            ]
+            size = min(map(len, dfs))
+            df = pd.concat([dfs[0].iloc[:size], dfs[1].iloc[:size]], axis=1)
+            df = df.loc[:, ~df.columns.duplicated(keep='last')]
+            self._expr_sample_df = df
+
+        for column_spec in column_specs:
+            if column_spec.name in self:
+                raise KeyError(f"Column '{column_spec.name}' already exists "
+                               f"in table '{self.name}'")
+
+            dtype = column_spec.dtype
+            stype = column_spec.stype
+
+            if column_spec.is_source:
+                if column_spec.name not in self._source_column_dict:
+                    raise ValueError(
+                        f"Column '{column_spec.name}' does not exist in the "
+                        f"underlying source table")
+
+                if dtype is None:
+                    dtype = self._source_column_dict[column_spec.name].dtype
+
+                if dtype == Dtype.unsupported:
+                    raise ValueError(
+                        f"Encountered unsupported data type for column "
+                        f"'{column_spec.name}' in table '{self.name}'. Please "
+                        f"either change the column's data type or remove the "
+                        f"column from this table.")
+
+            if dtype is None:
+                if column_spec.is_source:
+                    ser = self._source_sample_df[column_spec.name]
+                else:
+                    ser = self._expr_sample_df[column_spec.name]
+                try:
+                    dtype = infer_dtype(ser)
+                except Exception as e:
+                    raise RuntimeError(
+                        f"Encountered unsupported data type '{ser.dtype}' for "
+                        f"column '{column_spec.name}' in table '{self.name}'. "
+                        f"Please either manually override the columns's data "
+                        f"type or remove the column from this table.") from e
+
+            if stype is None:
+                if column_spec.is_source:
+                    ser = self._source_sample_df[column_spec.name]
+                else:
+                    ser = self._expr_sample_df[column_spec.name]
+                try:
+                    stype = infer_stype(ser, column_spec.name, dtype)
+                except Exception as e:
+                    raise RuntimeError(
+                        f"Could not determine semantic type for column "
+                        f"'{column_spec.name}' with data type '{dtype}' in "
+                        f"table '{self.name}'. Please either change the "
+                        f"column's data type or remove the column from this "
+                        f"table.") from e
+
+            self._column_dict[column_spec.name] = Column(
+                name=column_spec.name,
+                expr=column_spec.expr,
+                dtype=dtype,
+                stype=stype,
+            )
+
+    def add_column(self, column: ColumnSpecType) -> Column:
+        r"""Adds a column to this table.
+
+        Args:
+            column: The column to add.
+
+        Raises:
+            KeyError: If the column name already exists in this table.
+        """
+        column_spec = ColumnSpec.coerce(column)
+        self.add_columns([column_spec])
+        return self[column_spec.name]

     def remove_column(self, name: str) -> Self:
         r"""Removes a column from this table.
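A sketch of the reworked column API, assuming `table` is an instance of a concrete backend subclass. Plain string names still select source columns; the `ColumnSpec` keyword arguments shown here are assumptions based on the attributes this diff reads (`name`, `expr`, `dtype`, `stype`), not a documented constructor:

```python
from kumoai.experimental.rfm.base import ColumnSpec

table.add_column("age")  # source column; dtype/stype inferred from a sample
table.add_columns([
    # Hypothetical expression column; the expression syntax is backend-specific.
    ColumnSpec(name="age_bucket", expr="age / 10"),
])
print(table.column("age").dtype, table.column("age").stype)
```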
@@ -192,7 +251,7 @@ class Table(ABC):
             self.time_column = None
         if self._end_time_column == name:
             self.end_time_column = None
-        del self.
+        del self._column_dict[name]

         return self

@@ -205,22 +264,22 @@ class Table(ABC):
         return self._primary_key is not None

     @property
-    def primary_key(self) ->
+    def primary_key(self) -> Column | None:
         r"""The primary key column of this table.

         The getter returns the primary key column of this table, or ``None`` if
         no such primary key is present.

         The setter sets a column as a primary key on this table, and raises a
-        :class:`ValueError` if the primary key has a non-ID
-        if the column name does not match a column in the data frame.
+        :class:`ValueError` if the primary key has a non-ID compatible data
+        type or if the column name does not match a column in the data frame.
         """
         if self._primary_key is None:
             return None
         return self[self._primary_key]

     @primary_key.setter
-    def primary_key(self, name:
+    def primary_key(self, name: str | None) -> None:
         if name is not None and name == self._time_column:
             raise ValueError(f"Cannot specify column '{name}' as a primary "
                              f"key since it is already defined to be a time "
@@ -250,22 +309,23 @@ class Table(ABC):
         return self._time_column is not None

     @property
-    def time_column(self) ->
+    def time_column(self) -> Column | None:
         r"""The time column of this table.

         The getter returns the time column of this table, or ``None`` if no
         such time column is present.

         The setter sets a column as a time column on this table, and raises a
-        :class:`ValueError` if the time column has a non-timestamp
-        type or if the column name does not match a column in the data
+        :class:`ValueError` if the time column has a non-timestamp compatible
+        data type or if the column name does not match a column in the data
+        frame.
         """
         if self._time_column is None:
             return None
         return self[self._time_column]

     @time_column.setter
-    def time_column(self, name:
+    def time_column(self, name: str | None) -> None:
         if name is not None and name == self._primary_key:
             raise ValueError(f"Cannot specify column '{name}' as a time "
                              f"column since it is already defined to be a "
@@ -295,7 +355,7 @@ class Table(ABC):
         return self._end_time_column is not None

     @property
-    def end_time_column(self) ->
+    def end_time_column(self) -> Column | None:
         r"""The end time column of this table.

         The getter returns the end time column of this table, or ``None`` if no
@@ -303,15 +363,15 @@ class Table(ABC):

         The setter sets a column as an end time column on this table, and
         raises a :class:`ValueError` if the end time column has a non-timestamp
-
-        frame.
+        compatible data type or if the column name does not match a column in
+        the data frame.
         """
         if self._end_time_column is None:
             return None
         return self[self._end_time_column]

     @end_time_column.setter
-    def end_time_column(self, name:
+    def end_time_column(self, name: str | None) -> None:
         if name is not None and name == self._primary_key:
             raise ValueError(f"Cannot specify column '{name}' as an end time "
                              f"column since it is already defined to be a "
@@ -380,33 +440,98 @@ class Table(ABC):

     def print_metadata(self) -> None:
         r"""Prints the :meth:`~metadata` of this table."""
-
-        if self._num_rows
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        msg = f"🏷️ Metadata of Table `{self.name}`"
+        if num := self._num_rows:
+            msg += " (1 row)" if num == 1 else f" ({num:,} rows)"
+
+        display.title(msg)
+        display.dataframe(self.metadata)
+
+    def infer_primary_key(self, verbose: bool = True) -> Self:
+        r"""Infers the primary key in this table.
+
+        Args:
+            verbose: Whether to print verbose output.
+        """
+        if self.has_primary_key():
+            return self
+
+        def _set_primary_key(primary_key: str) -> None:
+            self.primary_key = primary_key
+            if verbose:
+                display.message(f"Inferred primary key `{primary_key}` for "
+                                f"table `{self.name}`")
+
+        # Inference from source column metadata:
+        if any(column.is_source for column in self.columns):
+            primary_key = self._source_primary_key
+            if (primary_key is not None and primary_key in self
+                    and self[primary_key].is_source):
+                _set_primary_key(primary_key)
+                return self
+
+            unique_keys = [
+                column.name for column in self._source_column_dict.values()
+                if column.is_unique_key
+            ]
+            if (len(unique_keys) == 1  # NOTE No composite keys yet.
+                    and unique_keys[0] in self
+                    and self[unique_keys[0]].is_source):
+                _set_primary_key(unique_keys[0])
+                return self
+
+        # Heuristic-based inference:
+        candidates = [
+            column.name for column in self.columns if column.stype == Stype.ID
+        ]
+        if len(candidates) == 0:
+            for column in self.columns:
+                if self.name.lower() == column.name.lower():
+                    candidates.append(column.name)
+                elif (self.name.lower().endswith('s')
+                      and self.name.lower()[:-1] == column.name.lower()):
+                    candidates.append(column.name)
+
+        if primary_key := infer_primary_key(
+                table_name=self.name,
+                df=self._get_sample_df(),
+                candidates=candidates,
+        ):
+            _set_primary_key(primary_key)
+            return self
+
+        return self
+
+    def infer_time_column(self, verbose: bool = True) -> Self:
+        r"""Infers the time column in this table.
+
+        Args:
+            verbose: Whether to print verbose output.
+        """
+        if self.has_time_column():
+            return self
+
+        # Heuristic-based inference:
+        candidates = [
+            column.name for column in self.columns
+            if column.stype == Stype.timestamp
+            and column.name != self._end_time_column
+        ]
+
+        if time_column := infer_time_column(
+                df=self._get_sample_df(),
+                candidates=candidates,
+        ):
+            self.time_column = time_column
+
+            if verbose:
+                display.message(f"Inferred time column `{time_column}` for "
+                                f"table `{self.name}`")
+
+        return self

     def infer_metadata(self, verbose: bool = True) -> Self:
-        r"""Infers metadata, *i.e.*, primary keys and time columns, in
+        r"""Infers metadata, *i.e.*, primary keys and time columns, in this
         table.

         Args:
@@ -414,48 +539,19 @@ class Table(ABC):
         """
         logs = []

-        # Try to detect primary key if not set:
         if not self.has_primary_key():
+            self.infer_primary_key(verbose=False)
+            if self.has_primary_key():
+                logs.append(f"primary key `{self._primary_key}`")

-            def is_candidate(column: Column) -> bool:
-                if column.stype == Stype.ID:
-                    return True
-                if all(column.stype != Stype.ID for column in self.columns):
-                    if self.name == column.name:
-                        return True
-                    if (self.name.endswith('s')
-                            and self.name[:-1] == column.name):
-                        return True
-                return False
-
-            candidates = [
-                column.name for column in self.columns if is_candidate(column)
-            ]
-
-            if primary_key := infer_primary_key(
-                    table_name=self.name,
-                    df=self._sample_df,
-                    candidates=candidates,
-            ):
-                self.primary_key = primary_key
-                logs.append(f"primary key '{primary_key}'")
-
-        # Try to detect time column if not set:
         if not self.has_time_column():
-
-
-
-                and column.name != self._end_time_column
-            ]
-            if time_column := infer_time_column(
-                    df=self._sample_df,
-                    candidates=candidates,
-            ):
-                self.time_column = time_column
-                logs.append(f"time column '{time_column}'")
+            self.infer_time_column(verbose=False)
+            if self.has_time_column():
+                logs.append(f"time column `{self._time_column}`")

         if verbose and len(logs) > 0:
-
+            display.message(f"Inferred {' and '.join(logs)} for table "
+                            f"`{self.name}`")

         return self

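The inference logic that previously lived inside `infer_metadata()` is now exposed as two public methods, and `infer_metadata()` simply delegates to them. A usage sketch, assuming `table` is an instance of a concrete backend subclass:

```python
table.infer_primary_key()            # source metadata first, then heuristics
table.infer_time_column(verbose=False)
table.infer_metadata()               # no-op for anything already set
table.print_metadata()               # now rendered via kumoai.utils.display
```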
@@ -473,6 +569,114 @@ class Table(ABC):
             end_time_col=self._end_time_column,
         )

+    @cached_property
+    def _source_column_dict(self) -> dict[str, SourceColumn]:
+        source_columns = self._get_source_columns()
+        if len(source_columns) == 0:
+            raise ValueError(f"Table '{self.name}' has no columns")
+        return {column.name: column for column in source_columns}
+
+    @cached_property
+    def _source_primary_key(self) -> str | None:
+        primary_keys = [
+            column.name for column in self._source_column_dict.values()
+            if column.is_primary_key
+        ]
+        # NOTE No composite keys yet.
+        return primary_keys[0] if len(primary_keys) == 1 else None
+
+    @cached_property
+    def _source_foreign_key_dict(self) -> dict[str, SourceForeignKey]:
+        return {key.name: key for key in self._get_source_foreign_keys()}
+
+    @cached_property
+    def _source_sample_df(self) -> pd.DataFrame:
+        return self._get_source_sample_df().reset_index(drop=True)
+
+    @cached_property
+    def _num_rows(self) -> int | None:
+        return self._get_num_rows()
+
+    def _get_sample_df(self) -> pd.DataFrame:
+        dfs: list[pd.DataFrame] = []
+        if any(column.is_source for column in self.columns):
+            dfs.append(self._source_sample_df)
+        if any(not column.is_source for column in self.columns):
+            dfs.append(self._expr_sample_df)
+
+        if len(dfs) == 0:
+            return pd.DataFrame(index=range(1000))
+        if len(dfs) == 1:
+            return dfs[0]
+
+        size = min(map(len, dfs))
+        df = pd.concat([dfs[0].iloc[:size], dfs[1].iloc[:size]], axis=1)
+        df = df.loc[:, ~df.columns.duplicated(keep='last')]
+        return df
+
+    @staticmethod
+    def _sanitize(
+        df: pd.DataFrame,
+        dtype_dict: dict[str, Dtype | None] | None = None,
+        stype_dict: dict[str, Stype | None] | None = None,
+    ) -> pd.DataFrame:
+        r"""Sanitzes a :class:`pandas.DataFrame` in-place such that its data
+        types match table data and semantic type specification.
+        """
+        def _to_datetime(ser: pd.Series) -> pd.Series:
+            if not pd.api.types.is_datetime64_any_dtype(ser):
+                with warnings.catch_warnings():
+                    warnings.filterwarnings(
+                        'ignore',
+                        message='Could not infer format',
+                    )
+                    ser = pd.to_datetime(ser, errors='coerce')
+            if isinstance(ser.dtype, pd.DatetimeTZDtype):
+                ser = ser.dt.tz_localize(None)
+            if ser.dtype != 'datetime64[ns]':
+                ser = ser.astype('datetime64[ns]')
+            return ser
+
+        def _to_list(ser: pd.Series, dtype: Dtype | None) -> pd.Series:
+            if (pd.api.types.is_string_dtype(ser)
+                    and dtype in {Dtype.intlist, Dtype.floatlist}):
+                try:
+                    ser = ser.map(lambda row: np.fromstring(
+                        row.strip('[]'),
+                        sep=',',
+                        dtype=int if dtype == Dtype.intlist else np.float32,
+                    ) if row is not None else None)
+                except Exception:
+                    pass
+
+            if pd.api.types.is_string_dtype(ser):
+                try:
+                    import orjson as json
+                except ImportError:
+                    import json
+                try:
+                    ser = ser.map(lambda row: json.loads(row)
+                                  if row is not None else None)
+                except Exception:
+                    pass
+
+            return ser
+
+        for column_name in df.columns:
+            dtype = (dtype_dict or {}).get(column_name)
+            stype = (stype_dict or {}).get(column_name)
+
+            if dtype == Dtype.time:
+                df[column_name] = _to_datetime(df[column_name])
+            elif stype == Stype.timestamp:
+                df[column_name] = _to_datetime(df[column_name])
+            elif dtype is not None and dtype.is_list():
+                df[column_name] = _to_list(df[column_name], dtype)
+            elif stype == Stype.sequence:
+                df[column_name] = _to_list(df[column_name], Dtype.floatlist)
+
+        return df
+
     # Python builtins #########################################################

     def __hash__(self) -> int:
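For illustration of the new `_sanitize()` helper (a private static method, shown here only to clarify its behavior; the import path of the abstract class is assumed): string timestamps are coerced to naive `datetime64[ns]`, and string-encoded lists are parsed into arrays.

```python
import pandas as pd

from kumoapi.typing import Dtype, Stype
from kumoai.experimental.rfm.base.table import Table  # import path assumed

df = pd.DataFrame({
    "signup_date": ["2024-01-01T00:00:00Z", None],
    "embedding": ["[1.0, 2.0, 3.0]", None],
})
df = Table._sanitize(
    df,
    dtype_dict={"embedding": Dtype.floatlist},
    stype_dict={"signup_date": Stype.timestamp},
)
print(df.dtypes)  # signup_date -> datetime64[ns]; embedding -> object (arrays)
```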
@@ -503,43 +707,30 @@ class Table(ABC):

     # Abstract Methods ########################################################

-    @
-
-
+    @property
+    @abstractmethod
+    def backend(self) -> DataBackend:
+        r"""The data backend of this table."""

     @abstractmethod
-    def _get_source_columns(self) ->
+    def _get_source_columns(self) -> list[SourceColumn]:
         pass

-    @cached_property
-    def _source_foreign_key_dict(self) -> Dict[str, SourceForeignKey]:
-        fkeys = self._get_source_foreign_keys()
-        # NOTE Drop all keys that link to different primary keys in the same
-        # table since we don't support composite keys yet:
-        table_pkeys: Dict[str, Set[str]] = defaultdict(set)
-        for fkey in fkeys:
-            table_pkeys[fkey.dst_table].add(fkey.primary_key)
-        return {
-            fkey.name: fkey
-            for fkey in fkeys if len(table_pkeys[fkey.dst_table]) == 1
-        }
-
     @abstractmethod
-    def _get_source_foreign_keys(self) ->
+    def _get_source_foreign_keys(self) -> list[SourceForeignKey]:
         pass

-    @cached_property
-    def _sample_df(self) -> pd.DataFrame:
-        return self._get_sample_df()
-
     @abstractmethod
-    def
+    def _get_source_sample_df(self) -> pd.DataFrame:
         pass

-    @
-    def
-
+    @abstractmethod
+    def _get_expr_sample_df(
+        self,
+        columns: Sequence[ColumnSpec],
+    ) -> pd.DataFrame:
+        pass

     @abstractmethod
-    def _get_num_rows(self) ->
+    def _get_num_rows(self) -> int | None:
         pass