kumoai 2.12.0.dev202511111731__cp311-cp311-macosx_11_0_arm64.whl → 2.13.0.dev202512091732__cp311-cp311-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. kumoai/__init__.py +18 -9
  2. kumoai/_version.py +1 -1
  3. kumoai/client/client.py +9 -13
  4. kumoai/connector/utils.py +23 -2
  5. kumoai/experimental/rfm/__init__.py +162 -46
  6. kumoai/experimental/rfm/backend/__init__.py +0 -0
  7. kumoai/experimental/rfm/backend/local/__init__.py +42 -0
  8. kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +20 -30
  9. kumoai/experimental/rfm/backend/local/sampler.py +242 -0
  10. kumoai/experimental/rfm/backend/local/table.py +109 -0
  11. kumoai/experimental/rfm/backend/snow/__init__.py +35 -0
  12. kumoai/experimental/rfm/backend/snow/table.py +117 -0
  13. kumoai/experimental/rfm/backend/sqlite/__init__.py +30 -0
  14. kumoai/experimental/rfm/backend/sqlite/table.py +101 -0
  15. kumoai/experimental/rfm/base/__init__.py +14 -0
  16. kumoai/experimental/rfm/base/column.py +66 -0
  17. kumoai/experimental/rfm/base/sampler.py +374 -0
  18. kumoai/experimental/rfm/base/source.py +18 -0
  19. kumoai/experimental/rfm/{local_table.py → base/table.py} +139 -139
  20. kumoai/experimental/rfm/{local_graph.py → graph.py} +334 -79
  21. kumoai/experimental/rfm/infer/__init__.py +6 -0
  22. kumoai/experimental/rfm/infer/dtype.py +79 -0
  23. kumoai/experimental/rfm/infer/pkey.py +126 -0
  24. kumoai/experimental/rfm/infer/time_col.py +62 -0
  25. kumoai/experimental/rfm/local_graph_sampler.py +43 -4
  26. kumoai/experimental/rfm/local_pquery_driver.py +1 -1
  27. kumoai/experimental/rfm/pquery/pandas_executor.py +1 -1
  28. kumoai/experimental/rfm/rfm.py +28 -27
  29. kumoai/experimental/rfm/sagemaker.py +138 -0
  30. kumoai/spcs.py +1 -3
  31. kumoai/testing/decorators.py +1 -1
  32. {kumoai-2.12.0.dev202511111731.dist-info → kumoai-2.13.0.dev202512091732.dist-info}/METADATA +12 -2
  33. {kumoai-2.12.0.dev202511111731.dist-info → kumoai-2.13.0.dev202512091732.dist-info}/RECORD +36 -21
  34. kumoai/experimental/rfm/utils.py +0 -344
  35. {kumoai-2.12.0.dev202511111731.dist-info → kumoai-2.13.0.dev202512091732.dist-info}/WHEEL +0 -0
  36. {kumoai-2.12.0.dev202511111731.dist-info → kumoai-2.13.0.dev202512091732.dist-info}/licenses/LICENSE +0 -0
  37. {kumoai-2.12.0.dev202511111731.dist-info → kumoai-2.13.0.dev202512091732.dist-info}/top_level.txt +0 -0
kumoai/experimental/rfm/backend/local/sampler.py
@@ -0,0 +1,242 @@
+ from typing import TYPE_CHECKING, Literal
+
+ import numpy as np
+ import pandas as pd
+ from kumoapi.pquery import ValidatedPredictiveQuery
+
+ from kumoai.experimental.rfm.backend.local import LocalGraphStore
+ from kumoai.experimental.rfm.base import Sampler, SamplerOutput, TargetOutput
+ from kumoai.experimental.rfm.pquery import PQueryPandasExecutor
+ from kumoai.utils import ProgressLogger
+
+ if TYPE_CHECKING:
+     from kumoai.experimental.rfm import Graph
+
+
+ class LocalSampler(Sampler):
+     def __init__(
+         self,
+         graph: 'Graph',
+         verbose: bool | ProgressLogger = True,
+     ) -> None:
+         super().__init__(graph=graph)
+
+         import kumoai.kumolib as kumolib
+
+         self._graph_store = LocalGraphStore(graph, verbose)
+         self._graph_sampler = kumolib.NeighborSampler(
+             list(self.table_stype_dict.keys()),
+             self.edge_types,
+             {
+                 '__'.join(edge_type): colptr
+                 for edge_type, colptr in self._graph_store.colptr_dict.items()
+             },
+             {
+                 '__'.join(edge_type): row
+                 for edge_type, row in self._graph_store.row_dict.items()
+             },
+             self._graph_store.time_dict,
+         )
+
+     def _sample_subgraph(
+         self,
+         entity_table_name: str,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series,
+         columns_dict: dict[str, set[str]],
+         num_neighbors: list[int],
+     ) -> SamplerOutput:
+
+         num_neighbors_dict: dict[str, list[int]] = {
+             '__'.join(edge_type): num_neighbors
+             for edge_type in self.edge_types
+         }
+
+         (
+             row_dict,
+             col_dict,
+             node_dict,
+             batch_dict,
+             num_sampled_nodes_dict,
+             num_sampled_edges_dict,
+         ) = self._graph_sampler.sample(
+             num_neighbors_dict,
+             {},
+             entity_table_name,
+             self._graph_store.get_node_id(entity_table_name, entity_pkey),
+             anchor_time.astype(int).to_numpy() // 1000**3,  # to seconds
+         )
+
+         df_dict: dict[str, pd.DataFrame] = {}
+         inverse_dict: dict[str, np.ndarray] = {}
+         for table_name, node in node_dict.items():
+             df = self._graph_store.df_dict[table_name]
+             columns = columns_dict[table_name]
+             if self.end_time_column_dict.get(table_name, None) in columns:
+                 df = df.iloc[node]
+             elif len(columns) == 0:
+                 df = df.iloc[node]
+             else:
+                 # Only store unique rows in `df` above a certain threshold:
+                 unique_node, inverse = np.unique(node, return_inverse=True)
+                 if len(node) > 1.05 * len(unique_node):
+                     df = df.iloc[unique_node]
+                     inverse_dict[table_name] = inverse
+                 else:
+                     df = df.iloc[node]
+             df = df.reset_index(drop=True)
+             df = df[list(columns)]
+             df_dict[table_name] = df
+
+         num_sampled_nodes_dict = {
+             table_name: num_sampled_nodes.tolist()
+             for table_name, num_sampled_nodes in
+             num_sampled_nodes_dict.items()
+         }
+
+         row_dict = {
+             edge_type: row_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         col_dict = {
+             edge_type: col_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         num_sampled_edges_dict = {
+             edge_type: num_sampled_edges_dict['__'.join(edge_type)].tolist()
+             for edge_type in self.edge_types
+         }
+
+         return SamplerOutput(
+             df_dict=df_dict,
+             inverse_dict=inverse_dict,
+             batch_dict=batch_dict,
+             num_sampled_nodes_dict=num_sampled_nodes_dict,
+             row_dict=row_dict,
+             col_dict=col_dict,
+             num_sampled_edges_dict=num_sampled_edges_dict,
+         )
+
+     def _sample_target(
+         self,
+         query: ValidatedPredictiveQuery,
+         num_examples: int,
+         anchor_time: pd.Timestamp | Literal['entity'],
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+         random_seed: int | None = None,
+     ) -> TargetOutput:
+
+         candidate = pd.Series([0, 1])  # TODO
+         anchor_time = pd.Series(anchor_time).repeat(len(candidate))
+         anchor_time = anchor_time.reset_index(drop=True)
+         if anchor_time.dtype != 'datetime64[ns]':
+             anchor_time = anchor_time.astype('datetime64[ns]')
+
+         y, mask = self._execute_query(
+             query=query,
+             entity_pkey=candidate,
+             anchor_time=anchor_time,
+             columns_dict=columns_dict,
+             time_offset_dict=time_offset_dict,
+         )
+
+         return TargetOutput(
+             entity_pkey=candidate.iloc[mask],
+             anchor_time=anchor_time.iloc[mask],
+             target=y.iloc[mask],
+             num_trials=len(y),
+         )
+
+     def _execute_query(
+         self,
+         query: ValidatedPredictiveQuery,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+     ) -> tuple[pd.Series, np.ndarray]:
+         num_hops = 1 if len(time_offset_dict) > 0 else 0
+         num_neighbors_dict: dict[str, list[int]] = {}
+         unix_time_offset_dict: dict[str, list[list[int | None]]] = {}
+         for edge_type, (start, end) in time_offset_dict.items():
+             unix_time_offset_dict['__'.join(edge_type)] = [[
+                 date_offset_to_seconds(start) if start is not None else None,
+                 date_offset_to_seconds(end),
+             ]]
+         for edge_type in set(self.edge_types) - set(time_offset_dict.keys()):
+             num_neighbors_dict['__'.join(edge_type)] = [0] * num_hops
+
+         _, _, node_dict, batch_dict, _, _ = self._graph_sampler.sample(
+             num_neighbors_dict,
+             unix_time_offset_dict,
+             query.entity_table,
+             self._graph_store.get_node_id(query.entity_table, entity_pkey),
+             anchor_time.astype(int).to_numpy() // 1000**3,
+         )
+
+         feat_dict: dict[str, pd.DataFrame] = {}
+         time_dict: dict[str, pd.Series] = {}
+         for table_name, columns in columns_dict.items():
+             df = self._graph_store.df_dict[table_name]
+             df = df.iloc[node_dict[table_name]].reset_index(drop=True)
+             df = df[list(columns)]
+             feat_dict[table_name] = df
+             if time_column := self.time_column_dict.get(table_name):
+                 time_dict[table_name] = df[time_column]
+
+         return PQueryPandasExecutor().execute(
+             query=query,
+             feat_dict=feat_dict,
+             time_dict=time_dict,
+             batch_dict=batch_dict,
+             anchor_time=anchor_time,
+             num_forecasts=query.num_forecasts,
+         )
+
+
+ # Helper Methods ##############################################################
+
+
+ def date_offset_to_seconds(offset: pd.DateOffset) -> int:
+     r"""Convert a :class:`pandas.DateOffset` into a number of seconds.
+
+     .. note::
+         We are conservative and take months and years as their maximum value.
+         Additional values are then dropped in label computation where we know
+         the actual dates.
+     """
+     MAX_DAYS_IN_MONTH = 31
+     MAX_DAYS_IN_YEAR = 366
+
+     SECONDS_IN_MINUTE = 60
+     SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
+     SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
+
+     total_sec = 0
+     multiplier = getattr(offset, 'n', 1)  # The multiplier (if present).
+
+     for attr, value in offset.__dict__.items():
+         if value is None or value == 0:
+             continue
+         scaled_value = value * multiplier
+         if attr == 'years':
+             total_sec += scaled_value * MAX_DAYS_IN_YEAR * SECONDS_IN_DAY
+         elif attr == 'months':
+             total_sec += scaled_value * MAX_DAYS_IN_MONTH * SECONDS_IN_DAY
+         elif attr == 'days':
+             total_sec += scaled_value * SECONDS_IN_DAY
+         elif attr == 'hours':
+             total_sec += scaled_value * SECONDS_IN_HOUR
+         elif attr == 'minutes':
+             total_sec += scaled_value * SECONDS_IN_MINUTE
+         elif attr == 'seconds':
+             total_sec += scaled_value
+
+     return total_sec
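The `date_offset_to_seconds` helper above deliberately over-approximates months (31 days) and years (366 days); surplus time is dropped later during label computation, once the actual dates are known. A quick sketch of the expected arithmetic, assuming the helper is imported straight from the new sampler module shown above:

    import pandas as pd

    from kumoai.experimental.rfm.backend.local.sampler import date_offset_to_seconds

    # One month is counted at its maximum length: 31 * 24 * 60 * 60 = 2,678,400 seconds.
    print(date_offset_to_seconds(pd.DateOffset(months=1)))

    # Components add up: 1 day + 2 hours = 26 * 60 * 60 = 93,600 seconds.
    print(date_offset_to_seconds(pd.DateOffset(days=1, hours=2)))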
kumoai/experimental/rfm/backend/local/table.py
@@ -0,0 +1,109 @@
+ import warnings
+ from typing import List, Optional
+
+ import pandas as pd
+
+ from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+ from kumoai.experimental.rfm.infer import infer_dtype
+
+
+ class LocalTable(Table):
+     r"""A table backed by a :class:`pandas.DataFrame`.
+
+     A :class:`LocalTable` fully specifies the relevant metadata, *i.e.*
+     selected columns, column semantic types, primary keys and time columns.
+     :class:`LocalTable` is used to create a :class:`Graph`.
+
+     .. code-block:: python
+
+         import pandas as pd
+         import kumoai.experimental.rfm as rfm
+
+         # Load data from a CSV file:
+         df = pd.read_csv("data.csv")
+
+         # Create a table from a `pandas.DataFrame` and infer its metadata ...
+         table = rfm.LocalTable(df, name="my_table").infer_metadata()
+
+         # ... or create a table explicitly:
+         table = rfm.LocalTable(
+             df=df,
+             name="my_table",
+             primary_key="id",
+             time_column="time",
+             end_time_column=None,
+         )
+
+         # Verify metadata:
+         table.print_metadata()
+
+         # Change the semantic type of a column:
+         table[column].stype = "text"
+
+     Args:
+         df: The data frame to create this table from.
+         name: The name of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         df: pd.DataFrame,
+         name: str,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         if df.empty:
+             raise ValueError("Data frame is empty")
+         if isinstance(df.columns, pd.MultiIndex):
+             raise ValueError("Data frame must not have a multi-index")
+         if not df.columns.is_unique:
+             raise ValueError("Data frame must have unique column names")
+         if any(col == '' for col in df.columns):
+             raise ValueError("Data frame must have non-empty column names")
+
+         self._data = df.copy(deep=False)
+
+         super().__init__(
+             name=name,
+             columns=list(df.columns),
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         for column in self._data.columns:
+             ser = self._data[column]
+             try:
+                 dtype = infer_dtype(ser)
+             except Exception:
+                 warnings.warn(f"Data type inference for column '{column}' in "
+                               f"table '{self.name}' failed. Consider changing "
+                               f"the data type of the column to use it within "
+                               f"this table.")
+                 continue
+
+             source_column = SourceColumn(
+                 name=column,
+                 dtype=dtype,
+                 is_primary_key=False,
+                 is_unique_key=False,
+             )
+             source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         return []
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         return self._data
+
+     def _get_num_rows(self) -> Optional[int]:
+         return len(self._data)
kumoai/experimental/rfm/backend/snow/__init__.py
@@ -0,0 +1,35 @@
+ from typing import Any, TypeAlias
+
+ try:
+     import snowflake.connector
+ except ImportError:
+     raise ImportError("No module named 'snowflake'. Please install Kumo SDK "
+                       "with the 'snowflake' extension via "
+                       "`pip install kumoai[snowflake]`.")
+
+ Connection: TypeAlias = snowflake.connector.SnowflakeConnection
+
+
+ def connect(**kwargs: Any) -> Connection:
+     r"""Opens a connection to a :class:`snowflake` database.
+
+     If available, will return a connection to the active session.
+
+     kwargs: Connection arguments, following the :class:`snowflake` protocol.
+     """
+     try:
+         from snowflake.snowpark.context import get_active_session
+         return get_active_session().connection
+     except Exception:
+         pass
+
+     return snowflake.connector.connect(**kwargs)
+
+
+ from .table import SnowTable  # noqa: E402
+
+ __all__ = [
+     'connect',
+     'Connection',
+     'SnowTable',
+ ]
kumoai/experimental/rfm/backend/snow/table.py
@@ -0,0 +1,117 @@
+ import re
+ from typing import List, Optional, Sequence
+
+ import pandas as pd
+ from kumoapi.typing import Dtype
+
+ from kumoai.experimental.rfm.backend.snow import Connection
+ from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+
+
+ class SnowTable(Table):
+     r"""A table backed by a :class:`snowflake` database.
+
+     Args:
+         connection: The connection to a :class:`snowflake` database.
+         name: The name of this table.
+         database: The database.
+         schema: The schema.
+         columns: The selected columns of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         connection: Connection,
+         name: str,
+         database: str | None = None,
+         schema: str | None = None,
+         columns: Optional[Sequence[str]] = None,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         if database is not None and schema is None:
+             raise ValueError(f"Missing 'schema' for table '{name}' in "
+                              f"database '{database}'")
+
+         self._connection = connection
+         self._database = database
+         self._schema = schema
+
+         super().__init__(
+             name=name,
+             columns=columns,
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     @property
+     def fqn_name(self) -> str:
+         names: List[str] = []
+         if self._database is not None:
+             assert self._schema is not None
+             names.extend([self._database, self._schema])
+         elif self._schema is not None:
+             names.append(self._schema)
+         names.append(self._name)
+         return '.'.join(names)
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         with self._connection.cursor() as cursor:
+             try:
+                 cursor.execute(f"DESCRIBE TABLE {self.fqn_name}")
+             except Exception as e:
+                 raise ValueError(
+                     f"Table '{self.fqn_name}' does not exist") from e
+
+             for row in cursor.fetchall():
+                 column, type, _, _, _, is_pkey, is_unique = row[:7]
+
+                 type = type.strip().upper()
+                 if type.startswith('NUMBER'):
+                     dtype = Dtype.int
+                 elif type.startswith('VARCHAR'):
+                     dtype = Dtype.string
+                 elif type == 'FLOAT':
+                     dtype = Dtype.float
+                 elif type == 'BOOLEAN':
+                     dtype = Dtype.bool
+                 elif re.search('DATE|TIMESTAMP', type):
+                     dtype = Dtype.date
+                 else:
+                     continue
+
+                 source_column = SourceColumn(
+                     name=column,
+                     dtype=dtype,
+                     is_primary_key=is_pkey.strip().upper() == 'Y',
+                     is_unique_key=is_unique.strip().upper() == 'Y',
+                 )
+                 source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         source_fkeys: List[SourceForeignKey] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"SHOW IMPORTED KEYS IN TABLE {self.fqn_name}")
+             for row in cursor.fetchall():
+                 _, _, _, dst_table, pkey, _, _, _, fkey = row[:9]
+                 source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+         return source_fkeys
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         with self._connection.cursor() as cursor:
+             columns = ', '.join(self._source_column_dict.keys())
+             cursor.execute(f"SELECT {columns} FROM {self.fqn_name} LIMIT 1000")
+             table = cursor.fetch_arrow_all()
+         return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+     def _get_num_rows(self) -> Optional[int]:
+         return None
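Putting the `connect()` helper and `SnowTable` together, a minimal usage sketch; the credentials and the `ORDERS` table below are hypothetical placeholders, and inside a Snowpark session `connect()` simply reuses the active session:

    from kumoai.experimental.rfm.backend.snow import SnowTable, connect

    # Placeholder credentials, passed through to snowflake.connector.connect():
    conn = connect(account="my_account", user="my_user", password="***")

    # Describe a (hypothetical) ORDERS table; column dtypes and key flags come
    # from DESCRIBE TABLE and SHOW IMPORTED KEYS, as implemented above:
    orders = SnowTable(
        connection=conn,
        name="ORDERS",
        database="MY_DB",
        schema="PUBLIC",
        primary_key="ORDER_ID",
        time_column="ORDER_TS",
    )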
kumoai/experimental/rfm/backend/sqlite/__init__.py
@@ -0,0 +1,30 @@
+ from pathlib import Path
+ from typing import Any, TypeAlias, Union
+
+ try:
+     import adbc_driver_sqlite.dbapi as adbc
+ except ImportError:
+     raise ImportError("No module named 'adbc_driver_sqlite'. Please install "
+                       "Kumo SDK with the 'sqlite' extension via "
+                       "`pip install kumoai[sqlite]`.")
+
+ Connection: TypeAlias = adbc.AdbcSqliteConnection
+
+
+ def connect(uri: Union[str, Path, None] = None, **kwargs: Any) -> Connection:
+     r"""Opens a connection to a :class:`sqlite` database.
+
+     uri: The path to the database file to be opened.
+     kwargs: Additional connection arguments, following the
+         :class:`adbc_driver_sqlite` protocol.
+     """
+     return adbc.connect(uri, **kwargs)
+
+
+ from .table import SQLiteTable  # noqa: E402
+
+ __all__ = [
+     'connect',
+     'Connection',
+     'SQLiteTable',
+ ]
kumoai/experimental/rfm/backend/sqlite/table.py
@@ -0,0 +1,101 @@
+ import re
+ import warnings
+ from typing import List, Optional, Sequence
+
+ import pandas as pd
+ from kumoapi.typing import Dtype
+
+ from kumoai.experimental.rfm.backend.sqlite import Connection
+ from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+ from kumoai.experimental.rfm.infer import infer_dtype
+
+
+ class SQLiteTable(Table):
+     r"""A table backed by a :class:`sqlite` database.
+
+     Args:
+         connection: The connection to a :class:`sqlite` database.
+         name: The name of this table.
+         columns: The selected columns of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         connection: Connection,
+         name: str,
+         columns: Optional[Sequence[str]] = None,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         self._connection = connection
+
+         super().__init__(
+             name=name,
+             columns=columns,
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"PRAGMA table_info({self.name})")
+             rows = cursor.fetchall()
+
+         if len(rows) == 0:
+             raise ValueError(f"Table '{self.name}' does not exist")
+
+         for _, column, type, _, _, is_pkey in rows:
+             # Determine column affinity:
+             type = type.strip().upper()
+             if re.search('INT', type):
+                 dtype = Dtype.int
+             elif re.search('TEXT|CHAR|CLOB', type):
+                 dtype = Dtype.string
+             elif re.search('REAL|FLOA|DOUB', type):
+                 dtype = Dtype.float
+             else:  # NUMERIC affinity.
+                 ser = self._sample_df[column]
+                 try:
+                     dtype = infer_dtype(ser)
+                 except Exception:
+                     warnings.warn(
+                         f"Data type inference for column '{column}' in "
+                         f"table '{self.name}' failed. Consider changing "
+                         f"the data type of the column to use it within "
+                         f"this table.")
+                     continue
+
+             source_column = SourceColumn(
+                 name=column,
+                 dtype=dtype,
+                 is_primary_key=bool(is_pkey),
+                 is_unique_key=False,
+             )
+             source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         source_fkeys: List[SourceForeignKey] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"PRAGMA foreign_key_list({self.name})")
+             for _, _, dst_table, fkey, pkey, _, _, _ in cursor.fetchall():
+                 source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+         return source_fkeys
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"SELECT * FROM {self.name} "
+                            f"ORDER BY rowid LIMIT 1000")
+             table = cursor.fetch_arrow_table()
+         return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+     def _get_num_rows(self) -> Optional[int]:
+         return None
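Analogously for the SQLite backend, a minimal sketch; the database file and table below are hypothetical placeholders:

    from kumoai.experimental.rfm.backend.sqlite import SQLiteTable, connect

    # Open a local SQLite database file via the ADBC driver:
    conn = connect("data.db")

    # Column dtypes are derived from PRAGMA table_info affinities and foreign
    # keys from PRAGMA foreign_key_list, as implemented above:
    users = SQLiteTable(
        connection=conn,
        name="users",
        primary_key="id",
        time_column="created_at",
    )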
kumoai/experimental/rfm/base/__init__.py
@@ -0,0 +1,14 @@
+ from .source import SourceColumn, SourceForeignKey
+ from .column import Column
+ from .table import Table
+ from .sampler import SamplerOutput, TargetOutput, Sampler
+
+ __all__ = [
+     'SourceColumn',
+     'SourceForeignKey',
+     'Column',
+     'Table',
+     'SamplerOutput',
+     'TargetOutput',
+     'Sampler',
+ ]