kumoai 2.12.1__py3-none-any.whl → 2.14.0.dev202512141732__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. kumoai/__init__.py +18 -9
  2. kumoai/_version.py +1 -1
  3. kumoai/client/client.py +9 -13
  4. kumoai/client/pquery.py +6 -2
  5. kumoai/connector/utils.py +23 -2
  6. kumoai/experimental/rfm/__init__.py +162 -46
  7. kumoai/experimental/rfm/backend/__init__.py +0 -0
  8. kumoai/experimental/rfm/backend/local/__init__.py +42 -0
  9. kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +37 -90
  10. kumoai/experimental/rfm/backend/local/sampler.py +313 -0
  11. kumoai/experimental/rfm/backend/local/table.py +119 -0
  12. kumoai/experimental/rfm/backend/snow/__init__.py +37 -0
  13. kumoai/experimental/rfm/backend/snow/sampler.py +119 -0
  14. kumoai/experimental/rfm/backend/snow/table.py +135 -0
  15. kumoai/experimental/rfm/backend/sqlite/__init__.py +32 -0
  16. kumoai/experimental/rfm/backend/sqlite/sampler.py +112 -0
  17. kumoai/experimental/rfm/backend/sqlite/table.py +115 -0
  18. kumoai/experimental/rfm/base/__init__.py +23 -0
  19. kumoai/experimental/rfm/base/column.py +66 -0
  20. kumoai/experimental/rfm/base/sampler.py +773 -0
  21. kumoai/experimental/rfm/base/source.py +19 -0
  22. kumoai/experimental/rfm/{local_table.py → base/table.py} +152 -141
  23. kumoai/experimental/rfm/{local_graph.py → graph.py} +352 -80
  24. kumoai/experimental/rfm/infer/__init__.py +6 -0
  25. kumoai/experimental/rfm/infer/dtype.py +79 -0
  26. kumoai/experimental/rfm/infer/pkey.py +126 -0
  27. kumoai/experimental/rfm/infer/time_col.py +62 -0
  28. kumoai/experimental/rfm/pquery/pandas_executor.py +1 -1
  29. kumoai/experimental/rfm/rfm.py +233 -174
  30. kumoai/experimental/rfm/sagemaker.py +138 -0
  31. kumoai/spcs.py +1 -3
  32. kumoai/testing/decorators.py +1 -1
  33. kumoai/testing/snow.py +50 -0
  34. kumoai/utils/__init__.py +2 -0
  35. kumoai/utils/sql.py +3 -0
  36. {kumoai-2.12.1.dist-info → kumoai-2.14.0.dev202512141732.dist-info}/METADATA +12 -2
  37. {kumoai-2.12.1.dist-info → kumoai-2.14.0.dev202512141732.dist-info}/RECORD +40 -23
  38. kumoai/experimental/rfm/local_graph_sampler.py +0 -184
  39. kumoai/experimental/rfm/local_pquery_driver.py +0 -689
  40. kumoai/experimental/rfm/utils.py +0 -344
  41. {kumoai-2.12.1.dist-info → kumoai-2.14.0.dev202512141732.dist-info}/WHEEL +0 -0
  42. {kumoai-2.12.1.dist-info → kumoai-2.14.0.dev202512141732.dist-info}/licenses/LICENSE +0 -0
  43. {kumoai-2.12.1.dist-info → kumoai-2.14.0.dev202512141732.dist-info}/top_level.txt +0 -0
kumoai/experimental/rfm/backend/local/sampler.py
@@ -0,0 +1,313 @@
+ from typing import TYPE_CHECKING, Literal
+
+ import numpy as np
+ import pandas as pd
+ from kumoapi.pquery import ValidatedPredictiveQuery
+
+ from kumoai.experimental.rfm.backend.local import LocalGraphStore
+ from kumoai.experimental.rfm.base import Sampler, SamplerOutput
+ from kumoai.experimental.rfm.pquery import PQueryPandasExecutor
+ from kumoai.utils import ProgressLogger
+
+ if TYPE_CHECKING:
+     from kumoai.experimental.rfm import Graph
+
+
+ class LocalSampler(Sampler):
+     def __init__(
+         self,
+         graph: 'Graph',
+         verbose: bool | ProgressLogger = True,
+     ) -> None:
+         super().__init__(graph=graph)
+
+         import kumoai.kumolib as kumolib
+
+         self._graph_store = LocalGraphStore(graph, verbose)
+         self._graph_sampler = kumolib.NeighborSampler(
+             list(self.table_stype_dict.keys()),
+             self.edge_types,
+             {
+                 '__'.join(edge_type): colptr
+                 for edge_type, colptr in self._graph_store.colptr_dict.items()
+             },
+             {
+                 '__'.join(edge_type): row
+                 for edge_type, row in self._graph_store.row_dict.items()
+             },
+             self._graph_store.time_dict,
+         )
+
+     def _get_min_max_time_dict(
+         self,
+         table_names: list[str],
+     ) -> dict[str, tuple[pd.Timestamp, pd.Timestamp]]:
+         return {
+             key: value
+             for key, value in self._graph_store.min_max_time_dict.items()
+             if key in table_names
+         }
+
+     def _sample_subgraph(
+         self,
+         entity_table_name: str,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series | Literal['entity'],
+         columns_dict: dict[str, set[str]],
+         num_neighbors: list[int],
+     ) -> SamplerOutput:
+
+         index = self._graph_store.get_node_id(entity_table_name, entity_pkey)
+
+         if isinstance(anchor_time, pd.Series):
+             time = anchor_time.astype(int).to_numpy() // 1000**3  # to seconds
+         else:
+             assert anchor_time == 'entity'
+             time = self._graph_store.time_dict[entity_table_name][index]
+
+         (
+             row_dict,
+             col_dict,
+             node_dict,
+             batch_dict,
+             num_sampled_nodes_dict,
+             num_sampled_edges_dict,
+         ) = self._graph_sampler.sample(
+             {
+                 '__'.join(edge_type): num_neighbors
+                 for edge_type in self.edge_types
+             },
+             {},
+             entity_table_name,
+             index,
+             time,
+         )
+
+         df_dict: dict[str, pd.DataFrame] = {}
+         inverse_dict: dict[str, np.ndarray] = {}
+         for table_name, node in node_dict.items():
+             df = self._graph_store.df_dict[table_name]
+             columns = columns_dict[table_name]
+             if self.end_time_column_dict.get(table_name, None) in columns:
+                 df = df.iloc[node]
+             elif len(columns) == 0:
+                 df = df.iloc[node]
+             else:
+                 # Only store unique rows in `df` above a certain threshold:
+                 unique_node, inverse = np.unique(node, return_inverse=True)
+                 if len(node) > 1.05 * len(unique_node):
+                     df = df.iloc[unique_node]
+                     inverse_dict[table_name] = inverse
+                 else:
+                     df = df.iloc[node]
+             df = df.reset_index(drop=True)
+             df = df[list(columns)]
+             df_dict[table_name] = df
+
+         num_sampled_nodes_dict = {
+             table_name: num_sampled_nodes.tolist()
+             for table_name, num_sampled_nodes in
+             num_sampled_nodes_dict.items()
+         }
+
+         row_dict = {
+             edge_type: row_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         col_dict = {
+             edge_type: col_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         num_sampled_edges_dict = {
+             edge_type: num_sampled_edges_dict['__'.join(edge_type)].tolist()
+             for edge_type in self.edge_types
+         }
+
+         return SamplerOutput(
+             anchor_time=time * 1000**3,  # to nanoseconds
+             df_dict=df_dict,
+             inverse_dict=inverse_dict,
+             batch_dict=batch_dict,
+             num_sampled_nodes_dict=num_sampled_nodes_dict,
+             row_dict=row_dict,
+             col_dict=col_dict,
+             num_sampled_edges_dict=num_sampled_edges_dict,
+         )
+
+     def _sample_entity_table(
+         self,
+         table_name: str,
+         columns: set[str],
+         num_rows: int,
+         random_seed: int | None = None,
+     ) -> pd.DataFrame:
+         pkey_map = self._graph_store.pkey_map_dict[table_name]
+         if len(pkey_map) > num_rows:
+             pkey_map = pkey_map.sample(
+                 n=num_rows,
+                 random_state=random_seed,
+                 ignore_index=True,
+             )
+         df = self._graph_store.df_dict[table_name]
+         df = df.iloc[pkey_map['arange']][list(columns)]
+         return df
+
+     def _sample_target(
+         self,
+         query: ValidatedPredictiveQuery,
+         entity_df: pd.DataFrame,
+         train_index: np.ndarray,
+         train_time: pd.Series,
+         num_train_examples: int,
+         test_index: np.ndarray,
+         test_time: pd.Series,
+         num_test_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+     ) -> tuple[pd.Series, np.ndarray, pd.Series, np.ndarray]:
+
+         train_y, train_mask = self._sample_target_set(
+             query=query,
+             pkey=entity_df[self.primary_key_dict[query.entity_table]],
+             index=train_index,
+             anchor_time=train_time,
+             num_examples=num_train_examples,
+             columns_dict=columns_dict,
+             time_offset_dict=time_offset_dict,
+         )
+
+         test_y, test_mask = self._sample_target_set(
+             query=query,
+             pkey=entity_df[self.primary_key_dict[query.entity_table]],
+             index=test_index,
+             anchor_time=test_time,
+             num_examples=num_test_examples,
+             columns_dict=columns_dict,
+             time_offset_dict=time_offset_dict,
+         )
+
+         return train_y, train_mask, test_y, test_mask
+
+     def _sample_target_set(
+         self,
+         query: ValidatedPredictiveQuery,
+         pkey: pd.Series,
+         index: np.ndarray,
+         anchor_time: pd.Series,
+         num_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+         batch_size: int = 10_000,
+     ) -> tuple[pd.Series, np.ndarray]:
+
+         num_hops = 1 if len(time_offset_dict) > 0 else 0
+         num_neighbors_dict: dict[str, list[int]] = {}
+         unix_time_offset_dict: dict[str, list[list[int | None]]] = {}
+         for edge_type, (start, end) in time_offset_dict.items():
+             unix_time_offset_dict['__'.join(edge_type)] = [[
+                 date_offset_to_seconds(start) if start is not None else None,
+                 date_offset_to_seconds(end),
+             ]]
+         for edge_type in set(self.edge_types) - set(time_offset_dict.keys()):
+             num_neighbors_dict['__'.join(edge_type)] = [0] * num_hops
+
+         if anchor_time.dtype != 'datetime64[ns]':
+             anchor_time = anchor_time.astype('datetime64')
+
+         count = 0
+         ys: list[pd.Series] = []
+         mask = np.full(len(index), False, dtype=bool)
+         for start in range(0, len(index), batch_size):
+             subset = pkey.iloc[index[start:start + batch_size]]
+             time = anchor_time.iloc[start:start + batch_size]
+
+             _, _, node_dict, batch_dict, _, _ = self._graph_sampler.sample(
+                 num_neighbors_dict,
+                 unix_time_offset_dict,
+                 query.entity_table,
+                 self._graph_store.get_node_id(query.entity_table, subset),
+                 time.astype(int).to_numpy() // 1000**3,  # to seconds
+             )
+
+             feat_dict: dict[str, pd.DataFrame] = {}
+             time_dict: dict[str, pd.Series] = {}
+             for table_name, columns in columns_dict.items():
+                 df = self._graph_store.df_dict[table_name]
+                 df = df.iloc[node_dict[table_name]].reset_index(drop=True)
+                 df = df[list(columns)]
+                 feat_dict[table_name] = df
+
+                 time_column = self.time_column_dict.get(table_name)
+                 if time_column in columns:
+                     time_dict[table_name] = df[time_column]
+
+             y, _mask = PQueryPandasExecutor().execute(
+                 query=query,
+                 feat_dict=feat_dict,
+                 time_dict=time_dict,
+                 batch_dict=batch_dict,
+                 anchor_time=time,
+                 num_forecasts=query.num_forecasts,
+             )
+             ys.append(y)
+             mask[start:start + batch_size] = _mask
+
+             count += len(y)
+             if count >= num_examples:
+                 break
+
+         if len(ys) == 0:
+             y = pd.Series([], dtype=float)
+         elif len(ys) == 1:
+             y = ys[0]
+         else:
+             y = pd.concat(ys, axis=0, ignore_index=True)
+
+         return y, mask
+
+
+ # Helper Methods ##############################################################
+
+
+ def date_offset_to_seconds(offset: pd.DateOffset) -> int:
+     r"""Convert a :class:`pandas.DateOffset` into a number of seconds.
+
+     .. note::
+         We are conservative and take months and years as their maximum value.
+         Additional values are then dropped in label computation where we know
+         the actual dates.
+     """
+     MAX_DAYS_IN_MONTH = 31
+     MAX_DAYS_IN_YEAR = 366
+
+     SECONDS_IN_MINUTE = 60
+     SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
+     SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
+
+     total_sec = 0
+     multiplier = getattr(offset, 'n', 1)  # The multiplier (if present).
+
+     for attr, value in offset.__dict__.items():
+         if value is None or value == 0:
+             continue
+         scaled_value = value * multiplier
+         if attr == 'years':
+             total_sec += scaled_value * MAX_DAYS_IN_YEAR * SECONDS_IN_DAY
+         elif attr == 'months':
+             total_sec += scaled_value * MAX_DAYS_IN_MONTH * SECONDS_IN_DAY
+         elif attr == 'days':
+             total_sec += scaled_value * SECONDS_IN_DAY
+         elif attr == 'hours':
+             total_sec += scaled_value * SECONDS_IN_HOUR
+         elif attr == 'minutes':
+             total_sec += scaled_value * SECONDS_IN_MINUTE
+         elif attr == 'seconds':
+             total_sec += scaled_value
+
+     return total_sec
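A usage sketch for the `date_offset_to_seconds` helper added above; the sample offsets are hypothetical, and the import path follows the new backend/local/sampler.py module shown in this hunk:

import pandas as pd

from kumoai.experimental.rfm.backend.local.sampler import date_offset_to_seconds

# Months are counted at their maximum length of 31 days:
assert date_offset_to_seconds(pd.DateOffset(months=2)) == 2 * 31 * 24 * 60 * 60

# Mixed components are summed, e.g. one day plus six hours:
assert date_offset_to_seconds(pd.DateOffset(days=1, hours=6)) == 30 * 60 * 60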
kumoai/experimental/rfm/backend/local/table.py
@@ -0,0 +1,119 @@
+ import warnings
+ from typing import List, Optional, cast
+
+ import pandas as pd
+
+ from kumoai.experimental.rfm.base import (
+     DataBackend,
+     SourceColumn,
+     SourceForeignKey,
+     Table,
+ )
+ from kumoai.experimental.rfm.infer import infer_dtype
+
+
+ class LocalTable(Table):
+     r"""A table backed by a :class:`pandas.DataFrame`.
+
+     A :class:`LocalTable` fully specifies the relevant metadata, *i.e.*
+     selected columns, column semantic types, primary keys and time columns.
+     :class:`LocalTable` is used to create a :class:`Graph`.
+
+     .. code-block:: python
+
+         import pandas as pd
+         import kumoai.experimental.rfm as rfm
+
+         # Load data from a CSV file:
+         df = pd.read_csv("data.csv")
+
+         # Create a table from a `pandas.DataFrame` and infer its metadata ...
+         table = rfm.LocalTable(df, name="my_table").infer_metadata()
+
+         # ... or create a table explicitly:
+         table = rfm.LocalTable(
+             df=df,
+             name="my_table",
+             primary_key="id",
+             time_column="time",
+             end_time_column=None,
+         )
+
+         # Verify metadata:
+         table.print_metadata()
+
+         # Change the semantic type of a column:
+         table[column].stype = "text"
+
+     Args:
+         df: The data frame to create this table from.
+         name: The name of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         df: pd.DataFrame,
+         name: str,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         if df.empty:
+             raise ValueError("Data frame is empty")
+         if isinstance(df.columns, pd.MultiIndex):
+             raise ValueError("Data frame must not have a multi-index")
+         if not df.columns.is_unique:
+             raise ValueError("Data frame must have unique column names")
+         if any(col == '' for col in df.columns):
+             raise ValueError("Data frame must have non-empty column names")
+
+         self._data = df.copy(deep=False)
+
+         super().__init__(
+             name=name,
+             columns=list(df.columns),
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     @property
+     def backend(self) -> DataBackend:
+         return cast(DataBackend, DataBackend.LOCAL)
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         for column in self._data.columns:
+             ser = self._data[column]
+             try:
+                 dtype = infer_dtype(ser)
+             except Exception:
+                 warnings.warn(f"Data type inference for column '{column}' in "
+                               f"table '{self.name}' failed. Consider changing "
+                               f"the data type of the column to use it within "
+                               f"this table.")
+                 continue
+
+             source_column = SourceColumn(
+                 name=column,
+                 dtype=dtype,
+                 is_primary_key=False,
+                 is_unique_key=False,
+                 is_nullable=True,
+             )
+             source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         return []
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         return self._data
+
+     def _get_num_rows(self) -> Optional[int]:
+         return len(self._data)
kumoai/experimental/rfm/backend/snow/__init__.py
@@ -0,0 +1,37 @@
+ from typing import Any, TypeAlias
+
+ try:
+     import snowflake.connector
+ except ImportError:
+     raise ImportError("No module named 'snowflake'. Please install Kumo SDK "
+                       "with the 'snowflake' extension via "
+                       "`pip install kumoai[snowflake]`.")
+
+ Connection: TypeAlias = snowflake.connector.SnowflakeConnection
+
+
+ def connect(**kwargs: Any) -> Connection:
+     r"""Opens a connection to a :class:`snowflake` database.
+
+     If available, will return a connection to the active session.
+
+     kwargs: Connection arguments, following the :class:`snowflake` protocol.
+     """
+     try:
+         from snowflake.snowpark.context import get_active_session
+         return get_active_session().connection
+     except Exception:
+         pass
+
+     return snowflake.connector.connect(**kwargs)
+
+
+ from .table import SnowTable  # noqa: E402
+ from .sampler import SnowSampler  # noqa: E402
+
+ __all__ = [
+     'connect',
+     'Connection',
+     'SnowTable',
+     'SnowSampler',
+ ]
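A brief usage sketch for the new `connect` helper: inside a Snowpark session it reuses the active session's connection; otherwise, standard `snowflake.connector` arguments are passed through (the credentials below are placeholders):

from kumoai.experimental.rfm.backend import snow

conn = snow.connect(
    account="<account>",
    user="<user>",
    password="<password>",
)
with conn.cursor() as cursor:
    cursor.execute("SELECT CURRENT_VERSION()")
    print(cursor.fetchone())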
kumoai/experimental/rfm/backend/snow/sampler.py
@@ -0,0 +1,119 @@
+ from typing import TYPE_CHECKING, Literal
+
+ import numpy as np
+ import pandas as pd
+ from kumoapi.pquery import ValidatedPredictiveQuery
+
+ from kumoai.experimental.rfm.backend.snow import SnowTable
+ from kumoai.experimental.rfm.base import Sampler, SamplerOutput
+ from kumoai.utils import ProgressLogger, quote_ident
+
+ if TYPE_CHECKING:
+     from kumoai.experimental.rfm import Graph
+
+
+ class SnowSampler(Sampler):
+     def __init__(
+         self,
+         graph: 'Graph',
+         verbose: bool | ProgressLogger = True,
+     ) -> None:
+         super().__init__(graph=graph)
+
+         self._fqn_dict: dict[str, str] = {}
+         for table in graph.tables.values():
+             assert isinstance(table, SnowTable)
+             self._connection = table._connection
+             self._fqn_dict[table.name] = table.fqn
+
+     @property
+     def fqn_dict(self) -> dict[str, str]:
+         r"""The fully-qualified quoted names for all tables in the graph."""
+         return self._fqn_dict
+
+     def _get_min_max_time_dict(
+         self,
+         table_names: list[str],
+     ) -> dict[str, tuple[pd.Timestamp, pd.Timestamp]]:
+         selects: list[str] = []
+         for table_name in table_names:
+             time_column = self.time_column_dict[table_name]
+             select = (f"SELECT\n"
+                       f" %s as table_name,\n"
+                       f" MIN({quote_ident(time_column)}) as min_date,\n"
+                       f" MAX({quote_ident(time_column)}) as max_date\n"
+                       f"FROM {self.fqn_dict[table_name]}")
+             selects.append(select)
+         sql = "\nUNION ALL\n".join(selects)
+
+         out_dict: dict[str, tuple[pd.Timestamp, pd.Timestamp]] = {}
+         with self._connection.cursor() as cursor:
+             cursor.execute(sql, table_names)
+             rows = cursor.fetchall()
+             for table_name, _min, _max in rows:
+                 out_dict[table_name] = (
+                     pd.Timestamp.max if _min is None else pd.Timestamp(_min),
+                     pd.Timestamp.min if _max is None else pd.Timestamp(_max),
+                 )
+
+         return out_dict
+
+     def _sample_subgraph(
+         self,
+         entity_table_name: str,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series | Literal['entity'],
+         columns_dict: dict[str, set[str]],
+         num_neighbors: list[int],
+     ) -> SamplerOutput:
+         raise NotImplementedError
+
+     def _sample_entity_table(
+         self,
+         table_name: str,
+         columns: set[str],
+         num_rows: int,
+         random_seed: int | None = None,
+     ) -> pd.DataFrame:
+         # NOTE Snowflake does support `SEED` only as part of `SYSTEM` sampling.
+         num_rows = min(num_rows, 1_000_000)  # Snowflake's upper limit.
+
+         filters: list[str] = []
+         primary_key = self.primary_key_dict[table_name]
+         if self.source_table_dict[table_name][primary_key].is_nullable:
+             filters.append(f" {quote_ident(primary_key)} IS NOT NULL")
+         time_column = self.time_column_dict.get(table_name)
+         if (time_column is not None and
+                 self.source_table_dict[table_name][time_column].is_nullable):
+             filters.append(f" {quote_ident(time_column)} IS NOT NULL")
+
+         sql = (f"SELECT {', '.join(quote_ident(col) for col in columns)}\n"
+                f"FROM {self.fqn_dict[table_name]}\n"
+                f"SAMPLE ROW ({num_rows} ROWS)")
+         if len(filters) > 0:
+             sql += f"\nWHERE{' AND'.join(filters)}"
+
+         with self._connection.cursor() as cursor:
+             # NOTE This may return duplicate primary keys. This is okay.
+             cursor.execute(sql)
+             table = cursor.fetch_arrow_all()
+
+         return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+     def _sample_target(
+         self,
+         query: ValidatedPredictiveQuery,
+         entity_df: pd.DataFrame,
+         train_index: np.ndarray,
+         train_time: pd.Series,
+         num_train_examples: int,
+         test_index: np.ndarray,
+         test_time: pd.Series,
+         num_test_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+     ) -> tuple[pd.Series, np.ndarray, pd.Series, np.ndarray]:
+         raise NotImplementedError
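For reference, a sketch of the statement assembled by `_get_min_max_time_dict` above for two hypothetical tables USERS and ORDERS with time columns CREATED_AT and PLACED_AT; the fully-qualified names and the quoting produced by `quote_ident` are illustrative only:

# One UNION ALL branch per table; table names are bound as %s parameters, and
# the cursor returns one (table_name, min_date, max_date) row per table:
sql = (
    'SELECT\n'
    ' %s as table_name,\n'
    ' MIN("CREATED_AT") as min_date,\n'
    ' MAX("CREATED_AT") as max_date\n'
    'FROM "DB"."SCHEMA"."USERS"\n'
    'UNION ALL\n'
    'SELECT\n'
    ' %s as table_name,\n'
    ' MIN("PLACED_AT") as min_date,\n'
    ' MAX("PLACED_AT") as max_date\n'
    'FROM "DB"."SCHEMA"."ORDERS"'
)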