kumoai 2.13.0.dev202512040649__cp313-cp313-win_amd64.whl → 2.14.0.dev202512211732__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. kumoai/__init__.py +12 -0
  2. kumoai/_version.py +1 -1
  3. kumoai/client/pquery.py +6 -2
  4. kumoai/experimental/rfm/__init__.py +33 -8
  5. kumoai/experimental/rfm/authenticate.py +3 -4
  6. kumoai/experimental/rfm/backend/local/__init__.py +4 -0
  7. kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +52 -91
  8. kumoai/experimental/rfm/backend/local/sampler.py +315 -0
  9. kumoai/experimental/rfm/backend/local/table.py +21 -16
  10. kumoai/experimental/rfm/backend/snow/__init__.py +2 -0
  11. kumoai/experimental/rfm/backend/snow/sampler.py +252 -0
  12. kumoai/experimental/rfm/backend/snow/table.py +102 -48
  13. kumoai/experimental/rfm/backend/sqlite/__init__.py +4 -2
  14. kumoai/experimental/rfm/backend/sqlite/sampler.py +349 -0
  15. kumoai/experimental/rfm/backend/sqlite/table.py +84 -31
  16. kumoai/experimental/rfm/base/__init__.py +26 -3
  17. kumoai/experimental/rfm/base/column.py +14 -12
  18. kumoai/experimental/rfm/base/column_expression.py +50 -0
  19. kumoai/experimental/rfm/base/sampler.py +773 -0
  20. kumoai/experimental/rfm/base/source.py +1 -0
  21. kumoai/experimental/rfm/base/sql_sampler.py +84 -0
  22. kumoai/experimental/rfm/base/sql_table.py +229 -0
  23. kumoai/experimental/rfm/base/table.py +173 -138
  24. kumoai/experimental/rfm/graph.py +302 -108
  25. kumoai/experimental/rfm/infer/__init__.py +6 -4
  26. kumoai/experimental/rfm/infer/dtype.py +3 -3
  27. kumoai/experimental/rfm/infer/pkey.py +4 -2
  28. kumoai/experimental/rfm/infer/stype.py +35 -0
  29. kumoai/experimental/rfm/infer/time_col.py +1 -2
  30. kumoai/experimental/rfm/pquery/executor.py +27 -27
  31. kumoai/experimental/rfm/pquery/pandas_executor.py +30 -32
  32. kumoai/experimental/rfm/rfm.py +299 -230
  33. kumoai/experimental/rfm/sagemaker.py +4 -4
  34. kumoai/kumolib.cp313-win_amd64.pyd +0 -0
  35. kumoai/pquery/predictive_query.py +10 -6
  36. kumoai/testing/snow.py +50 -0
  37. kumoai/utils/__init__.py +3 -2
  38. kumoai/utils/progress_logger.py +178 -12
  39. kumoai/utils/sql.py +3 -0
  40. {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202512211732.dist-info}/METADATA +3 -2
  41. {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202512211732.dist-info}/RECORD +44 -36
  42. kumoai/experimental/rfm/local_graph_sampler.py +0 -223
  43. kumoai/experimental/rfm/local_pquery_driver.py +0 -689
  44. {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202512211732.dist-info}/WHEEL +0 -0
  45. {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202512211732.dist-info}/licenses/LICENSE +0 -0
  46. {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202512211732.dist-info}/top_level.txt +0 -0
kumoai/experimental/rfm/backend/local/sampler.py (new file)
@@ -0,0 +1,315 @@
+ from typing import TYPE_CHECKING, Literal
+
+ import numpy as np
+ import pandas as pd
+ from kumoapi.pquery import ValidatedPredictiveQuery
+
+ from kumoai.experimental.rfm.backend.local import LocalGraphStore
+ from kumoai.experimental.rfm.base import Sampler, SamplerOutput
+ from kumoai.experimental.rfm.pquery import PQueryPandasExecutor
+ from kumoai.utils import ProgressLogger
+
+ if TYPE_CHECKING:
+     from kumoai.experimental.rfm import Graph
+
+
+ class LocalSampler(Sampler):
+     def __init__(
+         self,
+         graph: 'Graph',
+         verbose: bool | ProgressLogger = True,
+     ) -> None:
+         super().__init__(graph=graph, verbose=verbose)
+
+         import kumoai.kumolib as kumolib
+
+         self._graph_store = LocalGraphStore(graph, verbose)
+         self._graph_sampler = kumolib.NeighborSampler(
+             list(self.table_stype_dict.keys()),
+             self.edge_types,
+             {
+                 '__'.join(edge_type): colptr
+                 for edge_type, colptr in self._graph_store.colptr_dict.items()
+             },
+             {
+                 '__'.join(edge_type): row
+                 for edge_type, row in self._graph_store.row_dict.items()
+             },
+             self._graph_store.time_dict,
+         )
+
+     def _get_min_max_time_dict(
+         self,
+         table_names: list[str],
+     ) -> dict[str, tuple[pd.Timestamp, pd.Timestamp]]:
+         return {
+             key: value
+             for key, value in self._graph_store.min_max_time_dict.items()
+             if key in table_names
+         }
+
+     def _sample_subgraph(
+         self,
+         entity_table_name: str,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series | Literal['entity'],
+         columns_dict: dict[str, set[str]],
+         num_neighbors: list[int],
+     ) -> SamplerOutput:
+
+         index = self._graph_store.get_node_id(entity_table_name, entity_pkey)
+
+         if isinstance(anchor_time, pd.Series):
+             time = anchor_time.astype(int).to_numpy() // 1000**3  # to seconds
+         else:
+             assert anchor_time == 'entity'
+             time = self._graph_store.time_dict[entity_table_name][index]
+
+         (
+             row_dict,
+             col_dict,
+             node_dict,
+             batch_dict,
+             num_sampled_nodes_dict,
+             num_sampled_edges_dict,
+         ) = self._graph_sampler.sample(
+             {
+                 '__'.join(edge_type): num_neighbors
+                 for edge_type in self.edge_types
+             },
+             {},
+             entity_table_name,
+             index,
+             time,
+         )
+
+         df_dict: dict[str, pd.DataFrame] = {}
+         inverse_dict: dict[str, np.ndarray] = {}
+         for table_name, node in node_dict.items():
+             df = self._graph_store.df_dict[table_name]
+             columns = columns_dict[table_name]
+             if self.end_time_column_dict.get(table_name, None) in columns:
+                 df = df.iloc[node]
+             elif len(columns) == 0:
+                 df = df.iloc[node]
+             else:
+                 # Only store unique rows in `df` above a certain threshold:
+                 unique_node, inverse = np.unique(node, return_inverse=True)
+                 if len(node) > 1.05 * len(unique_node):
+                     df = df.iloc[unique_node]
+                     inverse_dict[table_name] = inverse
+                 else:
+                     df = df.iloc[node]
+             df = df.reset_index(drop=True)
+             df = df[list(columns)]
+             df_dict[table_name] = df
+
+         num_sampled_nodes_dict = {
+             table_name: num_sampled_nodes.tolist()
+             for table_name, num_sampled_nodes in
+             num_sampled_nodes_dict.items()
+         }
+
+         row_dict = {
+             edge_type: row_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         col_dict = {
+             edge_type: col_dict['__'.join(edge_type)]
+             for edge_type in self.edge_types
+         }
+         num_sampled_edges_dict = {
+             edge_type: num_sampled_edges_dict['__'.join(edge_type)].tolist()
+             for edge_type in self.edge_types
+         }
+
+         return SamplerOutput(
+             anchor_time=time * 1000**3,  # to nanoseconds
+             df_dict=df_dict,
+             inverse_dict=inverse_dict,
+             batch_dict=batch_dict,
+             num_sampled_nodes_dict=num_sampled_nodes_dict,
+             row_dict=row_dict,
+             col_dict=col_dict,
+             num_sampled_edges_dict=num_sampled_edges_dict,
+         )
+
+     def _sample_entity_table(
+         self,
+         table_name: str,
+         columns: set[str],
+         num_rows: int,
+         random_seed: int | None = None,
+     ) -> pd.DataFrame:
+         pkey_map = self._graph_store.pkey_map_dict[table_name]
+         if len(pkey_map) > num_rows:
+             pkey_map = pkey_map.sample(
+                 n=num_rows,
+                 random_state=random_seed,
+                 ignore_index=True,
+             )
+         df = self._graph_store.df_dict[table_name]
+         df = df.iloc[pkey_map['arange']][list(columns)]
+         return df
+
+     def _sample_target(
+         self,
+         query: ValidatedPredictiveQuery,
+         entity_df: pd.DataFrame,
+         train_index: np.ndarray,
+         train_time: pd.Series,
+         num_train_examples: int,
+         test_index: np.ndarray,
+         test_time: pd.Series,
+         num_test_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+     ) -> tuple[pd.Series, np.ndarray, pd.Series, np.ndarray]:
+
+         train_y, train_mask = self._sample_target_set(
+             query=query,
+             pkey=entity_df[self.primary_key_dict[query.entity_table]],
+             index=train_index,
+             anchor_time=train_time,
+             num_examples=num_train_examples,
+             columns_dict=columns_dict,
+             time_offset_dict=time_offset_dict,
+         )
+
+         test_y, test_mask = self._sample_target_set(
+             query=query,
+             pkey=entity_df[self.primary_key_dict[query.entity_table]],
+             index=test_index,
+             anchor_time=test_time,
+             num_examples=num_test_examples,
+             columns_dict=columns_dict,
+             time_offset_dict=time_offset_dict,
+         )
+
+         return train_y, train_mask, test_y, test_mask
+
+     # Helper Methods ##########################################################
+
+     def _sample_target_set(
+         self,
+         query: ValidatedPredictiveQuery,
+         pkey: pd.Series,
+         index: np.ndarray,
+         anchor_time: pd.Series,
+         num_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+         batch_size: int = 10_000,
+     ) -> tuple[pd.Series, np.ndarray]:
+
+         num_hops = 1 if len(time_offset_dict) > 0 else 0
+         num_neighbors_dict: dict[str, list[int]] = {}
+         unix_time_offset_dict: dict[str, list[list[int | None]]] = {}
+         for edge_type, (start, end) in time_offset_dict.items():
+             unix_time_offset_dict['__'.join(edge_type)] = [[
+                 date_offset_to_seconds(start) if start is not None else None,
+                 date_offset_to_seconds(end),
+             ]]
+         for edge_type in set(self.edge_types) - set(time_offset_dict.keys()):
+             num_neighbors_dict['__'.join(edge_type)] = [0] * num_hops
+
+         if anchor_time.dtype != 'datetime64[ns]':
+             anchor_time = anchor_time.astype('datetime64')
+
+         count = 0
+         ys: list[pd.Series] = []
+         mask = np.full(len(index), False, dtype=bool)
+         for start in range(0, len(index), batch_size):
+             subset = pkey.iloc[index[start:start + batch_size]]
+             time = anchor_time.iloc[start:start + batch_size]
+
+             _, _, node_dict, batch_dict, _, _ = self._graph_sampler.sample(
+                 num_neighbors_dict,
+                 unix_time_offset_dict,
+                 query.entity_table,
+                 self._graph_store.get_node_id(query.entity_table, subset),
+                 time.astype(int).to_numpy() // 1000**3,  # to seconds
+             )
+
+             feat_dict: dict[str, pd.DataFrame] = {}
+             time_dict: dict[str, pd.Series] = {}
+             for table_name, columns in columns_dict.items():
+                 df = self._graph_store.df_dict[table_name]
+                 df = df.iloc[node_dict[table_name]].reset_index(drop=True)
+                 df = df[list(columns)]
+                 feat_dict[table_name] = df
+
+                 time_column = self.time_column_dict.get(table_name)
+                 if time_column in columns:
+                     time_dict[table_name] = df[time_column]
+
+             y, _mask = PQueryPandasExecutor().execute(
+                 query=query,
+                 feat_dict=feat_dict,
+                 time_dict=time_dict,
+                 batch_dict=batch_dict,
+                 anchor_time=time,
+                 num_forecasts=query.num_forecasts,
+             )
+             ys.append(y)
+             mask[start:start + batch_size] = _mask
+
+             count += len(y)
+             if count >= num_examples:
+                 break
+
+         if len(ys) == 0:
+             y = pd.Series([], dtype=float)
+         elif len(ys) == 1:
+             y = ys[0]
+         else:
+             y = pd.concat(ys, axis=0, ignore_index=True)
+
+         return y, mask
+
+
+ # Helper Functions ############################################################
+
+
+ def date_offset_to_seconds(offset: pd.DateOffset) -> int:
+     r"""Convert a :class:`pandas.DateOffset` into a number of seconds.
+
+     .. note::
+         We are conservative and take months and years as their maximum value.
+         Additional values are then dropped in label computation where we know
+         the actual dates.
+     """
+     MAX_DAYS_IN_MONTH = 31
+     MAX_DAYS_IN_YEAR = 366
+
+     SECONDS_IN_MINUTE = 60
+     SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
+     SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
+
+     total_sec = 0
+     multiplier = getattr(offset, 'n', 1)  # The multiplier (if present).
+
+     for attr, value in offset.__dict__.items():
+         if value is None or value == 0:
+             continue
+         scaled_value = value * multiplier
+         if attr == 'years':
+             total_sec += scaled_value * MAX_DAYS_IN_YEAR * SECONDS_IN_DAY
+         elif attr == 'months':
+             total_sec += scaled_value * MAX_DAYS_IN_MONTH * SECONDS_IN_DAY
+         elif attr == 'days':
+             total_sec += scaled_value * SECONDS_IN_DAY
+         elif attr == 'hours':
+             total_sec += scaled_value * SECONDS_IN_HOUR
+         elif attr == 'minutes':
+             total_sec += scaled_value * SECONDS_IN_MINUTE
+         elif attr == 'seconds':
+             total_sec += scaled_value
+
+     return total_sec
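
The `date_offset_to_seconds` helper above intentionally over-approximates months (31 days) and years (366 days) so that no candidate rows are cut off before the exact dates are known; rows outside the true window are dropped later during label computation. A minimal sketch of what this means in practice, assuming the module is importable from its new file location in this wheel:

import pandas as pd

# Assumed import path, mirroring the new file's location:
from kumoai.experimental.rfm.backend.local.sampler import date_offset_to_seconds

# Days and hours convert exactly: 7 * 86_400 + 12 * 3_600 = 648_000 seconds.
assert date_offset_to_seconds(pd.DateOffset(days=7, hours=12)) == 648_000

# Months and years are over-approximated (31 and 366 days, respectively):
assert date_offset_to_seconds(pd.DateOffset(months=1)) == 31 * 86_400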
kumoai/experimental/rfm/backend/local/table.py
@@ -1,9 +1,10 @@
  import warnings
- from typing import List, Optional
+ from typing import cast

  import pandas as pd
+ from kumoapi.model_plan import MissingType

- from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+ from kumoai.experimental.rfm.base import DataBackend, SourceColumn, Table
  from kumoai.experimental.rfm.infer import infer_dtype


@@ -52,9 +53,9 @@ class LocalTable(Table):
          self,
          df: pd.DataFrame,
          name: str,
-         primary_key: Optional[str] = None,
-         time_column: Optional[str] = None,
-         end_time_column: Optional[str] = None,
+         primary_key: MissingType | str | None = MissingType.VALUE,
+         time_column: str | None = None,
+         end_time_column: str | None = None,
      ) -> None:

          if df.empty:
@@ -76,17 +77,23 @@ class LocalTable(Table):
              end_time_column=end_time_column,
          )

-     def _get_source_columns(self) -> List[SourceColumn]:
-         source_columns: List[SourceColumn] = []
+     @property
+     def backend(self) -> DataBackend:
+         return cast(DataBackend, DataBackend.LOCAL)
+
+     def _get_source_columns(self) -> list[SourceColumn]:
+         source_columns: list[SourceColumn] = []
          for column in self._data.columns:
              ser = self._data[column]
              try:
                  dtype = infer_dtype(ser)
              except Exception:
-                 warnings.warn(f"Data type inference for column '{column}' in "
-                               f"table '{self.name}' failed. Consider changing "
-                               f"the data type of the column to use it within "
-                               f"this table.")
+                 warnings.warn(f"Encountered unsupported data type "
+                               f"'{ser.dtype}' for column '{column}' in table "
+                               f"'{self.name}'. Please change the data type of "
+                               f"the column in the `pandas.DataFrame` to use "
+                               f"it within this table, or remove it to "
+                               f"suppress this warning.")
                  continue

              source_column = SourceColumn(
@@ -94,16 +101,14 @@ class LocalTable(Table):
                  dtype=dtype,
                  is_primary_key=False,
                  is_unique_key=False,
+                 is_nullable=True,
              )
              source_columns.append(source_column)

          return source_columns

-     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
-         return []
-
-     def _get_sample_df(self) -> pd.DataFrame:
+     def _get_source_sample_df(self) -> pd.DataFrame:
          return self._data

-     def _get_num_rows(self) -> Optional[int]:
+     def _get_num_rows(self) -> int | None:
          return len(self._data)
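
The constructor change above replaces `Optional[str] = None` with a `MissingType.VALUE` sentinel default for `primary_key`, presumably so the table can distinguish an unspecified primary key from an explicit `None`. A hypothetical usage sketch against the signature shown in the diff (the import path and column names are illustrative, not taken from the diff):

import pandas as pd

from kumoai.experimental.rfm import LocalTable  # assumed public import path

df = pd.DataFrame({
    'user_id': [1, 2, 3],
    'signup_at': pd.to_datetime(['2024-01-01', '2024-02-01', '2024-03-01']),
})

# Explicit keyword arguments bypass the `MissingType.VALUE` sentinel default:
users = LocalTable(df=df, name='users', primary_key='user_id',
                   time_column='signup_at')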
kumoai/experimental/rfm/backend/snow/__init__.py
@@ -27,9 +27,11 @@ def connect(**kwargs: Any) -> Connection:


  from .table import SnowTable  # noqa: E402
+ from .sampler import SnowSampler  # noqa: E402

  __all__ = [
      'connect',
      'Connection',
      'SnowTable',
+     'SnowSampler',
  ]
kumoai/experimental/rfm/backend/snow/sampler.py (new file)
@@ -0,0 +1,252 @@
+ import json
+ from collections.abc import Iterator
+ from contextlib import contextmanager
+
+ import numpy as np
+ import pandas as pd
+ import pyarrow as pa
+ from kumoapi.pquery import ValidatedPredictiveQuery
+
+ from kumoai.experimental.rfm.backend.snow import Connection
+ from kumoai.experimental.rfm.base import SQLSampler
+ from kumoai.experimental.rfm.pquery import PQueryPandasExecutor
+ from kumoai.utils import quote_ident
+
+
+ @contextmanager
+ def paramstyle(connection: Connection, style: str = 'qmark') -> Iterator[None]:
+     _style = connection._paramstyle
+     connection._paramstyle = style
+     yield
+     connection._paramstyle = _style
+
+
+ class SnowSampler(SQLSampler):
+     def _get_min_max_time_dict(
+         self,
+         table_names: list[str],
+     ) -> dict[str, tuple[pd.Timestamp, pd.Timestamp]]:
+         selects: list[str] = []
+         for table_name in table_names:
+             time_column = self.time_column_dict[table_name]
+             select = (f"SELECT\n"
+                       f" ? as table_name,\n"
+                       f" MIN({quote_ident(time_column)}) as min_date,\n"
+                       f" MAX({quote_ident(time_column)}) as max_date\n"
+                       f"FROM {self.fqn_dict[table_name]}")
+             selects.append(select)
+         sql = "\nUNION ALL\n".join(selects)
+
+         out_dict: dict[str, tuple[pd.Timestamp, pd.Timestamp]] = {}
+         with paramstyle(self._connection), self._connection.cursor() as cursor:
+             cursor.execute(sql, table_names)
+             rows = cursor.fetchall()
+             for table_name, _min, _max in rows:
+                 out_dict[table_name] = (
+                     pd.Timestamp.max if _min is None else pd.Timestamp(_min),
+                     pd.Timestamp.min if _max is None else pd.Timestamp(_max),
+                 )
+
+         return out_dict
+
+     def _sample_entity_table(
+         self,
+         table_name: str,
+         columns: set[str],
+         num_rows: int,
+         random_seed: int | None = None,
+     ) -> pd.DataFrame:
+         # NOTE Snowflake does support `SEED` only as part of `SYSTEM` sampling.
+         num_rows = min(num_rows, 1_000_000)  # Snowflake's upper limit.
+
+         filters: list[str] = []
+         primary_key = self.primary_key_dict[table_name]
+         if self.source_table_dict[table_name][primary_key].is_nullable:
+             filters.append(f" {quote_ident(primary_key)} IS NOT NULL")
+         time_column = self.time_column_dict.get(table_name)
+         if (time_column is not None and
+                 self.source_table_dict[table_name][time_column].is_nullable):
+             filters.append(f" {quote_ident(time_column)} IS NOT NULL")
+
+         sql = (f"SELECT {', '.join(quote_ident(col) for col in columns)}\n"
+                f"FROM {self.fqn_dict[table_name]}\n"
+                f"SAMPLE ROW ({num_rows} ROWS)")
+         if len(filters) > 0:
+             sql += f"\nWHERE{' AND'.join(filters)}"
+
+         with self._connection.cursor() as cursor:
+             # NOTE This may return duplicate primary keys. This is okay.
+             cursor.execute(sql)
+             table = cursor.fetch_arrow_all()
+
+         return self._sanitize(table_name, table)
+
+     def _sample_target(
+         self,
+         query: ValidatedPredictiveQuery,
+         entity_df: pd.DataFrame,
+         train_index: np.ndarray,
+         train_time: pd.Series,
+         num_train_examples: int,
+         test_index: np.ndarray,
+         test_time: pd.Series,
+         num_test_examples: int,
+         columns_dict: dict[str, set[str]],
+         time_offset_dict: dict[
+             tuple[str, str, str],
+             tuple[pd.DateOffset | None, pd.DateOffset],
+         ],
+     ) -> tuple[pd.Series, np.ndarray, pd.Series, np.ndarray]:
+
+         # NOTE For Snowflake, we execute everything at once to pay minimal
+         # query initialization costs.
+         index = np.concatenate([train_index, test_index])
+         time = pd.concat([train_time, test_time], axis=0, ignore_index=True)
+
+         entity_df = entity_df.iloc[index].reset_index(drop=True)
+
+         feat_dict: dict[str, pd.DataFrame] = {query.entity_table: entity_df}
+         time_dict: dict[str, pd.Series] = {}
+         time_column = self.time_column_dict.get(query.entity_table)
+         if time_column in columns_dict[query.entity_table]:
+             time_dict[query.entity_table] = entity_df[time_column]
+         batch_dict: dict[str, np.ndarray] = {
+             query.entity_table: np.arange(len(entity_df)),
+         }
+         for edge_type, (min_offset, max_offset) in time_offset_dict.items():
+             table_name, fkey, _ = edge_type
+             feat_dict[table_name], batch_dict[table_name] = self._by_time(
+                 table_name=table_name,
+                 fkey=fkey,
+                 pkey=entity_df[self.primary_key_dict[query.entity_table]],
+                 anchor_time=time,
+                 min_offset=min_offset,
+                 max_offset=max_offset,
+                 columns=columns_dict[table_name],
+             )
+             time_column = self.time_column_dict.get(table_name)
+             if time_column in columns_dict[table_name]:
+                 time_dict[table_name] = feat_dict[table_name][time_column]
+
+         y, mask = PQueryPandasExecutor().execute(
+             query=query,
+             feat_dict=feat_dict,
+             time_dict=time_dict,
+             batch_dict=batch_dict,
+             anchor_time=time,
+             num_forecasts=query.num_forecasts,
+         )
+
+         train_mask = mask[:len(train_index)]
+         test_mask = mask[len(train_index):]
+
+         boundary = int(train_mask.sum())
+         train_y = y.iloc[:boundary]
+         test_y = y.iloc[boundary:].reset_index(drop=True)
+
+         return train_y, train_mask, test_y, test_mask
+
+     def _by_pkey(
+         self,
+         table_name: str,
+         pkey: pd.Series,
+         columns: set[str],
+     ) -> tuple[pd.DataFrame, np.ndarray]:
+
+         pkey_name = self.primary_key_dict[table_name]
+         source_table = self.source_table_dict[table_name]
+
+         payload = json.dumps(list(pkey))
+
+         sql = ("WITH TMP as (\n"
+                " SELECT\n"
+                " f.index as BATCH,\n")
+         if source_table[pkey_name].dtype.is_int():
+             sql += " f.value::NUMBER as ID\n"
+         elif source_table[pkey_name].dtype.is_float():
+             sql += " f.value::FLOAT as ID\n"
+         else:
+             sql += " f.value::VARCHAR as ID\n"
+         sql += (f" FROM TABLE(FLATTEN(INPUT => PARSE_JSON(?))) f\n"
+                 f")\n"
+                 f"SELECT TMP.BATCH as __BATCH__, "
+                 f"{', '.join('ENT.' + quote_ident(col) for col in columns)}\n"
+                 f"FROM TMP\n"
+                 f"JOIN {self.fqn_dict[table_name]} ENT\n"
+                 f" ON ENT.{quote_ident(pkey_name)} = TMP.ID")
+
+         with paramstyle(self._connection), self._connection.cursor() as cursor:
+             cursor.execute(sql, (payload, ))
+             table = cursor.fetch_arrow_all()
+
+         # Remove any duplicated primary keys in post-processing:
+         tmp = table.append_column('__TMP__', pa.array(range(len(table))))
+         gb = tmp.group_by('__BATCH__').aggregate([('__TMP__', 'min')])
+         table = table.take(gb['__TMP___min'])
+
+         batch = table['__BATCH__'].cast(pa.int64()).to_numpy()
+         table = table.remove_column(table.schema.get_field_index('__BATCH__'))
+
+         return table.to_pandas(), batch  # TODO Use `self._sanitize`.
+
+     # Helper Methods ##########################################################
+
+     def _by_time(
+         self,
+         table_name: str,
+         fkey: str,
+         pkey: pd.Series,
+         anchor_time: pd.Series,
+         min_offset: pd.DateOffset | None,
+         max_offset: pd.DateOffset,
+         columns: set[str],
+     ) -> tuple[pd.DataFrame, np.ndarray]:
+
+         end_time = anchor_time + max_offset
+         end_time = end_time.dt.strftime("%Y-%m-%d %H:%M:%S")
+         if min_offset is not None:
+             start_time = anchor_time + min_offset
+             start_time = start_time.dt.strftime("%Y-%m-%d %H:%M:%S")
+             payload = json.dumps(list(zip(pkey, end_time, start_time)))
+         else:
+             payload = json.dumps(list(zip(pkey, end_time)))
+
+         # Based on benchmarking, JSON payload is the fastest way to query by
+         # custom indices (compared to large `IN` clauses or temporary tables):
+         source_table = self.source_table_dict[table_name]
+         time_column = self.time_column_dict[table_name]
+         sql = ("WITH TMP as (\n"
+                " SELECT\n"
+                " f.index as BATCH,\n")
+         if source_table[fkey].dtype.is_int():
+             sql += " f.value[0]::NUMBER as ID,\n"
+         elif source_table[fkey].dtype.is_float():
+             sql += " f.value[0]::FLOAT as ID,\n"
+         else:
+             sql += " f.value[0]::VARCHAR as ID,\n"
+         sql += " f.value[1]::TIMESTAMP_NTZ as END_TIME"
+         if min_offset is not None:
+             sql += ",\n f.value[2]::TIMESTAMP_NTZ as START_TIME"
+         sql += (f"\n"
+                 f" FROM TABLE(FLATTEN(INPUT => PARSE_JSON(?))) f\n"
+                 f")\n"
+                 f"SELECT TMP.BATCH as __BATCH__, "
+                 f"{', '.join('FACT.' + quote_ident(col) for col in columns)}\n"
+                 f"FROM TMP\n"
+                 f"JOIN {self.fqn_dict[table_name]} FACT\n"
+                 f" ON FACT.{quote_ident(fkey)} = TMP.ID\n"
+                 f" AND FACT.{quote_ident(time_column)} <= TMP.END_TIME")
+         if min_offset is not None:
+             sql += f"\n AND FACT.{quote_ident(time_column)} > TMP.START_TIME"
+
+         with paramstyle(self._connection), self._connection.cursor() as cursor:
+             cursor.execute(sql, (payload, ))
+             table = cursor.fetch_arrow_all()
+
+         batch = table['__BATCH__'].cast(pa.int64()).to_numpy()
+         table = table.remove_column(table.schema.get_field_index('__BATCH__'))
+
+         return self._sanitize(table_name, table), batch
+
+     def _sanitize(self, table_name: str, table: pa.table) -> pd.DataFrame:
+         return table.to_pandas(types_mapper=pd.ArrowDtype)
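
`SnowSampler._by_pkey` and `_by_time` ship the requested keys (and per-row time bounds) to Snowflake as a single JSON array bound to one `?` placeholder, then explode it server-side with `FLATTEN(PARSE_JSON(...))`; per the in-code note, this benchmarked faster than large `IN` clauses or temporary tables. A standalone sketch of the payload and query shape (illustrative values and placeholder identifiers, no Snowflake connection required):

import json

# Two entities with their per-row prediction end times (toy values):
pkey = [17, 42]
end_time = ["2024-05-31 00:00:00", "2024-06-01 00:00:00"]

# Single bind parameter instead of a large IN (...) clause:
payload = json.dumps(list(zip(pkey, end_time)))
# -> '[[17, "2024-05-31 00:00:00"], [42, "2024-06-01 00:00:00"]]'

# Simplified shape of the generated query: FLATTEN turns the JSON array into
# one row per entry, with `f.index` preserving the payload position.
sql = ("WITH TMP as (\n"
       "  SELECT f.index as BATCH,\n"
       "         f.value[0]::NUMBER as ID,\n"
       "         f.value[1]::TIMESTAMP_NTZ as END_TIME\n"
       "  FROM TABLE(FLATTEN(INPUT => PARSE_JSON(?))) f\n"
       ")\n"
       "SELECT TMP.BATCH as __BATCH__, FACT.*\n"
       "FROM TMP JOIN <fact_table> FACT\n"
       "  ON FACT.<fkey> = TMP.ID\n"
       "  AND FACT.<time_column> <= TMP.END_TIME")

The `__BATCH__` column carried through the join is what maps each returned fact row back to the payload entry (and hence the entity row) that produced it.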