kumoai 2.13.0.dev202512040649__cp313-cp313-win_amd64.whl → 2.14.0.dev202601081732__cp313-cp313-win_amd64.whl
This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- kumoai/__init__.py +35 -26
- kumoai/_version.py +1 -1
- kumoai/client/client.py +6 -0
- kumoai/client/jobs.py +26 -0
- kumoai/client/pquery.py +6 -2
- kumoai/connector/utils.py +21 -7
- kumoai/experimental/rfm/__init__.py +51 -24
- kumoai/experimental/rfm/authenticate.py +3 -4
- kumoai/experimental/rfm/backend/local/__init__.py +4 -0
- kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +62 -110
- kumoai/experimental/rfm/backend/local/sampler.py +312 -0
- kumoai/experimental/rfm/backend/local/table.py +35 -31
- kumoai/experimental/rfm/backend/snow/__init__.py +2 -0
- kumoai/experimental/rfm/backend/snow/sampler.py +366 -0
- kumoai/experimental/rfm/backend/snow/table.py +177 -50
- kumoai/experimental/rfm/backend/sqlite/__init__.py +4 -2
- kumoai/experimental/rfm/backend/sqlite/sampler.py +454 -0
- kumoai/experimental/rfm/backend/sqlite/table.py +131 -48
- kumoai/experimental/rfm/base/__init__.py +23 -3
- kumoai/experimental/rfm/base/column.py +96 -10
- kumoai/experimental/rfm/base/expression.py +44 -0
- kumoai/experimental/rfm/base/sampler.py +782 -0
- kumoai/experimental/rfm/base/source.py +2 -1
- kumoai/experimental/rfm/base/sql_sampler.py +247 -0
- kumoai/experimental/rfm/base/table.py +404 -203
- kumoai/experimental/rfm/graph.py +374 -172
- kumoai/experimental/rfm/infer/__init__.py +6 -4
- kumoai/experimental/rfm/infer/dtype.py +7 -4
- kumoai/experimental/rfm/infer/multicategorical.py +1 -1
- kumoai/experimental/rfm/infer/pkey.py +4 -2
- kumoai/experimental/rfm/infer/stype.py +35 -0
- kumoai/experimental/rfm/infer/time_col.py +1 -2
- kumoai/experimental/rfm/pquery/executor.py +27 -27
- kumoai/experimental/rfm/pquery/pandas_executor.py +30 -32
- kumoai/experimental/rfm/relbench.py +76 -0
- kumoai/experimental/rfm/rfm.py +762 -467
- kumoai/experimental/rfm/sagemaker.py +4 -4
- kumoai/experimental/rfm/task_table.py +292 -0
- kumoai/kumolib.cp313-win_amd64.pyd +0 -0
- kumoai/pquery/predictive_query.py +10 -6
- kumoai/pquery/training_table.py +16 -2
- kumoai/testing/snow.py +50 -0
- kumoai/trainer/distilled_trainer.py +175 -0
- kumoai/utils/__init__.py +3 -2
- kumoai/utils/display.py +87 -0
- kumoai/utils/progress_logger.py +190 -12
- kumoai/utils/sql.py +3 -0
- {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202601081732.dist-info}/METADATA +3 -2
- {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202601081732.dist-info}/RECORD +52 -41
- kumoai/experimental/rfm/local_graph_sampler.py +0 -223
- kumoai/experimental/rfm/local_pquery_driver.py +0 -689
- {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202601081732.dist-info}/WHEEL +0 -0
- {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202601081732.dist-info}/licenses/LICENSE +0 -0
- {kumoai-2.13.0.dev202512040649.dist-info → kumoai-2.14.0.dev202601081732.dist-info}/top_level.txt +0 -0
kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py}

```diff
@@ -1,13 +1,12 @@
-import warnings
-from typing import Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pandas as pd
 from kumoapi.rfm.context import Subgraph
-from kumoapi.typing import Stype
 
-from kumoai.experimental.rfm import
-from kumoai.
+from kumoai.experimental.rfm.backend.local import LocalTable
+from kumoai.experimental.rfm.base import Table
+from kumoai.utils import ProgressLogger
 
 try:
     import torch
@@ -15,42 +14,40 @@ try:
 except ImportError:
     WITH_TORCH = False
 
+if TYPE_CHECKING:
+    from kumoai.experimental.rfm import Graph
+
 
 class LocalGraphStore:
     def __init__(
         self,
-        graph: Graph,
-        verbose: Union[bool, ProgressLogger] = True,
+        graph: 'Graph',
+        verbose: bool | ProgressLogger = True,
     ) -> None:
 
        if not isinstance(verbose, ProgressLogger):
-            verbose = ProgressLogger(
-                "Materializing graph",
+            verbose = ProgressLogger.default(
+                msg="Materializing graph",
                 verbose=verbose,
             )
 
         with verbose as logger:
             self.df_dict, self.mask_dict = self.sanitize(graph)
-            self.stype_dict = self.get_stype_dict(graph)
             logger.log("Sanitized input data")
 
-            self.
+            self.pkey_map_dict = self.get_pkey_map_dict(graph)
             num_pkeys = sum(t.has_primary_key() for t in graph.tables.values())
             if num_pkeys > 1:
                 logger.log(f"Collected primary keys from {num_pkeys} tables")
             else:
                 logger.log(f"Collected primary key from {num_pkeys} table")
 
-            (
-                self.time_column_dict,
-                self.end_time_column_dict,
-                self.time_dict,
-                self.min_time,
-                self.max_time,
-            ) = self.get_time_data(graph)
-            if self.max_time != pd.Timestamp.min:
+            self.time_dict, self.min_max_time_dict = self.get_time_data(graph)
+            if len(self.min_max_time_dict) > 0:
+                min_time = min(t for t, _ in self.min_max_time_dict.values())
+                max_time = max(t for _, t in self.min_max_time_dict.values())
                 logger.log(f"Identified temporal graph from "
-                           f"{self.min_time.date()} to {self.max_time.date()}")
+                           f"{min_time.date()} to {max_time.date()}")
             else:
                 logger.log("Identified static graph without timestamps")
 
```
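
The hunk above replaces positional `ProgressLogger(...)` construction with a `ProgressLogger.default(msg=..., verbose=...)` factory. A minimal sketch of the new calling pattern, using only the calls visible in the hunk (the factory's full signature is not shown in this diff):

```python
from kumoai.utils import ProgressLogger

verbose: bool | ProgressLogger = True  # what callers may pass in

# Wrap plain booleans in a logger, exactly as `__init__` now does:
if not isinstance(verbose, ProgressLogger):
    verbose = ProgressLogger.default(
        msg="Materializing graph",
        verbose=verbose,
    )

with verbose as logger:  # the logger doubles as a context manager
    logger.log("Sanitized input data")
```
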
```diff
@@ -60,14 +57,6 @@ class LocalGraphStore:
             logger.log(f"Created graph with {num_nodes:,} nodes and "
                        f"{num_edges:,} edges")
 
-    @property
-    def node_types(self) -> List[str]:
-        return list(self.df_dict.keys())
-
-    @property
-    def edge_types(self) -> List[Tuple[str, str, str]]:
-        return list(self.row_dict.keys())
-
     def get_node_id(self, table_name: str, pkey: pd.Series) -> np.ndarray:
         r"""Returns the node ID given primary keys.
 
@@ -103,8 +92,8 @@
 
     def sanitize(
         self,
-        graph: Graph,
-    ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, np.ndarray]]:
+        graph: 'Graph',
+    ) -> tuple[dict[str, pd.DataFrame], dict[str, np.ndarray]]:
         r"""Sanitizes raw data according to table schema definition:
 
         In particular, it:
@@ -113,30 +102,24 @@
         * drops duplicate primary keys
         * removes rows with missing primary keys or time values
         """
-        df_dict: Dict[str, pd.DataFrame] = {}
+        df_dict: dict[str, pd.DataFrame] = {}
         for table_name, table in graph.tables.items():
             assert isinstance(table, LocalTable)
-
-
+            df_dict[table_name] = Table._sanitize(
+                df=table._data.copy(deep=False).reset_index(drop=True),
+                dtype_dict={
+                    column.name: column.dtype
+                    for column in table.columns
+                },
+                stype_dict={
+                    column.name: column.stype
+                    for column in table.columns
+                },
+            )
 
-        mask_dict: Dict[str, np.ndarray] = {}
+        mask_dict: dict[str, np.ndarray] = {}
         for table in graph.tables.values():
-            for col in table.columns:
-                if col.stype == Stype.timestamp:
-                    ser = df_dict[table.name][col.name]
-                    if not pd.api.types.is_datetime64_any_dtype(ser):
-                        with warnings.catch_warnings():
-                            warnings.filterwarnings(
-                                'ignore',
-                                message='Could not infer format',
-                            )
-                            ser = pd.to_datetime(ser, errors='coerce')
-                        df_dict[table.name][col.name] = ser
-                    if isinstance(ser.dtype, pd.DatetimeTZDtype):
-                        ser = ser.dt.tz_localize(None)
-                        df_dict[table.name][col.name] = ser
-
-            mask: Optional[np.ndarray] = None
+            mask: np.ndarray | None = None
             if table._time_column is not None:
                 ser = df_dict[table.name][table._time_column]
                 mask = ser.notna().to_numpy()
```
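
The timestamp coercion deleted here (parse non-datetime columns with `errors='coerce'`, then strip timezones) moves behind `Table._sanitize`, whose internals are not part of this diff. For reference, the deleted logic is equivalent to this standalone helper:

```python
import warnings

import pandas as pd


def coerce_timestamps(ser: pd.Series) -> pd.Series:
    # Parse non-datetime values; unparsable entries become NaT:
    if not pd.api.types.is_datetime64_any_dtype(ser):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', message='Could not infer format')
            ser = pd.to_datetime(ser, errors='coerce')
    # Drop timezone info so all timestamps are comparable:
    if isinstance(ser.dtype, pd.DatetimeTZDtype):
        ser = ser.dt.tz_localize(None)
    return ser
```
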
```diff
@@ -151,34 +134,16 @@
 
         return df_dict, mask_dict
 
-    def get_stype_dict(
-        stype_dict: Dict[str, Dict[str, Stype]] = {}
-        foreign_keys = {(edge.src_table, edge.fkey) for edge in graph.edges}
-        for table in graph.tables.values():
-            stype_dict[table.name] = {}
-            for column in table.columns:
-                if column == table.primary_key:
-                    continue
-                if (table.name, column.name) in foreign_keys:
-                    continue
-                stype_dict[table.name][column.name] = column.stype
-        return stype_dict
-
-    def get_pkey_data(
+    def get_pkey_map_dict(
         self,
-        graph: Graph,
-    ) -> Tuple[
-        Dict[str, str],
-        Dict[str, pd.DataFrame],
-    ]:
-        pkey_name_dict: Dict[str, str] = {}
-        pkey_map_dict: Dict[str, pd.DataFrame] = {}
+        graph: 'Graph',
+    ) -> dict[str, pd.DataFrame]:
+        pkey_map_dict: dict[str, pd.DataFrame] = {}
 
         for table in graph.tables.values():
             if table._primary_key is None:
                 continue
 
-            pkey_name_dict[table.name] = table._primary_key
             pkey = self.df_dict[table.name][table._primary_key]
             pkey_map = pd.DataFrame(
                 dict(arange=range(len(pkey))),
@@ -200,61 +165,48 @@
 
             pkey_map_dict[table.name] = pkey_map
 
-        return pkey_name_dict, pkey_map_dict
+        return pkey_map_dict
 
     def get_time_data(
         self,
-        graph: Graph,
-    ) -> Tuple[
-        Dict[str, str],
-        Dict[str, str],
-        Dict[str, np.ndarray],
-        pd.Timestamp,
-        pd.Timestamp,
+        graph: 'Graph',
+    ) -> tuple[
+        dict[str, np.ndarray],
+        dict[str, tuple[pd.Timestamp, pd.Timestamp]],
     ]:
-        time_column_dict: Dict[str, str] = {}
-        end_time_column_dict: Dict[str, str] = {}
-        time_dict: Dict[str, np.ndarray] = {}
-        min_time = pd.Timestamp.max
-        max_time = pd.Timestamp.min
+        time_dict: dict[str, np.ndarray] = {}
+        min_max_time_dict: dict[str, tuple[pd.Timestamp, pd.Timestamp]] = {}
         for table in graph.tables.values():
-            if table._end_time_column is not None:
-                end_time_column_dict[table.name] = table._end_time_column
-
             if table._time_column is None:
                 continue
 
             time = self.df_dict[table.name][table._time_column]
-            time_dict[table.name] = time.astype(
-                int).to_numpy() // 1000**3
-            time_column_dict[table.name] = table._time_column
+            time_dict[table.name] = time.astype(int).to_numpy() // 1000**3
 
             if table.name in self.mask_dict.keys():
                 time = time[self.mask_dict[table.name]]
             if len(time) > 0:
-                min_time = min(min_time, time.min())
-                max_time = max(max_time, time.max())
+                min_max_time_dict[table.name] = (time.min(), time.max())
+            else:
+                min_max_time_dict[table.name] = (
+                    pd.Timestamp.max,
+                    pd.Timestamp.min,
+                )
 
-        return (
-            time_column_dict,
-            end_time_column_dict,
-            time_dict,
-            min_time,
-            max_time,
-        )
+        return time_dict, min_max_time_dict
 
     def get_csc(
         self,
-        graph: Graph,
-    ) -> Tuple[
-        Dict[Tuple[str, str, str], np.ndarray],
-        Dict[Tuple[str, str, str], np.ndarray],
+        graph: 'Graph',
+    ) -> tuple[
+        dict[tuple[str, str, str], np.ndarray],
+        dict[tuple[str, str, str], np.ndarray],
     ]:
         # A mapping from raw primary keys to node indices (0 to N-1):
-        map_dict: Dict[str, pd.CategoricalDtype] = {}
+        map_dict: dict[str, pd.CategoricalDtype] = {}
         # A dictionary to manage offsets of node indices for invalid rows:
-        offset_dict: Dict[str, np.ndarray] = {}
-        for table_name in
+        offset_dict: dict[str, np.ndarray] = {}
+        for table_name in {edge.dst_table for edge in graph.edges}:
             ser = self.df_dict[table_name][graph[table_name]._primary_key]
             if table_name in self.mask_dict.keys():
                 mask = self.mask_dict[table_name]
@@ -263,8 +215,8 @@
             map_dict[table_name] = pd.CategoricalDtype(ser, ordered=True)
 
         # Build CSC graph representation:
-        row_dict: Dict[Tuple[str, str, str], np.ndarray] = {}
-        colptr_dict: Dict[Tuple[str, str, str], np.ndarray] = {}
+        row_dict: dict[tuple[str, str, str], np.ndarray] = {}
+        colptr_dict: dict[tuple[str, str, str], np.ndarray] = {}
         for src_table, fkey, dst_table in graph.edges:
             src_df = self.df_dict[src_table]
             dst_df = self.df_dict[dst_table]
@@ -326,7 +278,7 @@ def _argsort(input: np.ndarray) -> np.ndarray:
     return torch.from_numpy(input).argsort().numpy()
 
 
-def _lexsort(inputs: List[np.ndarray]) -> np.ndarray:
+def _lexsort(inputs: list[np.ndarray]) -> np.ndarray:
     assert len(inputs) >= 1
 
     if not WITH_TORCH:
```
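
`get_time_data` no longer folds everything into a single global `(min_time, max_time)` pair; it now returns one `(min, max)` tuple per table, and `__init__` aggregates on demand. A small sketch of that aggregation, with invented table names and dates:

```python
import pandas as pd

# Per-table ranges, as `get_time_data` now returns them (hypothetical data):
min_max_time_dict = {
    'orders': (pd.Timestamp('2020-01-03'), pd.Timestamp('2021-06-30')),
    'events': (pd.Timestamp('2019-11-20'), pd.Timestamp('2021-02-14')),
}

if len(min_max_time_dict) > 0:  # temporal graph
    min_time = min(t for t, _ in min_max_time_dict.values())
    max_time = max(t for _, t in min_max_time_dict.values())
    print(f"Identified temporal graph from "
          f"{min_time.date()} to {max_time.date()}")
    # Identified temporal graph from 2019-11-20 to 2021-06-30
else:
    print("Identified static graph without timestamps")
```
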
kumoai/experimental/rfm/backend/local/sampler.py (new file)

```diff
@@ -0,0 +1,312 @@
+from typing import TYPE_CHECKING, Literal
+
+import numpy as np
+import pandas as pd
+from kumoapi.pquery import ValidatedPredictiveQuery
+
+from kumoai.experimental.rfm.backend.local import LocalGraphStore
+from kumoai.experimental.rfm.base import Sampler, SamplerOutput
+from kumoai.experimental.rfm.pquery import PQueryPandasExecutor
+from kumoai.utils import ProgressLogger
+
+if TYPE_CHECKING:
+    from kumoai.experimental.rfm import Graph
+
+
+class LocalSampler(Sampler):
+    def __init__(
+        self,
+        graph: 'Graph',
+        verbose: bool | ProgressLogger = True,
+    ) -> None:
+        super().__init__(graph=graph, verbose=verbose)
+
+        import kumoai.kumolib as kumolib
+
+        self._graph_store = LocalGraphStore(graph, verbose)
+        self._graph_sampler = kumolib.NeighborSampler(
+            list(self.table_stype_dict.keys()),
+            self.edge_types,
+            {
+                '__'.join(edge_type): colptr
+                for edge_type, colptr in self._graph_store.colptr_dict.items()
+            },
+            {
+                '__'.join(edge_type): row
+                for edge_type, row in self._graph_store.row_dict.items()
+            },
+            self._graph_store.time_dict,
+        )
+
+    def _get_min_max_time_dict(
+        self,
+        table_names: list[str],
+    ) -> dict[str, tuple[pd.Timestamp, pd.Timestamp]]:
+        return {
+            key: value
+            for key, value in self._graph_store.min_max_time_dict.items()
+            if key in table_names
+        }
+
+    def _sample_subgraph(
+        self,
+        entity_table_name: str,
+        entity_pkey: pd.Series,
+        anchor_time: pd.Series | Literal['entity'],
+        columns_dict: dict[str, set[str]],
+        num_neighbors: list[int],
+    ) -> SamplerOutput:
+
+        index = self._graph_store.get_node_id(entity_table_name, entity_pkey)
+
+        if isinstance(anchor_time, pd.Series):
+            time = anchor_time.astype(int).to_numpy() // 1000**3  # to seconds
+        else:
+            assert anchor_time == 'entity'
+            time = self._graph_store.time_dict[entity_table_name][index]
+
+        (
+            row_dict,
+            col_dict,
+            node_dict,
+            batch_dict,
+            num_sampled_nodes_dict,
+            num_sampled_edges_dict,
+        ) = self._graph_sampler.sample(
+            {
+                '__'.join(edge_type): num_neighbors
+                for edge_type in self.edge_types
+            },
+            {},
+            entity_table_name,
+            index,
+            time,
+        )
+
+        df_dict: dict[str, pd.DataFrame] = {}
+        inverse_dict: dict[str, np.ndarray] = {}
+        for table_name, node in node_dict.items():
+            df = self._graph_store.df_dict[table_name]
+            columns = columns_dict[table_name]
+            if self.end_time_column_dict.get(table_name, None) in columns:
+                df = df.iloc[node]
+            elif len(columns) == 0:
+                df = df.iloc[node]
+            else:
+                # Only store unique rows in `df` above a certain threshold:
+                unique_node, inverse = np.unique(node, return_inverse=True)
+                if len(node) > 1.05 * len(unique_node):
+                    df = df.iloc[unique_node]
+                    inverse_dict[table_name] = inverse
+                else:
+                    df = df.iloc[node]
+            df = df.reset_index(drop=True)
+            df = df[list(columns)]
+            df_dict[table_name] = df
+
+        num_sampled_nodes_dict = {
+            table_name: num_sampled_nodes.tolist()
+            for table_name, num_sampled_nodes in
+            num_sampled_nodes_dict.items()
+        }
+
+        row_dict = {
+            edge_type: row_dict['__'.join(edge_type)]
+            for edge_type in self.edge_types
+        }
+        col_dict = {
+            edge_type: col_dict['__'.join(edge_type)]
+            for edge_type in self.edge_types
+        }
+        num_sampled_edges_dict = {
+            edge_type: num_sampled_edges_dict['__'.join(edge_type)].tolist()
+            for edge_type in self.edge_types
+        }
+
+        return SamplerOutput(
+            anchor_time=time * 1000**3,  # to nanoseconds
+            df_dict=df_dict,
+            inverse_dict=inverse_dict,
+            batch_dict=batch_dict,
+            num_sampled_nodes_dict=num_sampled_nodes_dict,
+            row_dict=row_dict,
+            col_dict=col_dict,
+            num_sampled_edges_dict=num_sampled_edges_dict,
+        )
+
+    def _sample_entity_table(
+        self,
+        table_name: str,
+        columns: set[str],
+        num_rows: int,
+        random_seed: int | None = None,
+    ) -> pd.DataFrame:
+        pkey_map = self._graph_store.pkey_map_dict[table_name]
+        if len(pkey_map) > num_rows:
+            pkey_map = pkey_map.sample(
+                n=num_rows,
+                random_state=random_seed,
+                ignore_index=True,
+            )
+        df = self._graph_store.df_dict[table_name]
+        df = df.iloc[pkey_map['arange']][list(columns)]
+        return df
+
+    def _sample_target(
+        self,
+        query: ValidatedPredictiveQuery,
+        entity_df: pd.DataFrame,
+        train_index: np.ndarray,
+        train_time: pd.Series,
+        num_train_examples: int,
+        test_index: np.ndarray,
+        test_time: pd.Series,
+        num_test_examples: int,
+        columns_dict: dict[str, set[str]],
+        time_offset_dict: dict[
+            tuple[str, str, str],
+            tuple[pd.DateOffset | None, pd.DateOffset],
+        ],
+    ) -> tuple[pd.Series, np.ndarray, pd.Series, np.ndarray]:
+
+        train_y, train_mask = self._sample_target_set(
+            query=query,
+            pkey=entity_df[self.primary_key_dict[query.entity_table]],
+            index=train_index,
+            anchor_time=train_time,
+            num_examples=num_train_examples,
+            columns_dict=columns_dict,
+            time_offset_dict=time_offset_dict,
+        )
+
+        test_y, test_mask = self._sample_target_set(
+            query=query,
+            pkey=entity_df[self.primary_key_dict[query.entity_table]],
+            index=test_index,
+            anchor_time=test_time,
+            num_examples=num_test_examples,
+            columns_dict=columns_dict,
+            time_offset_dict=time_offset_dict,
+        )
+
+        return train_y, train_mask, test_y, test_mask
+
+    # Helper Methods ##########################################################
+
+    def _sample_target_set(
+        self,
+        query: ValidatedPredictiveQuery,
+        pkey: pd.Series,
+        index: np.ndarray,
+        anchor_time: pd.Series,
+        num_examples: int,
+        columns_dict: dict[str, set[str]],
+        time_offset_dict: dict[
+            tuple[str, str, str],
+            tuple[pd.DateOffset | None, pd.DateOffset],
+        ],
+        batch_size: int = 10_000,
+    ) -> tuple[pd.Series, np.ndarray]:
+
+        num_hops = 1 if len(time_offset_dict) > 0 else 0
+        num_neighbors_dict: dict[str, list[int]] = {}
+        unix_time_offset_dict: dict[str, list[list[int | None]]] = {}
+        for edge_type, (start, end) in time_offset_dict.items():
+            unix_time_offset_dict['__'.join(edge_type)] = [[
+                date_offset_to_seconds(start) if start is not None else None,
+                date_offset_to_seconds(end),
+            ]]
+        for edge_type in set(self.edge_types) - set(time_offset_dict.keys()):
+            num_neighbors_dict['__'.join(edge_type)] = [0] * num_hops
+
+        count = 0
+        ys: list[pd.Series] = []
+        mask = np.full(len(index), False, dtype=bool)
+        for start in range(0, len(index), batch_size):
+            subset = pkey.iloc[index[start:start + batch_size]]
+            time = anchor_time.iloc[start:start + batch_size]
+
+            _, _, node_dict, batch_dict, _, _ = self._graph_sampler.sample(
+                num_neighbors_dict,
+                unix_time_offset_dict,
+                query.entity_table,
+                self._graph_store.get_node_id(query.entity_table, subset),
+                time.astype(int).to_numpy() // 1000**3,  # to seconds
+            )
+
+            feat_dict: dict[str, pd.DataFrame] = {}
+            time_dict: dict[str, pd.Series] = {}
+            for table_name, columns in columns_dict.items():
+                df = self._graph_store.df_dict[table_name]
+                df = df.iloc[node_dict[table_name]].reset_index(drop=True)
+                df = df[list(columns)]
+                feat_dict[table_name] = df
+
+                time_column = self.time_column_dict.get(table_name)
+                if time_column in columns:
+                    time_dict[table_name] = df[time_column]
+
+            y, _mask = PQueryPandasExecutor().execute(
+                query=query,
+                feat_dict=feat_dict,
+                time_dict=time_dict,
+                batch_dict=batch_dict,
+                anchor_time=time,
+                num_forecasts=query.num_forecasts,
+            )
+            ys.append(y)
+            mask[start:start + batch_size] = _mask
+
+            count += len(y)
+            if count >= num_examples:
+                break
+
+        if len(ys) == 0:
+            y = pd.Series([], dtype=float)
+        elif len(ys) == 1:
+            y = ys[0]
+        else:
+            y = pd.concat(ys, axis=0, ignore_index=True)
+
+        return y, mask
+
+
+# Helper Functions ############################################################
+
+
+def date_offset_to_seconds(offset: pd.DateOffset) -> int:
+    r"""Convert a :class:`pandas.DateOffset` into a number of seconds.
+
+    .. note::
+        We are conservative and take months and years as their maximum value.
+        Additional values are then dropped in label computation where we know
+        the actual dates.
+    """
+    MAX_DAYS_IN_MONTH = 31
+    MAX_DAYS_IN_YEAR = 366
+
+    SECONDS_IN_MINUTE = 60
+    SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
+    SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
+
+    total_sec = 0
+    multiplier = getattr(offset, 'n', 1)  # The multiplier (if present).
+
+    for attr, value in offset.__dict__.items():
+        if value is None or value == 0:
+            continue
+        scaled_value = value * multiplier
+        if attr == 'years':
+            total_sec += scaled_value * MAX_DAYS_IN_YEAR * SECONDS_IN_DAY
+        elif attr == 'months':
+            total_sec += scaled_value * MAX_DAYS_IN_MONTH * SECONDS_IN_DAY
+        elif attr == 'days':
+            total_sec += scaled_value * SECONDS_IN_DAY
+        elif attr == 'hours':
+            total_sec += scaled_value * SECONDS_IN_HOUR
+        elif attr == 'minutes':
+            total_sec += scaled_value * SECONDS_IN_MINUTE
+        elif attr == 'seconds':
+            total_sec += scaled_value
+
+    return total_sec
```
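
`date_offset_to_seconds` intentionally over-approximates, counting every month as 31 days and every year as 366, so a window converted to seconds can never be too short; rows falling outside the exact offset are dropped later during label computation, where real dates are available. A quick sanity check of the arithmetic (assuming standard `pd.DateOffset` keywords and the module path from the file list above):

```python
import pandas as pd

from kumoai.experimental.rfm.backend.local.sampler import (
    date_offset_to_seconds,
)

# Days convert exactly: 7 * 86,400 seconds.
assert date_offset_to_seconds(pd.DateOffset(days=7)) == 7 * 24 * 60 * 60

# Months are taken at their maximum length of 31 days:
assert date_offset_to_seconds(pd.DateOffset(months=2)) == 2 * 31 * 24 * 60 * 60
```
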
kumoai/experimental/rfm/backend/local/table.py

```diff
@@ -1,10 +1,15 @@
-import
-from typing import List, Optional
+from typing import Sequence, cast
 
 import pandas as pd
+from kumoapi.model_plan import MissingType
 
-from kumoai.experimental.rfm.base import
-
+from kumoai.experimental.rfm.base import (
+    ColumnSpec,
+    DataBackend,
+    SourceColumn,
+    SourceForeignKey,
+    Table,
+)
 
 
 class LocalTable(Table):
@@ -52,9 +57,9 @@ class LocalTable(Table):
         self,
         df: pd.DataFrame,
         name: str,
-        primary_key: Optional[str] = None,
-        time_column: Optional[str] = None,
-        end_time_column: Optional[str] = None,
+        primary_key: MissingType | str | None = MissingType.VALUE,
+        time_column: str | None = None,
+        end_time_column: str | None = None,
     ) -> None:
 
         if df.empty:
@@ -70,40 +75,39 @@ class LocalTable(Table):
 
         super().__init__(
             name=name,
-            columns=list(df.columns),
             primary_key=primary_key,
             time_column=time_column,
             end_time_column=end_time_column,
         )
 
-
-
-
-
-
-
-
-
-
-                    f"the data type of the column to use it within "
-                    f"this table.")
-                continue
-
-            source_column = SourceColumn(
-                name=column,
-                dtype=dtype,
+    @property
+    def backend(self) -> DataBackend:
+        return cast(DataBackend, DataBackend.LOCAL)
+
+    def _get_source_columns(self) -> list[SourceColumn]:
+        return [
+            SourceColumn(
+                name=column_name,
+                dtype=None,
                 is_primary_key=False,
                 is_unique_key=False,
-
-
-
-        return source_columns
+                is_nullable=True,
+            ) for column_name in self._data.columns
+        ]
 
-    def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+    def _get_source_foreign_keys(self) -> list[SourceForeignKey]:
         return []
 
-    def
+    def _get_source_sample_df(self) -> pd.DataFrame:
         return self._data
 
-    def
+    def _get_expr_sample_df(
+        self,
+        columns: Sequence[ColumnSpec],
+    ) -> pd.DataFrame:
+        raise RuntimeError(f"Column expressions are not supported in "
+                           f"'{self.__class__.__name__}'. Please apply your "
+                           f"expressions on the `pd.DataFrame` directly.")
+
+    def _get_num_rows(self) -> int | None:
         return len(self._data)
```