kumoai 2.13.0.dev202512061731__cp313-cp313-macosx_11_0_arm64.whl → 2.13.0.dev202512081731__cp313-cp313-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kumoai/_version.py +1 -1
- kumoai/experimental/rfm/backend/local/sampler.py +27 -42
- kumoai/experimental/rfm/base/__init__.py +3 -3
- kumoai/experimental/rfm/base/sampler.py +133 -47
- {kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/METADATA +1 -1
- {kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/RECORD +9 -9
- {kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/WHEEL +0 -0
- {kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/licenses/LICENSE +0 -0
- {kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/top_level.txt +0 -0
kumoai/_version.py
CHANGED
@@ -1 +1 @@
-__version__ = '2.13.0.dev202512061731'
+__version__ = '2.13.0.dev202512081731'
kumoai/experimental/rfm/backend/local/sampler.py
CHANGED
@@ -4,7 +4,7 @@ import numpy as np
 import pandas as pd
 
 from kumoai.experimental.rfm.backend.local import LocalGraphStore
-from kumoai.experimental.rfm.base import
+from kumoai.experimental.rfm.base import BackwardSamplerOutput, Sampler
 from kumoai.utils import ProgressLogger
 
 if TYPE_CHECKING:
@@ -36,30 +36,19 @@ class LocalSampler(Sampler):
             self._graph_store.time_dict,
         )
 
-    def
+    def _sample_backward(
         self,
         entity_table_name: str,
         entity_pkey: pd.Series,
         anchor_time: pd.Series,
-
-
-
-        return_edges: bool = False,
-    ) -> SamplerOutput:
+        columns_dict: dict[str, set[str]],
+        num_neighbors: list[int],
+    ) -> BackwardSamplerOutput:
 
-
-
-
-
-        num_neighbors_dict: dict[str, list[int]] = {}
-
-        for edge_type, specs in edge_spec_dict.items():
-            edge_type_str = '__'.join(edge_type)
-            num_neighbors_dict[edge_type_str] = [0] * num_hops
-            for hop, spec in enumerate(specs):
-                # TODO Add support for time-based sampling.
-                assert spec.num_neighbors is not None
-                num_neighbors_dict[edge_type_str][hop] = spec.num_neighbors
+        num_neighbors_dict: dict[str, list[int]] = {
+            '__'.join(edge_type): num_neighbors
+            for edge_type in self.edge_types
+        }
 
         (
             row_dict,
@@ -80,10 +69,12 @@
         inverse_dict: dict[str, np.ndarray] = {}
         for table_name, node in node_dict.items():
             df = self._graph_store.df_dict[table_name]
-            columns =
+            columns = columns_dict[table_name]
             if self.end_time_column_dict.get(table_name, None) in columns:
                 df = df.iloc[node]
-            elif len(columns)
+            elif len(columns) == 0:
+                df = df.iloc[node]
+            else:
                 # Only store unique rows in `df` above a certain threshold:
                 unique_node, inverse = np.unique(node, return_inverse=True)
                 if len(node) > 1.05 * len(unique_node):
@@ -91,10 +82,8 @@
                     inverse_dict[table_name] = inverse
                 else:
                     df = df.iloc[node]
-            else:
-                df = df.iloc[node]
             df = df.reset_index(drop=True)
-            df = df[columns]
+            df = df[list(columns)]
             df_dict[table_name] = df
 
         num_sampled_nodes_dict = {
@@ -103,24 +92,20 @@
             num_sampled_nodes_dict.items()
         }
 
-
-        row_dict
-
-
-
-        col_dict
-
-
-
-        num_sampled_edges_dict
-
-
-            for edge_type in edge_spec_dict.keys()
-        }
-        else:
-            row_dict = col_dict = num_sampled_edges_dict = None
+        row_dict = {
+            edge_type: row_dict['__'.join(edge_type)]
+            for edge_type in self.edge_types
+        }
+        col_dict = {
+            edge_type: col_dict['__'.join(edge_type)]
+            for edge_type in self.edge_types
+        }
+        num_sampled_edges_dict = {
+            edge_type: num_sampled_edges_dict['__'.join(edge_type)].tolist()
+            for edge_type in self.edge_types
+        }
 
-        return
+        return BackwardSamplerOutput(
             df_dict=df_dict,
             inverse_dict=inverse_dict,
             batch_dict=batch_dict,
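Note on the refactor above: the local backend keys its flat num_neighbors_dict by joining the (src_table, foreign_key, dst_table) edge-type tuple with '__', and the same convention is inverted when re-keying row_dict/col_dict back to tuples. A minimal sketch of the convention, with hypothetical table names (not taken from the package):

import numpy as np

edge_types = [('orders', 'user_id', 'users'), ('reviews', 'user_id', 'users')]
num_neighbors = [16, 8]  # shared per-hop fanout, as in the new code

# Tuple edge types -> flat string keys (what the neighbor sampler consumes):
num_neighbors_dict = {'__'.join(et): num_neighbors for et in edge_types}
assert num_neighbors_dict['orders__user_id__users'] == [16, 8]

# ... and back: string-keyed sampler results -> tuple-keyed output dicts:
flat_row_dict = {'__'.join(et): np.arange(3) for et in edge_types}
row_dict = {et: flat_row_dict['__'.join(et)] for et in edge_types}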
kumoai/experimental/rfm/base/__init__.py
CHANGED
@@ -1,14 +1,14 @@
 from .source import SourceColumn, SourceForeignKey
 from .column import Column
 from .table import Table
-from .sampler import
+from .sampler import BackwardSamplerOutput, ForwardSamplerOutput, Sampler
 
 __all__ = [
     'SourceColumn',
     'SourceForeignKey',
     'Column',
     'Table',
-    '
-    '
+    'BackwardSamplerOutput',
+    'ForwardSamplerOutput',
     'Sampler',
 ]
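The renamed exports reflect the split of the old SamplerOutput into backward and forward variants. A hypothetical backend subclass would now implement both abstract halves (sketch only; the bodies are placeholders, and the exact signatures appear in base/sampler.py below):

from kumoai.experimental.rfm.base import Sampler

class MyBackendSampler(Sampler):  # hypothetical backend
    def _sample_backward(self, entity_table_name, entity_pkey, anchor_time,
                         columns_dict, num_neighbors):
        # Gather per-hop neighborhoods around the seed entities:
        raise NotImplementedError  # -> BackwardSamplerOutput

    def _sample_forward(self, query, num_examples, anchor_time,
                        columns_dict, time_offset_dict, random_seed=None):
        # Draw (entity, anchor time, target) training examples:
        raise NotImplementedError  # -> ForwardSamplerOutput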
kumoai/experimental/rfm/base/sampler.py
CHANGED
@@ -1,11 +1,14 @@
 import copy
 import re
 from abc import ABC, abstractmethod
+from collections import defaultdict
 from dataclasses import dataclass
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Literal
 
 import numpy as np
 import pandas as pd
+from kumoapi.pquery import ValidatedPredictiveQuery
+from kumoapi.pquery.AST import Aggregation, ASTNode
 from kumoapi.rfm.context import EdgeLayout, Link, Subgraph, Table
 from kumoapi.typing import Stype
 
@@ -14,28 +17,21 @@ if TYPE_CHECKING:
 
 
 @dataclass
-class EdgeSpec:
-    num_neighbors: int | None = None
-    time_offsets: tuple[
-        pd.DateOffset | None,
-        pd.DateOffset,
-    ] | None = None
-
-    def __post_init__(self) -> None:
-        if (self.num_neighbors is None) == (self.time_offsets is None):
-            raise ValueError("Only one of 'num_neighbors' and 'time_offsets' "
-                             "must be provided")
-
-
-@dataclass
-class SamplerOutput:
+class BackwardSamplerOutput:
     df_dict: dict[str, pd.DataFrame]
     inverse_dict: dict[str, np.ndarray]
     batch_dict: dict[str, np.ndarray]
     num_sampled_nodes_dict: dict[str, list[int]]
-    row_dict: dict[tuple[str, str, str], np.ndarray]
-    col_dict: dict[tuple[str, str, str], np.ndarray]
-    num_sampled_edges_dict: dict[tuple[str, str, str], list[int]]
+    row_dict: dict[tuple[str, str, str], np.ndarray]
+    col_dict: dict[tuple[str, str, str], np.ndarray]
+    num_sampled_edges_dict: dict[tuple[str, str, str], list[int]]
+
+
+@dataclass
+class ForwardSamplerOutput:
+    entity_pkey: pd.Series
+    anchor_time: pd.Series
+    target: pd.Series
 
 
 class Sampler(ABC):
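A semantic change hides in the rename: the edge-level fields of BackwardSamplerOutput are now unconditionally populated, where the old SamplerOutput allowed None when edges were not requested (hence the asserts deleted further down). A minimal construction sketch with placeholder data (all values hypothetical):

import numpy as np
import pandas as pd
from kumoai.experimental.rfm.base import BackwardSamplerOutput

edge_type = ('orders', 'user_id', 'users')  # hypothetical edge type
out = BackwardSamplerOutput(
    df_dict={'users': pd.DataFrame({'user_id': [0, 1]})},
    inverse_dict={},
    batch_dict={'users': np.array([0, 0])},
    num_sampled_nodes_dict={'users': [2]},
    row_dict={edge_type: np.array([], dtype=np.int64)},
    col_dict={edge_type: np.array([], dtype=np.int64)},
    num_sampled_edges_dict={edge_type: [0]},
)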
@@ -104,11 +100,6 @@ class Sampler(ABC):
         exclude_cols_dict: dict[str, list[str]] | None = None,
     ) -> Subgraph:
 
-        edge_spec_dict: dict[tuple[str, str, str], list[EdgeSpec]] = {
-            edge_type: [EdgeSpec(value) for value in num_neighbors]
-            for edge_type in self.edge_types
-        }
-
         # Exclude all columns that leak target information:
         table_stype_dict: dict[str, dict[str, Stype]] = self._table_stype_dict
         if exclude_cols_dict is not None:
@@ -118,26 +109,23 @@
                     del table_stype_dict[table_name][column_name]
 
         # Collect all columns being used as features:
-
-            table_name:
+        columns_dict: dict[str, set[str]] = {
+            table_name: set(stype_dict.keys())
             for table_name, stype_dict in table_stype_dict.items()
         }
         # Make sure to store primary key information for entity tables:
         for table_name in entity_table_names:
-
-                [self.primary_key_dict[table_name]] +
-                column_spec_dict[table_name])
+            columns_dict[table_name].add(self.primary_key_dict[table_name])
 
         if anchor_time.dtype != 'datetime64[ns]':
             anchor_time = anchor_time.astype('datetime64[ns]')
-
+
+        out = self._sample_backward(
             entity_table_name=entity_table_names[0],
             entity_pkey=entity_pkey,
             anchor_time=anchor_time,
-
-
-            drop_duplicates=True,
-            return_edges=True,
+            columns_dict=columns_dict,
+            num_neighbors=num_neighbors,
         )
 
         subgraph = Subgraph(
@@ -150,14 +138,14 @@
             if len(batch) == 0:
                 continue
 
-            primary_key = None
+            primary_key: str | None = None
             if table_name in entity_table_names:
-                primary_key = self.primary_key_dict
+                primary_key = self.primary_key_dict[table_name]
 
             df = out.df_dict[table_name].reset_index(drop=True)
-            if
+            if end_time_column := self.end_time_column_dict.get(table_name):
                 # Set end time to NaT for all values greater than anchor time:
-
+                assert table_name not in out.inverse_dict
                 ser = df[end_time_column]
                 if ser.dtype != 'datetime64[ns]':
                     ser = ser.astype('datetime64[ns]')
@@ -179,9 +167,6 @@
                 primary_key=primary_key,
             )
 
-        assert out.row_dict is not None
-        assert out.col_dict is not None
-        assert out.num_sampled_edges_dict is not None
         for edge_type in out.row_dict.keys():
             row: np.ndarray | None = out.row_dict[edge_type]
             col: np.ndarray | None = out.col_dict[edge_type]
@@ -227,19 +212,101 @@
 
         return subgraph
 
+    def sample_forward(
+        self,
+        query: ValidatedPredictiveQuery,
+        num_examples: int,
+        anchor_time: pd.Timestamp | Literal['entity'],
+        random_seed: int | None = None,
+    ) -> ForwardSamplerOutput:
+
+        columns_dict: dict[str, set[str]] = defaultdict(set)
+        for fqn in query.all_query_columns + [query.entity_column]:
+            table_name, column_name = fqn.split('.')
+            columns_dict[table_name].add(column_name)
+
+        if time_column := self.time_column_dict[query.entity_table]:
+            columns_dict[table_name].add(time_column)
+        if end_time_column := self.end_time_column_dict[query.entity_table]:
+            columns_dict[table_name].add(end_time_column)
+
+        time_offset_dict: dict[
+            tuple[str, str, str],
+            tuple[pd.DateOffset | None, pd.DateOffset],
+        ] = {}
+
+        def _add_time_offset(node: ASTNode, num_forecasts: int = 1) -> None:
+            if isinstance(node, Aggregation):
+                table_name = node._get_target_column_name().split('.')[0]
+                columns_dict[table_name].add(self.time_column_dict[table_name])
+
+                edge_types = [
+                    edge_type for edge_type in self.edge_types
+                    if edge_type[0] == table_name
+                    and edge_type[2] == query.entity_table
+                ]
+                if len(edge_types) != 1:
+                    raise ValueError(f"Could not find a unique foreign key "
+                                     f"from table '{table_name}' to "
+                                     f"'{query.entity_table}'")
+                if edge_types[0] not in time_offset_dict:
+                    start = node.aggr_time_range.start_date_offset
+                    end = node.aggr_time_range.end_date_offset * num_forecasts
+                else:
+                    start, end = time_offset_dict[edge_types[0]]
+                    start = min_date_offset(
+                        start,
+                        node.aggr_time_range.start_date_offset,
+                    )
+                    end = max_date_offset(
+                        end,
+                        node.aggr_time_range.end_date_offset * num_forecasts,
+                    )
+                time_offset_dict[edge_types[0]] = (start, end)
+
+            for child in node.children:
+                _add_time_offset(child, num_forecasts)
+
+        _add_time_offset(query.target_ast, query.num_forecasts)
+        _add_time_offset(query.entity_ast)
+        if query.whatif_ast is not None:
+            _add_time_offset(query.whatif_ast)
+
+        return self._sample_forward(
+            query=query,
+            num_examples=num_examples,
+            anchor_time=anchor_time,
+            columns_dict=columns_dict,
+            time_offset_dict=time_offset_dict,
+            random_seed=random_seed,
+        )
+
     # Abstract Methods ########################################################
 
     @abstractmethod
-    def
+    def _sample_backward(
         self,
         entity_table_name: str,
         entity_pkey: pd.Series,
         anchor_time: pd.Series,
-
-
-
-
-
+        columns_dict: dict[str, set[str]],
+        num_neighbors: list[int],
+    ) -> BackwardSamplerOutput:
+        pass
+
+    @abstractmethod
+    def _sample_forward(
+        self,
+        query: ValidatedPredictiveQuery,
+        num_examples: int,
+        anchor_time: pd.Timestamp | Literal['entity'],
+        columns_dict: dict[str, set[str]],
+        time_offset_dict: dict[
+            tuple[str, str, str],
+            tuple[pd.DateOffset | None, pd.DateOffset],
+        ],
+        random_seed: int | None = None,
+    ) -> ForwardSamplerOutput:
         pass
 
 
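For context, the (start, end) bookkeeping in _add_time_offset merges the aggregation windows of every aggregation that reaches the entity table over the same foreign key, so a forward sampler can fetch a single superset of facts per edge type. A worked sketch of the merge rule, with hypothetical offsets and the min/max helpers added at the end of this file:

import pandas as pd
from kumoai.experimental.rfm.base.sampler import (max_date_offset,
                                                  min_date_offset)

# Two hypothetical aggregations over the same edge type:
# COUNT(...) over the last 30 days, and SUM(...) over the last 7 days,
# forecast twice (the end offset scales with num_forecasts, as above).
a = (pd.DateOffset(days=-30), pd.DateOffset(days=0))
b = (pd.DateOffset(days=-7), pd.DateOffset(days=1) * 2)

start = min_date_offset(a[0], b[0])  # widest lookback: -30 days
end = max_date_offset(a[1], b[1])    # furthest horizon: +2 days

A None start offset denotes an unbounded lookback, which is why min_date_offset propagates None below.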
@@ -285,3 +352,22 @@ def _normalize_text(
         ser = ser.map(normalize_fn)
 
     return ser
+
+
+def min_date_offset(*args: pd.DateOffset | None) -> pd.DateOffset | None:
+    if any(arg is None for arg in args):
+        return None
+
+    anchor = pd.Timestamp('2000-01-01')
+    timestamps = [anchor + arg for arg in args]
+    assert len(timestamps) > 0
+    argmin = min(range(len(timestamps)), key=lambda i: timestamps[i])
+    return args[argmin]
+
+
+def max_date_offset(*args: pd.DateOffset) -> pd.DateOffset:
+    anchor = pd.Timestamp('2000-01-01')
+    timestamps = [anchor + arg for arg in args]
+    assert len(timestamps) > 0
+    argmax = max(range(len(timestamps)), key=lambda i: timestamps[i])
+    return args[argmax]
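pd.DateOffset has no natural ordering (DateOffset(months=1) < DateOffset(days=25) raises TypeError, and the answer would depend on the month anyway), which is why the two helpers above rank offsets by the timestamp they produce from a fixed anchor. A quick illustration of the anchor trick, assuming only pandas:

import pandas as pd

anchor = pd.Timestamp('2000-01-01')
offsets = [pd.DateOffset(months=1), pd.DateOffset(days=25)]

# Rank offsets by the dates they yield from the fixed anchor:
smallest = min(offsets, key=lambda off: anchor + off)
# -> DateOffset(days=25), since 2000-01-26 < 2000-02-01

The fixed anchor makes the ordering deterministic, though for calendar offsets (a month spans 28-31 days) the result is anchor-dependent rather than universally correct.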
{kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/RECORD
RENAMED
@@ -1,7 +1,7 @@
 kumoai/kumolib.cpython-313-darwin.so,sha256=waBv-DiZ3WcasxiCQ-OM9EbSTgTtCfBTZIibXAK-JiQ,232816
 kumoai/_logging.py,sha256=U2_5ROdyk92P4xO4H2WJV8EC7dr6YxmmnM-b7QX9M7I,886
 kumoai/mixin.py,sha256=MP413xzuCqWhxAPUHmloLA3j4ZyF1tEtfi516b_hOXQ,812
-kumoai/_version.py,sha256=
+kumoai/_version.py,sha256=W0EIBX5oPkQ0eXYnfNBgKMhonz56bp9ySi_IPtjQoCA,39
 kumoai/__init__.py,sha256=Nn9YH_x9kAeEFn8RWbP95slZow0qFnakPZZ1WADe1hY,10843
 kumoai/formatting.py,sha256=jA_rLDCGKZI8WWCha-vtuLenVKTZvli99Tqpurz1H84,953
 kumoai/futures.py,sha256=oJFIfdCM_3nWIqQteBKYMY4fPhoYlYWE_JA2o6tx-ng,3737
@@ -24,7 +24,7 @@ kumoai/experimental/rfm/backend/sqlite/table.py,sha256=kcYpWaZKFez2Tru6Sdz-Ywk8j
 kumoai/experimental/rfm/backend/local/__init__.py,sha256=2s9sSA-E-8pfkkzCH4XPuaSxSznEURMfMgwEIfYYPsg,1014
 kumoai/experimental/rfm/backend/local/table.py,sha256=Ahob9HidpU6z_M41rK5FATa3d7CL2UzZl8pGVyrzLNc,3565
 kumoai/experimental/rfm/backend/local/graph_store.py,sha256=RpfJldemOG-4RzGSIS9EcytHbvC4gYm-Ps3a-4qfptk,13297
-kumoai/experimental/rfm/backend/local/sampler.py,sha256=
+kumoai/experimental/rfm/backend/local/sampler.py,sha256=L1S2qxvkS_O8wy4K-czTxojPmklRrReTR8P3-e_8-hM,3823
 kumoai/experimental/rfm/backend/snow/__init__.py,sha256=B-tG-p8WA-mBuwvK1f0S2gdRPEGwApdxlnyeVSnY2xg,927
 kumoai/experimental/rfm/backend/snow/table.py,sha256=sHagXhW7RifzOiB4yjxV_9FtR0KUFVIw1mYwZe4bpMg,4255
 kumoai/experimental/rfm/pquery/__init__.py,sha256=X0O3EIq5SMfBEE-ii5Cq6iDhR3s3XMXB52Cx5htoePw,152
@@ -38,9 +38,9 @@ kumoai/experimental/rfm/infer/id.py,sha256=ZIO0DWIoiEoS_8MVc5lkqBfkTWWQ0yGCgjkwL
 kumoai/experimental/rfm/infer/dtype.py,sha256=ZZ6ztqJnTR1CaC2z5Uhf0o0rSdNThnss5tem5JNQkck,2607
 kumoai/experimental/rfm/infer/__init__.py,sha256=krdMFN8iKZlSFOl-M5MW1KuSviQV3H1E18jj2uB8g6Q,469
 kumoai/experimental/rfm/infer/timestamp.py,sha256=vM9--7eStzaGG13Y-oLYlpNJyhL6f9dp17HDXwtl_DM,1094
-kumoai/experimental/rfm/base/__init__.py,sha256=
+kumoai/experimental/rfm/base/__init__.py,sha256=3haYsIYypeL-U-9RuOOPnRdWaRlh-g_yE4ACJ2KLjOY,335
 kumoai/experimental/rfm/base/table.py,sha256=yaY7Auvq2KblXOid3-a_Pw6RgnPK5Y1zGAY2xi1D2gg,19843
-kumoai/experimental/rfm/base/sampler.py,sha256=
+kumoai/experimental/rfm/base/sampler.py,sha256=b45kllqSm-lpXbP9XbrGQPMx_hEIfesJILViAanh6rk,13456
 kumoai/experimental/rfm/base/source.py,sha256=8_waFQVsctryHkm9BwmFZ9-vw5cXAXfjk7KDmcl_kic,272
 kumoai/experimental/rfm/base/column.py,sha256=izCJmufJcd1RSi-ptFMfrue-JYag38MJxizka7ya0-A,2319
 kumoai/encoder/__init__.py,sha256=VPGs4miBC_WfwWeOXeHhFomOUocERFavhKf5fqITcds,182
@@ -106,8 +106,8 @@ kumoai/trainer/baseline_trainer.py,sha256=LlfViNOmswNv4c6zJJLsyv0pC2mM2WKMGYx06o
 kumoai/trainer/__init__.py,sha256=zUdFl-f-sBWmm2x8R-rdVzPBeU2FaMzUY5mkcgoTa1k,939
 kumoai/trainer/online_serving.py,sha256=9cddb5paeZaCgbUeceQdAOxysCtV5XP-KcsgFz_XR5w,9566
 kumoai/trainer/trainer.py,sha256=hBXO7gwpo3t59zKFTeIkK65B8QRmWCwO33sbDuEAPlY,20133
-kumoai-2.13.0.
-kumoai-2.13.0.
-kumoai-2.13.0.
-kumoai-2.13.0.
-kumoai-2.13.0.
+kumoai-2.13.0.dev202512081731.dist-info/RECORD,,
+kumoai-2.13.0.dev202512081731.dist-info/WHEEL,sha256=oqGJCpG61FZJmvyZ3C_0aCv-2mdfcY9e3fXvyUNmWfM,136
+kumoai-2.13.0.dev202512081731.dist-info/top_level.txt,sha256=YjU6UcmomoDx30vEXLsOU784ED7VztQOsFApk1SFwvs,7
+kumoai-2.13.0.dev202512081731.dist-info/METADATA,sha256=ulcPeS_yowF-CWxGh5m_20ummlecVuiXBDNMgvXH-VU,2510
+kumoai-2.13.0.dev202512081731.dist-info/licenses/LICENSE,sha256=TbWlyqRmhq9PEzCaTI0H0nWLQCCOywQM8wYH8MbjfLo,1102
{kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/WHEEL
RENAMED
File without changes
{kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/licenses/LICENSE
RENAMED
File without changes
{kumoai-2.13.0.dev202512061731.dist-info → kumoai-2.13.0.dev202512081731.dist-info}/top_level.txt
RENAMED
File without changes