kumoai 2.13.0.dev202511191731__cp310-cp310-macosx_11_0_arm64.whl → 2.14.0.dev202512271732__cp310-cp310-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. kumoai/__init__.py +12 -0
  2. kumoai/_version.py +1 -1
  3. kumoai/client/client.py +6 -0
  4. kumoai/client/jobs.py +24 -0
  5. kumoai/client/pquery.py +6 -2
  6. kumoai/connector/utils.py +23 -2
  7. kumoai/experimental/rfm/__init__.py +52 -52
  8. kumoai/experimental/rfm/authenticate.py +3 -4
  9. kumoai/experimental/rfm/backend/__init__.py +0 -0
  10. kumoai/experimental/rfm/backend/local/__init__.py +42 -0
  11. kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +65 -127
  12. kumoai/experimental/rfm/backend/local/sampler.py +312 -0
  13. kumoai/experimental/rfm/backend/local/table.py +113 -0
  14. kumoai/experimental/rfm/backend/snow/__init__.py +37 -0
  15. kumoai/experimental/rfm/backend/snow/sampler.py +297 -0
  16. kumoai/experimental/rfm/backend/snow/table.py +242 -0
  17. kumoai/experimental/rfm/backend/sqlite/__init__.py +32 -0
  18. kumoai/experimental/rfm/backend/sqlite/sampler.py +398 -0
  19. kumoai/experimental/rfm/backend/sqlite/table.py +184 -0
  20. kumoai/experimental/rfm/base/__init__.py +30 -0
  21. kumoai/experimental/rfm/base/column.py +152 -0
  22. kumoai/experimental/rfm/base/expression.py +44 -0
  23. kumoai/experimental/rfm/base/sampler.py +761 -0
  24. kumoai/experimental/rfm/base/source.py +19 -0
  25. kumoai/experimental/rfm/base/sql_sampler.py +143 -0
  26. kumoai/experimental/rfm/base/table.py +753 -0
  27. kumoai/experimental/rfm/{local_graph.py → graph.py} +546 -116
  28. kumoai/experimental/rfm/infer/__init__.py +8 -0
  29. kumoai/experimental/rfm/infer/dtype.py +81 -0
  30. kumoai/experimental/rfm/infer/multicategorical.py +1 -1
  31. kumoai/experimental/rfm/infer/pkey.py +128 -0
  32. kumoai/experimental/rfm/infer/stype.py +35 -0
  33. kumoai/experimental/rfm/infer/time_col.py +61 -0
  34. kumoai/experimental/rfm/pquery/executor.py +27 -27
  35. kumoai/experimental/rfm/pquery/pandas_executor.py +30 -32
  36. kumoai/experimental/rfm/rfm.py +313 -245
  37. kumoai/experimental/rfm/sagemaker.py +15 -7
  38. kumoai/pquery/predictive_query.py +10 -6
  39. kumoai/testing/decorators.py +1 -1
  40. kumoai/testing/snow.py +50 -0
  41. kumoai/trainer/distilled_trainer.py +175 -0
  42. kumoai/utils/__init__.py +3 -2
  43. kumoai/utils/progress_logger.py +178 -12
  44. kumoai/utils/sql.py +3 -0
  45. {kumoai-2.13.0.dev202511191731.dist-info → kumoai-2.14.0.dev202512271732.dist-info}/METADATA +10 -8
  46. {kumoai-2.13.0.dev202511191731.dist-info → kumoai-2.14.0.dev202512271732.dist-info}/RECORD +49 -29
  47. kumoai/experimental/rfm/local_graph_sampler.py +0 -182
  48. kumoai/experimental/rfm/local_pquery_driver.py +0 -689
  49. kumoai/experimental/rfm/local_table.py +0 -545
  50. kumoai/experimental/rfm/utils.py +0 -344
  51. {kumoai-2.13.0.dev202511191731.dist-info → kumoai-2.14.0.dev202512271732.dist-info}/WHEEL +0 -0
  52. {kumoai-2.13.0.dev202511191731.dist-info → kumoai-2.14.0.dev202512271732.dist-info}/licenses/LICENSE +0 -0
  53. {kumoai-2.13.0.dev202511191731.dist-info → kumoai-2.14.0.dev202512271732.dist-info}/top_level.txt +0 -0
kumoai/__init__.py CHANGED
@@ -280,7 +280,19 @@ __all__ = [
 ]
 
 
+def in_snowflake_notebook() -> bool:
+    try:
+        from snowflake.snowpark.context import get_active_session
+        import streamlit  # noqa: F401
+        get_active_session()
+        return True
+    except Exception:
+        return False
+
+
 def in_notebook() -> bool:
+    if in_snowflake_notebook():
+        return True
     try:
         from IPython import get_ipython
         shell = get_ipython()
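The net effect: `in_notebook()` now also reports `True` inside Snowflake notebooks, where no IPython shell may be available. A minimal check (hypothetical call site):

```python
import kumoai

# Snowflake detection runs first; the IPython shell probe is the fallback.
if kumoai.in_notebook():
    print("notebook environment detected (IPython or Snowflake)")
```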
kumoai/_version.py CHANGED
@@ -1 +1 @@
-__version__ = '2.13.0.dev202511191731'
+__version__ = '2.14.0.dev202512271732'
kumoai/client/client.py CHANGED
@@ -13,6 +13,7 @@ if TYPE_CHECKING:
         ArtifactExportJobAPI,
         BaselineJobAPI,
         BatchPredictionJobAPI,
+        DistillationJobAPI,
         GeneratePredictionTableJobAPI,
         GenerateTrainTableJobAPI,
         LLMJobAPI,
@@ -132,6 +133,11 @@ class KumoClient:
         from kumoai.client.jobs import TrainingJobAPI
         return TrainingJobAPI(self)
 
+    @property
+    def distillation_job_api(self) -> 'DistillationJobAPI':
+        from kumoai.client.jobs import DistillationJobAPI
+        return DistillationJobAPI(self)
+
     @property
     def batch_prediction_job_api(self) -> 'BatchPredictionJobAPI':
         from kumoai.client.jobs import BatchPredictionJobAPI
kumoai/client/jobs.py CHANGED
@@ -22,6 +22,8 @@ from kumoapi.jobs import (
     BatchPredictionRequest,
     CancelBatchPredictionJobResponse,
     CancelTrainingJobResponse,
+    DistillationJobRequest,
+    DistillationJobResource,
     ErrorDetails,
     GeneratePredictionTableJobResource,
     GeneratePredictionTableRequest,
@@ -171,6 +173,28 @@ class TrainingJobAPI(CommonJobAPI[TrainingJobRequest, TrainingJobResource]):
         return resource.config
 
 
+class DistillationJobAPI(CommonJobAPI[DistillationJobRequest,
+                                      DistillationJobResource]):
+    r"""Typed API definition for the distillation job resource."""
+    def __init__(self, client: KumoClient) -> None:
+        super().__init__(client, '/training_jobs/distilled_training_job',
+                         DistillationJobResource)
+
+    def get_config(self, job_id: str) -> DistillationJobRequest:
+        raise NotImplementedError(
+            "Getting the configuration for a distillation job is "
+            "not implemented yet.")
+
+    def get_progress(self, id: str) -> AutoTrainerProgress:
+        raise NotImplementedError(
+            "Getting the progress for a distillation job is not "
+            "implemented yet.")
+
+    def cancel(self, id: str) -> CancelTrainingJobResponse:
+        raise NotImplementedError(
+            "Cancelling a distillation job is not implemented yet.")
+
+
 class BatchPredictionJobAPI(CommonJobAPI[BatchPredictionRequest,
                                          BatchPredictionJobResource]):
     r"""Typed API definition for the prediction job resource."""
kumoai/client/pquery.py CHANGED
@@ -176,8 +176,12 @@ def filter_model_plan(
             # Undefined
             pass
 
-        new_opt_fields.append((field.name, _type, default))
-        new_opts.append(getattr(section, field.name))
+        # Forward compatibility - Remove any newly introduced arguments not
+        # returned yet by the backend:
+        value = getattr(section, field.name)
+        if value != MissingType.VALUE:
+            new_opt_fields.append((field.name, _type, default))
+            new_opts.append(value)
 
     Section = dataclass(
         config=dict(validate_assignment=True),
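The pattern: skip any option field the backend left at its missing sentinel before rebuilding the options dataclass. A standalone sketch of the idea, with a hypothetical `MissingType` standing in for the SDK's sentinel:

```python
from dataclasses import make_dataclass
from enum import Enum

class MissingType(Enum):  # hypothetical stand-in for the SDK's sentinel
    VALUE = 'missing'

def keep_returned_fields(section, fields):
    """Drop fields the backend has not returned yet (still the sentinel)."""
    new_opt_fields, new_opts = [], []
    for name, type_, default in fields:
        value = getattr(section, name)
        if value != MissingType.VALUE:
            new_opt_fields.append((name, type_, default))
            new_opts.append(value)
    return make_dataclass('Section', new_opt_fields), new_opts
```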
kumoai/connector/utils.py CHANGED
@@ -381,8 +381,29 @@ def _handle_duplicate_names(names: List[str]) -> List[str]:
 
 
 def _sanitize_columns(names: List[str]) -> Tuple[List[str], bool]:
-    _SAN_RE = re.compile(r"[^0-9A-Za-z]+")
+    """Normalize column names in a CSV or Parquet file.
+
+    Rules:
+    - Replace any non-alphanumeric character with "_"
+    - Strip leading/trailing underscores
+    - Ensure uniqueness by appending suffixes: _1, _2, ...
+    - Auto-name empty columns as auto_named_<n>
+
+    Returns:
+        (new_column_names, changed)
+    """
+    _SAN_RE = re.compile(r"[^0-9A-Za-z,\t]")
+    # 1) Replace non-alphanumeric sequences with underscore
     new = [_SAN_RE.sub("_", n).strip("_") for n in names]
+
+    # 2) Auto-name any empty column names to match UI behavior
+    unnamed_counter = 0
+    for i, n in enumerate(new):
+        if not n:
+            new[i] = f"auto_named_{unnamed_counter}"
+            unnamed_counter += 1
+
+    # 3) Ensure uniqueness (append suffixes where needed)
     new = _handle_duplicate_names(new)
     return new, new != names
 
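As a quick illustration of those rules (a standalone sketch using the simpler pre-change pattern; the new `_SAN_RE` above additionally exempts commas and tabs):

```python
import re

def sanitize(names: list[str]) -> list[str]:
    # Non-alphanumerics -> "_", strip "_", auto-name empties, de-duplicate.
    new = [re.sub(r"[^0-9A-Za-z]+", "_", n).strip("_") for n in names]
    for i, n in enumerate(new):
        if not n:
            new[i] = f"auto_named_{i}"  # the SDK uses a separate counter
    seen: dict[str, int] = {}
    for i, n in enumerate(new):
        if n in seen:
            seen[n] += 1
            new[i] = f"{n}_{seen[n]}"
        else:
            seen[n] = 0
    return new

print(sanitize(["user id", "user-id", "", "amount($)"]))
# ['user_id', 'user_id_1', 'auto_named_2', 'amount']
```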
@@ -1168,7 +1189,7 @@ def _detect_and_validate_csv(head_bytes: bytes) -> str:
     - Re-serializes those rows and validates with pandas (small nrows) to catch
       malformed inputs.
     - Raises ValueError on empty input or if parsing fails with the chosen
-    delimiter.
+      delimiter.
     """
     if not head_bytes:
         raise ValueError("Could not auto-detect a delimiter: file is empty.")
kumoai/experimental/rfm/__init__.py CHANGED
@@ -1,54 +1,26 @@
-try:
-    import kumoai.kumolib  # noqa: F401
-except Exception as e:
-    import platform
-
-    _msg = f"""RFM is not supported in your environment.
-
-💻 Your Environment:
-  Python version: {platform.python_version()}
-  Operating system: {platform.system()}
-  CPU architecture: {platform.machine()}
-  glibc version: {platform.libc_ver()[1]}
-
-✅ Supported Environments:
-  * Python versions: 3.10, 3.11, 3.12, 3.13
-  * Operating systems and CPU architectures:
-    * Linux (x86_64)
-    * macOS (arm64)
-    * Windows (x86_64)
-  * glibc versions: >=2.28
-
-❌ Unsupported Environments:
-  * Python versions: 3.8, 3.9, 3.14
-  * Operating systems and CPU architectures:
-    * Linux (arm64)
-    * macOS (x86_64)
-    * Windows (arm64)
-  * glibc versions: <2.28
-
-Please create a feature request at 'https://github.com/kumo-ai/kumo-rfm'."""
-
-    raise RuntimeError(_msg) from e
-
-from dataclasses import dataclass
-from enum import Enum
 import ipaddress
 import logging
+import os
 import re
 import socket
 import threading
-from typing import Optional, Dict, Tuple
-import os
+from dataclasses import dataclass
+from enum import Enum
 from urllib.parse import urlparse
+
 import kumoai
 from kumoai.client.client import KumoClient
-from .sagemaker import (KumoClient_SageMakerAdapter,
-                        KumoClient_SageMakerProxy_Local)
-from .local_table import LocalTable
-from .local_graph import LocalGraph
-from .rfm import ExplainConfig, Explanation, KumoRFM
+from kumoai.spcs import _get_active_session
+
 from .authenticate import authenticate
+from .sagemaker import (
+    KumoClient_SageMakerAdapter,
+    KumoClient_SageMakerProxy_Local,
+)
+from .base import Table
+from .backend.local import LocalTable
+from .graph import Graph
+from .rfm import ExplainConfig, Explanation, KumoRFM
 
 logger = logging.getLogger('kumoai_rfm')
 
@@ -77,7 +49,8 @@ class InferenceBackend(str, Enum):
 
 
 def _detect_backend(
-        url: str) -> Tuple[InferenceBackend, Optional[str], Optional[str]]:
+    url: str,  #
+) -> tuple[InferenceBackend, str | None, str | None]:
     parsed = urlparse(url)
 
     # Remote SageMaker
@@ -101,12 +74,27 @@ def _detect_backend(
     return InferenceBackend.REST, None, None
 
 
+def _get_snowflake_url(snowflake_application: str) -> str:
+    snowpark_session = _get_active_session()
+    if not snowpark_session:
+        raise ValueError(
+            "Client creation failed: snowflake_application is specified "
+            "without an active snowpark session. If running outside "
+            "a snowflake notebook, specify a URL and credentials.")
+    with snowpark_session.connection.cursor() as cur:
+        cur.execute(
+            f"DESCRIBE SERVICE {snowflake_application}.user_schema.rfm_service"
+            f" ->> SELECT \"dns_name\" from $1")
+        dns_name: str = cur.fetchone()[0]
+    return f"http://{dns_name}:8000/api"
+
+
 @dataclass
 class RfmGlobalState:
     _url: str = '__url_not_provided__'
     _backend: InferenceBackend = InferenceBackend.UNKNOWN
-    _region: Optional[str] = None
-    _endpoint_name: Optional[str] = None
+    _region: str | None = None
+    _endpoint_name: str | None = None
     _thread_local = threading.local()
 
     # Thread-safe init-once.
@@ -149,10 +137,10 @@ global_state = RfmGlobalState()
 
 
 def init(
-    url: Optional[str] = None,
-    api_key: Optional[str] = None,
-    snowflake_credentials: Optional[Dict[str, str]] = None,
-    snowflake_application: Optional[str] = None,
+    url: str | None = None,
+    api_key: str | None = None,
+    snowflake_credentials: dict[str, str] | None = None,
+    snowflake_application: str | None = None,
     log_level: str = "INFO",
 ) -> None:
     with global_state._lock:
@@ -164,6 +152,15 @@ def init(
                     "supported.")
             return
 
+        if snowflake_application:
+            if url is not None:
+                raise ValueError(
+                    "Client creation failed: both snowflake_application and "
+                    "url are specified. If running from a snowflake notebook, "
+                    "specify only snowflake_application.")
+            url = _get_snowflake_url(snowflake_application)
+            api_key = "test:DISABLED"
+
         if url is None:
            url = os.getenv("RFM_API_URL", "https://kumorfm.ai/api")
 
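Taken together, a Snowflake notebook user can now initialize the client from the installed application alone, while everyone else keeps the URL/API-key path. A hypothetical pair of alternative calls (assuming an active Snowpark session and an app named MY_APP; use one or the other, not both):

```python
import kumoai.experimental.rfm as rfm

# Inside a Snowflake notebook: URL resolved via DESCRIBE SERVICE, key stubbed.
rfm.init(snowflake_application="MY_APP")

# Elsewhere: the pre-existing explicit path.
rfm.init(url="https://kumorfm.ai/api", api_key="...")
```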
@@ -197,12 +194,15 @@
             url)
 
 
+LocalGraph = Graph  # NOTE Backward compatibility - do not use anymore.
+
 __all__ = [
+    'authenticate',
+    'init',
+    'Table',
     'LocalTable',
-    'LocalGraph',
+    'Graph',
     'KumoRFM',
     'ExplainConfig',
     'Explanation',
-    'authenticate',
-    'init',
 ]
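Because the alias is kept as a module attribute, existing imports continue to resolve even though `LocalGraph` left `__all__`; for example:

```python
from kumoai.experimental.rfm import Graph, LocalGraph

assert LocalGraph is Graph  # backward-compatible alias; prefer Graph
```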
kumoai/experimental/rfm/authenticate.py CHANGED
@@ -2,12 +2,11 @@ import logging
 import os
 import platform
 from datetime import datetime
-from typing import Optional
 
 from kumoai import in_notebook
 
 
-def authenticate(api_url: Optional[str] = None) -> None:
+def authenticate(api_url: str | None = None) -> None:
     """Authenticates the user and sets the Kumo API key for the SDK.
 
     This function detects the current environment and launches the appropriate
@@ -65,11 +64,11 @@ def _authenticate_local(api_url: str, redirect_port: int = 8765) -> None:
     import webbrowser
     from getpass import getpass
     from socketserver import TCPServer
-    from typing import Any, Dict
+    from typing import Any
 
     logger = logging.getLogger('kumoai')
 
-    token_status: Dict[str, Any] = {
+    token_status: dict[str, Any] = {
         'token': None,
         'token_name': None,
         'failed': False
kumoai/experimental/rfm/backend/__init__.py ADDED
File without changes
kumoai/experimental/rfm/backend/local/__init__.py ADDED
@@ -0,0 +1,42 @@
+try:
+    import kumoai.kumolib  # noqa: F401
+except Exception as e:
+    import platform
+
+    _msg = f"""RFM is not supported in your environment.
+
+💻 Your Environment:
+  Python version: {platform.python_version()}
+  Operating system: {platform.system()}
+  CPU architecture: {platform.machine()}
+  glibc version: {platform.libc_ver()[1]}
+
+✅ Supported Environments:
+  * Python versions: 3.10, 3.11, 3.12, 3.13
+  * Operating systems and CPU architectures:
+    * Linux (x86_64)
+    * macOS (arm64)
+    * Windows (x86_64)
+  * glibc versions: >=2.28
+
+❌ Unsupported Environments:
+  * Python versions: 3.8, 3.9, 3.14
+  * Operating systems and CPU architectures:
+    * Linux (arm64)
+    * macOS (x86_64)
+    * Windows (arm64)
+  * glibc versions: <2.28
+
+Please create a feature request at 'https://github.com/kumo-ai/kumo-rfm'."""
+
+    raise RuntimeError(_msg) from e
+
+from .table import LocalTable
+from .graph_store import LocalGraphStore
+from .sampler import LocalSampler
+
+__all__ = [
+    'LocalTable',
+    'LocalGraphStore',
+    'LocalSampler',
+]
kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} RENAMED
@@ -1,14 +1,12 @@
-import warnings
-from typing import Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING
 
 import numpy as np
 import pandas as pd
 from kumoapi.rfm.context import Subgraph
-from kumoapi.typing import Stype
 
-from kumoai.experimental.rfm import LocalGraph
-from kumoai.experimental.rfm.utils import normalize_text
-from kumoai.utils import InteractiveProgressLogger, ProgressLogger
+from kumoai.experimental.rfm.backend.local import LocalTable
+from kumoai.experimental.rfm.base import Table
+from kumoai.utils import ProgressLogger
 
 try:
     import torch
@@ -16,43 +14,40 @@ try:
 except ImportError:
     WITH_TORCH = False
 
+if TYPE_CHECKING:
+    from kumoai.experimental.rfm import Graph
+
 
 
 class LocalGraphStore:
     def __init__(
         self,
-        graph: LocalGraph,
-        preprocess: bool = False,
-        verbose: Union[bool, ProgressLogger] = True,
+        graph: 'Graph',
+        verbose: bool | ProgressLogger = True,
     ) -> None:
 
         if not isinstance(verbose, ProgressLogger):
-            verbose = InteractiveProgressLogger(
-                "Materializing graph",
+            verbose = ProgressLogger.default(
+                msg="Materializing graph",
                 verbose=verbose,
             )
 
         with verbose as logger:
-            self.df_dict, self.mask_dict = self.sanitize(graph, preprocess)
-            self.stype_dict = self.get_stype_dict(graph)
+            self.df_dict, self.mask_dict = self.sanitize(graph)
             logger.log("Sanitized input data")
 
-            self.pkey_name_dict, self.pkey_map_dict = self.get_pkey_data(graph)
+            self.pkey_map_dict = self.get_pkey_map_dict(graph)
             num_pkeys = sum(t.has_primary_key() for t in graph.tables.values())
             if num_pkeys > 1:
                 logger.log(f"Collected primary keys from {num_pkeys} tables")
             else:
                 logger.log(f"Collected primary key from {num_pkeys} table")
 
-            (
-                self.time_column_dict,
-                self.end_time_column_dict,
-                self.time_dict,
-                self.min_time,
-                self.max_time,
-            ) = self.get_time_data(graph)
-            if self.max_time != pd.Timestamp.min:
+            self.time_dict, self.min_max_time_dict = self.get_time_data(graph)
+            if len(self.min_max_time_dict) > 0:
+                min_time = min(t for t, _ in self.min_max_time_dict.values())
+                max_time = max(t for _, t in self.min_max_time_dict.values())
                 logger.log(f"Identified temporal graph from "
-                           f"{self.min_time.date()} to {self.max_time.date()}")
+                           f"{min_time.date()} to {max_time.date()}")
             else:
                 logger.log("Identified static graph without timestamps")
 
@@ -62,14 +57,6 @@ class LocalGraphStore:
         logger.log(f"Created graph with {num_nodes:,} nodes and "
                    f"{num_edges:,} edges")
 
-    @property
-    def node_types(self) -> List[str]:
-        return list(self.df_dict.keys())
-
-    @property
-    def edge_types(self) -> List[Tuple[str, str, str]]:
-        return list(self.row_dict.keys())
-
     def get_node_id(self, table_name: str, pkey: pd.Series) -> np.ndarray:
         r"""Returns the node ID given primary keys.
 
@@ -105,9 +92,8 @@
 
     def sanitize(
         self,
-        graph: LocalGraph,
-        preprocess: bool = False,
-    ) -> Tuple[Dict[str, pd.DataFrame], Dict[str, np.ndarray]]:
+        graph: 'Graph',
+    ) -> tuple[dict[str, pd.DataFrame], dict[str, np.ndarray]]:
         r"""Sanitizes raw data according to table schema definition:
 
         In particular, it:
@@ -115,42 +101,25 @@
         * drops timezone information from timestamps
         * drops duplicate primary keys
         * removes rows with missing primary keys or time values
-
-        If ``preprocess`` is set to ``True``, it will additionally pre-process
-        data for faster model processing. In particular, it:
-        * tokenizes any text column that is not a foreign key
         """
-        df_dict: Dict[str, pd.DataFrame] = {
-            table_name: table._data.copy(deep=False).reset_index(drop=True)
-            for table_name, table in graph.tables.items()
-        }
-
-        foreign_keys = {(edge.src_table, edge.fkey) for edge in graph.edges}
+        df_dict: dict[str, pd.DataFrame] = {}
+        for table_name, table in graph.tables.items():
+            assert isinstance(table, LocalTable)
+            df_dict[table_name] = Table._sanitize(
+                df=table._data.copy(deep=False).reset_index(drop=True),
+                dtype_dict={
+                    column.name: column.dtype
+                    for column in table.columns
+                },
+                stype_dict={
+                    column.name: column.stype
+                    for column in table.columns
+                },
+            )
 
-        mask_dict: Dict[str, np.ndarray] = {}
+        mask_dict: dict[str, np.ndarray] = {}
         for table in graph.tables.values():
-            for col in table.columns:
-                if col.stype == Stype.timestamp:
-                    ser = df_dict[table.name][col.name]
-                    if not pd.api.types.is_datetime64_any_dtype(ser):
-                        with warnings.catch_warnings():
-                            warnings.filterwarnings(
-                                'ignore',
-                                message='Could not infer format',
-                            )
-                            ser = pd.to_datetime(ser, errors='coerce')
-                        df_dict[table.name][col.name] = ser
-                    if isinstance(ser.dtype, pd.DatetimeTZDtype):
-                        ser = ser.dt.tz_localize(None)
-                        df_dict[table.name][col.name] = ser
-
-                # Normalize text in advance (but exclude foreign keys):
-                if (preprocess and col.stype == Stype.text
-                        and (table.name, col.name) not in foreign_keys):
-                    ser = df_dict[table.name][col.name]
-                    df_dict[table.name][col.name] = normalize_text(ser)
-
-            mask: Optional[np.ndarray] = None
+            mask: np.ndarray | None = None
             if table._time_column is not None:
                 ser = df_dict[table.name][table._time_column]
                 mask = ser.notna().to_numpy()
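The sanitation rules in the docstring map onto plain pandas operations; the timestamp handling now delegated to `Table._sanitize` mirrors the removed block above, roughly as follows (a sketch, not the SDK's actual helper):

```python
import pandas as pd

def coerce_timestamps(ser: pd.Series) -> pd.Series:
    # Parse leniently (bad values become NaT), then drop timezone info:
    if not pd.api.types.is_datetime64_any_dtype(ser):
        ser = pd.to_datetime(ser, errors='coerce')
    if isinstance(ser.dtype, pd.DatetimeTZDtype):
        ser = ser.dt.tz_localize(None)
    return ser

ser = pd.Series(["2024-01-01T00:00:00+02:00", "2024-01-02T12:30:00+02:00"])
print(coerce_timestamps(ser).dtype)  # datetime64[ns], timezone stripped
```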
@@ -165,34 +134,16 @@
 
         return df_dict, mask_dict
 
-    def get_stype_dict(self, graph: LocalGraph) -> Dict[str, Dict[str, Stype]]:
-        stype_dict: Dict[str, Dict[str, Stype]] = {}
-        foreign_keys = {(edge.src_table, edge.fkey) for edge in graph.edges}
-        for table in graph.tables.values():
-            stype_dict[table.name] = {}
-            for column in table.columns:
-                if column == table.primary_key:
-                    continue
-                if (table.name, column.name) in foreign_keys:
-                    continue
-                stype_dict[table.name][column.name] = column.stype
-        return stype_dict
-
-    def get_pkey_data(
+    def get_pkey_map_dict(
         self,
-        graph: LocalGraph,
-    ) -> Tuple[
-        Dict[str, str],
-        Dict[str, pd.DataFrame],
-    ]:
-        pkey_name_dict: Dict[str, str] = {}
-        pkey_map_dict: Dict[str, pd.DataFrame] = {}
+        graph: 'Graph',
+    ) -> dict[str, pd.DataFrame]:
+        pkey_map_dict: dict[str, pd.DataFrame] = {}
 
         for table in graph.tables.values():
            if table._primary_key is None:
                continue
 
-            pkey_name_dict[table.name] = table._primary_key
            pkey = self.df_dict[table.name][table._primary_key]
            pkey_map = pd.DataFrame(
                dict(arange=range(len(pkey))),
@@ -214,61 +165,48 @@
 
             pkey_map_dict[table.name] = pkey_map
 
-        return pkey_name_dict, pkey_map_dict
+        return pkey_map_dict
 
     def get_time_data(
         self,
-        graph: LocalGraph,
-    ) -> Tuple[
-        Dict[str, str],
-        Dict[str, str],
-        Dict[str, np.ndarray],
-        pd.Timestamp,
-        pd.Timestamp,
+        graph: 'Graph',
+    ) -> tuple[
+        dict[str, np.ndarray],
+        dict[str, tuple[pd.Timestamp, pd.Timestamp]],
     ]:
-        time_column_dict: Dict[str, str] = {}
-        end_time_column_dict: Dict[str, str] = {}
-        time_dict: Dict[str, np.ndarray] = {}
-        min_time = pd.Timestamp.max
-        max_time = pd.Timestamp.min
+        time_dict: dict[str, np.ndarray] = {}
+        min_max_time_dict: dict[str, tuple[pd.Timestamp, pd.Timestamp]] = {}
         for table in graph.tables.values():
-            if table._end_time_column is not None:
-                end_time_column_dict[table.name] = table._end_time_column
-
             if table._time_column is None:
                 continue
 
             time = self.df_dict[table.name][table._time_column]
-            time_dict[table.name] = time.astype('datetime64[ns]').astype(
-                int).to_numpy() // 1000**3
-            time_column_dict[table.name] = table._time_column
+            time_dict[table.name] = time.astype(int).to_numpy() // 1000**3
 
             if table.name in self.mask_dict.keys():
                 time = time[self.mask_dict[table.name]]
             if len(time) > 0:
-                min_time = min(min_time, time.min())
-                max_time = max(max_time, time.max())
+                min_max_time_dict[table.name] = (time.min(), time.max())
+            else:
+                min_max_time_dict[table.name] = (
+                    pd.Timestamp.max,
+                    pd.Timestamp.min,
+                )
 
-        return (
-            time_column_dict,
-            end_time_column_dict,
-            time_dict,
-            min_time,
-            max_time,
-        )
+        return time_dict, min_max_time_dict
 
     def get_csc(
         self,
-        graph: LocalGraph,
-    ) -> Tuple[
-        Dict[Tuple[str, str, str], np.ndarray],
-        Dict[Tuple[str, str, str], np.ndarray],
+        graph: 'Graph',
+    ) -> tuple[
+        dict[tuple[str, str, str], np.ndarray],
+        dict[tuple[str, str, str], np.ndarray],
     ]:
         # A mapping from raw primary keys to node indices (0 to N-1):
-        map_dict: Dict[str, pd.CategoricalDtype] = {}
+        map_dict: dict[str, pd.CategoricalDtype] = {}
         # A dictionary to manage offsets of node indices for invalid rows:
-        offset_dict: Dict[str, np.ndarray] = {}
-        for table_name in set(edge.dst_table for edge in graph.edges):
+        offset_dict: dict[str, np.ndarray] = {}
+        for table_name in {edge.dst_table for edge in graph.edges}:
             ser = self.df_dict[table_name][graph[table_name]._primary_key]
             if table_name in self.mask_dict.keys():
                 mask = self.mask_dict[table_name]
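The timestamp-to-integer conversion above stores whole epoch seconds; a quick standalone check of the arithmetic:

```python
import pandas as pd

ts = pd.Series(pd.to_datetime(["2024-01-01", "2024-06-01"]))
secs = ts.astype("int64").to_numpy() // 1000**3  # ns -> s
print(secs)  # [1704067200 1717200000]
```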
@@ -277,8 +215,8 @@
             map_dict[table_name] = pd.CategoricalDtype(ser, ordered=True)
 
         # Build CSC graph representation:
-        row_dict: Dict[Tuple[str, str, str], np.ndarray] = {}
-        colptr_dict: Dict[Tuple[str, str, str], np.ndarray] = {}
+        row_dict: dict[tuple[str, str, str], np.ndarray] = {}
+        colptr_dict: dict[tuple[str, str, str], np.ndarray] = {}
         for src_table, fkey, dst_table in graph.edges:
             src_df = self.df_dict[src_table]
             dst_df = self.df_dict[dst_table]
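The `map_dict` trick uses ordered categoricals to translate raw primary keys into dense node indices in one vectorized step, with `-1` flagging dangling foreign keys; a minimal illustration:

```python
import pandas as pd

pkeys = pd.Series([10, 20, 30])                  # destination primary keys
dtype = pd.CategoricalDtype(pkeys, ordered=True)

fkeys = pd.Series([30, 10, 10, 99])              # source-table foreign keys
codes = fkeys.astype(dtype).cat.codes.to_numpy()
print(codes)  # [ 2  0  0 -1] -> 99 has no match and maps to -1
```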
@@ -340,7 +278,7 @@ def _argsort(input: np.ndarray) -> np.ndarray:
     return torch.from_numpy(input).argsort().numpy()
 
 
-def _lexsort(inputs: List[np.ndarray]) -> np.ndarray:
+def _lexsort(inputs: list[np.ndarray]) -> np.ndarray:
     assert len(inputs) >= 1
 
     if not WITH_TORCH:
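For reference, the standard NumPy primitive for lexicographic sorting (which a pure-NumPy fallback would typically build on) is `np.lexsort`, which treats the last array as the primary key; a quick illustration of that convention:

```python
import numpy as np

a = np.array([1, 0, 1, 0])
b = np.array([3, 2, 1, 0])
order = np.lexsort((b, a))  # sort by a, break ties by b
print(order)  # [3 1 2 0]
```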