kumoai 2.13.0.dev202511231731__cp312-cp312-macosx_11_0_arm64.whl → 2.13.0.dev202512011731__cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kumoai/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '2.13.0.dev202511231731'
+ __version__ = '2.13.0.dev202512011731'
kumoai/connector/utils.py CHANGED
@@ -381,8 +381,29 @@ def _handle_duplicate_names(names: List[str]) -> List[str]:
 
 
 def _sanitize_columns(names: List[str]) -> Tuple[List[str], bool]:
-    _SAN_RE = re.compile(r"[^0-9A-Za-z]+")
+    """Normalize column names in a CSV or Parquet file.
+
+    Rules:
+    - Replace any non-alphanumeric character with "_"
+    - Strip leading/trailing underscores
+    - Ensure uniqueness by appending suffixes: _1, _2, ...
+    - Auto-name empty columns as auto_named_<n>
+
+    Returns:
+        (new_column_names, changed)
+    """
+    _SAN_RE = re.compile(r"[^0-9A-Za-z,\t]")
+    # 1) Replace non-alphanumeric sequences with underscore
     new = [_SAN_RE.sub("_", n).strip("_") for n in names]
+
+    # 2) Auto-name any empty column names to match UI behavior
+    unnamed_counter = 0
+    for i, n in enumerate(new):
+        if not n:
+            new[i] = f"auto_named_{unnamed_counter}"
+            unnamed_counter += 1
+
+    # 3) Ensure uniqueness (append suffixes where needed)
     new = _handle_duplicate_names(new)
     return new, new != names
 
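The new `_sanitize_columns` behavior can be illustrated with a small standalone sketch. The function name and the inline uniqueness pass below are stand-ins (the packaged code delegates to `_handle_duplicate_names`, which is not shown here), and the pattern follows the docstring rule of replacing any non-alphanumeric character; the packaged pattern `[^0-9A-Za-z,\t]` additionally preserves commas and tabs, presumably because it runs over a raw delimited header line.

    import re
    from typing import Dict, List, Tuple

    def sanitize_columns_sketch(names: List[str]) -> Tuple[List[str], bool]:
        # Replace unsupported characters with "_" and strip leading/trailing "_":
        new = [re.sub(r"[^0-9A-Za-z]+", "_", n).strip("_") for n in names]

        # Auto-name empty columns: auto_named_0, auto_named_1, ...
        counter = 0
        for i, n in enumerate(new):
            if not n:
                new[i] = f"auto_named_{counter}"
                counter += 1

        # Ensure uniqueness by appending _1, _2, ... to repeated names
        # (simplified; collisions with pre-existing suffixed names are ignored):
        seen: Dict[str, int] = {}
        for i, n in enumerate(new):
            if n in seen:
                seen[n] += 1
                new[i] = f"{n}_{seen[n]}"
            else:
                seen[n] = 0
        return new, new != names

    # sanitize_columns_sketch(["order id", "order id", ""])
    # -> (["order_id", "order_id_1", "auto_named_0"], True)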
@@ -1168,7 +1189,7 @@ def _detect_and_validate_csv(head_bytes: bytes) -> str:
     - Re-serializes those rows and validates with pandas (small nrows) to catch
       malformed inputs.
     - Raises ValueError on empty input or if parsing fails with the chosen
-      delimiter.
+      delimiter.
     """
     if not head_bytes:
         raise ValueError("Could not auto-detect a delimiter: file is empty.")
@@ -1,54 +1,26 @@
-try:
-    import kumoai.kumolib  # noqa: F401
-except Exception as e:
-    import platform
-
-    _msg = f"""RFM is not supported in your environment.
-
-💻 Your Environment:
-  Python version: {platform.python_version()}
-  Operating system: {platform.system()}
-  CPU architecture: {platform.machine()}
-  glibc version: {platform.libc_ver()[1]}
-
-✅ Supported Environments:
-  * Python versions: 3.10, 3.11, 3.12, 3.13
-  * Operating systems and CPU architectures:
-    * Linux (x86_64)
-    * macOS (arm64)
-    * Windows (x86_64)
-  * glibc versions: >=2.28
-
-❌ Unsupported Environments:
-  * Python versions: 3.8, 3.9, 3.14
-  * Operating systems and CPU architectures:
-    * Linux (arm64)
-    * macOS (x86_64)
-    * Windows (arm64)
-  * glibc versions: <2.28
-
-Please create a feature request at 'https://github.com/kumo-ai/kumo-rfm'."""
-
-    raise RuntimeError(_msg) from e
-
-from dataclasses import dataclass
-from enum import Enum
 import ipaddress
 import logging
+import os
 import re
 import socket
 import threading
-from typing import Optional, Dict, Tuple
-import os
+from dataclasses import dataclass
+from enum import Enum
+from typing import Dict, Optional, Tuple
 from urllib.parse import urlparse
+
 import kumoai
 from kumoai.client.client import KumoClient
-from .sagemaker import (KumoClient_SageMakerAdapter,
-                        KumoClient_SageMakerProxy_Local)
-from .local_table import LocalTable
-from .local_graph import LocalGraph
-from .rfm import ExplainConfig, Explanation, KumoRFM
+
 from .authenticate import authenticate
+from .sagemaker import (
+    KumoClient_SageMakerAdapter,
+    KumoClient_SageMakerProxy_Local,
+)
+from .base import Table
+from .backend.local import LocalTable
+from .graph import Graph
+from .rfm import ExplainConfig, Explanation, KumoRFM
 
 logger = logging.getLogger('kumoai_rfm')
 
@@ -197,12 +169,15 @@ def init(
         url)
 
 
+LocalGraph = Graph  # NOTE Backward compatibility - do not use anymore.
+
 __all__ = [
+    'authenticate',
+    'init',
+    'Table',
     'LocalTable',
-    'LocalGraph',
+    'Graph',
     'KumoRFM',
     'ExplainConfig',
     'Explanation',
-    'authenticate',
-    'init',
 ]
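The rename is backward compatible: `LocalGraph` stays importable as an alias of `Graph`. A minimal sketch, assuming the module path `kumoai.experimental.rfm` used in the docstrings further below:

    import kumoai.experimental.rfm as rfm

    # The old name still resolves to the new class:
    assert rfm.LocalGraph is rfm.Graph

    # New code should prefer rfm.Graph, rfm.Table and rfm.LocalTable directly.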
@@ -0,0 +1,38 @@
+try:
+    import kumoai.kumolib  # noqa: F401
+except Exception as e:
+    import platform
+
+    _msg = f"""RFM is not supported in your environment.
+
+💻 Your Environment:
+  Python version: {platform.python_version()}
+  Operating system: {platform.system()}
+  CPU architecture: {platform.machine()}
+  glibc version: {platform.libc_ver()[1]}
+
+✅ Supported Environments:
+  * Python versions: 3.10, 3.11, 3.12, 3.13
+  * Operating systems and CPU architectures:
+    * Linux (x86_64)
+    * macOS (arm64)
+    * Windows (x86_64)
+  * glibc versions: >=2.28
+
+❌ Unsupported Environments:
+  * Python versions: 3.8, 3.9, 3.14
+  * Operating systems and CPU architectures:
+    * Linux (arm64)
+    * macOS (x86_64)
+    * Windows (arm64)
+  * glibc versions: <2.28
+
+Please create a feature request at 'https://github.com/kumo-ai/kumo-rfm'."""
+
+    raise RuntimeError(_msg) from e
+
+from .table import LocalTable
+
+__all__ = [
+    'LocalTable',
+]
@@ -0,0 +1,151 @@
+from typing import List, Optional
+
+import pandas as pd
+from kumoapi.typing import Dtype, Stype
+from typing_extensions import Self
+
+from kumoai.experimental.rfm import utils
+from kumoai.experimental.rfm.base import Column, Table
+
+
+class LocalTable(Table):
+    r"""A table backed by a :class:`pandas.DataFrame`.
+
+    A :class:`LocalTable` fully specifies the relevant metadata, *i.e.*
+    selected columns, column semantic types, primary keys and time columns.
+    :class:`LocalTable` is used to create a :class:`Graph`.
+
+    .. code-block:: python
+
+        import pandas as pd
+        import kumoai.experimental.rfm as rfm
+
+        # Load data from a CSV file:
+        df = pd.read_csv("data.csv")
+
+        # Create a table from a `pandas.DataFrame` and infer its metadata ...
+        table = rfm.LocalTable(df, name="my_table").infer_metadata()
+
+        # ... or create a table explicitly:
+        table = rfm.LocalTable(
+            df=df,
+            name="my_table",
+            primary_key="id",
+            time_column="time",
+            end_time_column=None,
+        )
+
+        # Verify metadata:
+        table.print_metadata()
+
+        # Change the semantic type of a column:
+        table[column].stype = "text"
+
+    Args:
+        df: The data frame to create this table from.
+        name: The name of this table.
+        primary_key: The name of the primary key of this table, if it exists.
+        time_column: The name of the time column of this table, if it exists.
+        end_time_column: The name of the end time column of this table, if it
+            exists.
+    """
+    def __init__(
+        self,
+        df: pd.DataFrame,
+        name: str,
+        primary_key: Optional[str] = None,
+        time_column: Optional[str] = None,
+        end_time_column: Optional[str] = None,
+    ) -> None:
+
+        if df.empty:
+            raise ValueError("Data frame must have at least one row")
+        if isinstance(df.columns, pd.MultiIndex):
+            raise ValueError("Data frame must not have a multi-index")
+        if not df.columns.is_unique:
+            raise ValueError("Data frame must have unique column names")
+        if any(col == '' for col in df.columns):
+            raise ValueError("Data frame must have non-empty column names")
+
+        self._data = df.copy(deep=False)
+
+        super().__init__(
+            name=name,
+            columns=list(df.columns),
+            primary_key=primary_key,
+            time_column=time_column,
+            end_time_column=end_time_column,
+        )
+
+    def infer_metadata(self, verbose: bool = True) -> Self:
+        r"""Infers metadata, *i.e.*, primary keys and time columns, in the
+        table.
+
+        Args:
+            verbose: Whether to print verbose output.
+        """
+        logs = []
+
+        # Try to detect primary key if not set:
+        if not self.has_primary_key():
+
+            def is_candidate(column: Column) -> bool:
+                if column.stype == Stype.ID:
+                    return True
+                if all(column.stype != Stype.ID for column in self.columns):
+                    if self.name == column.name:
+                        return True
+                    if (self.name.endswith('s')
+                            and self.name[:-1] == column.name):
+                        return True
+                return False
+
+            candidates = [
+                column.name for column in self.columns if is_candidate(column)
+            ]
+
+            if primary_key := utils.detect_primary_key(
+                    table_name=self.name,
+                    df=self._data,
+                    candidates=candidates,
+            ):
+                self.primary_key = primary_key
+                logs.append(f"primary key '{primary_key}'")
+
+        # Try to detect time column if not set:
+        if not self.has_time_column():
+            candidates = [
+                column.name for column in self.columns
+                if column.stype == Stype.timestamp
+                and column.name != self._end_time_column
+            ]
+            if time_column := utils.detect_time_column(self._data, candidates):
+                self.time_column = time_column
+                logs.append(f"time column '{time_column}'")
+
+        if verbose and len(logs) > 0:
+            print(f"Detected {' and '.join(logs)} in table '{self.name}'")
+
+        return self
+
+    def _has_source_column(self, name: str) -> bool:
+        return name in self._data.columns
+
+    def _get_source_dtype(self, name: str) -> Dtype:
+        return utils.to_dtype(self._data[name])
+
+    def _get_source_stype(self, name: str, dtype: Dtype) -> Stype:
+        return utils.infer_stype(self._data[name], name, dtype)
+
+    def _infer_primary_key(self, candidates: List[str]) -> Optional[str]:
+        return utils.detect_primary_key(
+            table_name=self.name,
+            df=self._data,
+            candidates=candidates,
+        )
+
+    def _infer_time_column(self, candidates: List[str]) -> Optional[str]:
+        return utils.detect_time_column(df=self._data, candidates=candidates)
+
+    def _num_rows(self) -> Optional[int]:
+        return len(self._data)
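A minimal end-to-end sketch of the inference above: a table named 'users' with a unique 'user' column triggers the plural-name primary-key heuristic, and a single timestamp column becomes the time column. The exact thresholds live in `utils.detect_primary_key` and `utils.detect_time_column`, which are not part of this diff, so the printed message is only the expected outcome:

    import pandas as pd
    import kumoai.experimental.rfm as rfm

    df = pd.DataFrame({
        "user": [1, 2, 3],  # unique, and matches table name "users" minus the trailing "s"
        "signup_time": pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-03"]),
        "age": [31, 45, 27],
    })

    table = rfm.LocalTable(df, name="users").infer_metadata()
    # Expected: "Detected primary key 'user' and time column 'signup_time' in table 'users'"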
@@ -0,0 +1,23 @@
+from pathlib import Path
+from typing import Any, TypeAlias, Union
+
+try:
+    import adbc_driver_sqlite.dbapi as adbc
+except ImportError:
+    raise ImportError("No module named 'adbc_driver_sqlite'. Please install "
+                      "Kumo SDK with the 'sqlite' extension via "
+                      "`pip install kumoai[sqlite]`.")
+
+Connection: TypeAlias = adbc.AdbcSqliteConnection
+
+
+def connect(uri: Union[str, Path, None] = None, **kwargs: Any) -> Connection:
+    return adbc.connect(uri, **kwargs)
+
+
+from .table import SQLiteTable  # noqa: E402
+
+__all__ = [
+    'Connection',
+    'SQLiteTable',
+]
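A short usage sketch of the new SQLite backend, assuming the `kumoai[sqlite]` extra is installed; the database setup below is hypothetical, and `connect` simply wraps `adbc_driver_sqlite.dbapi.connect` as defined above:

    import sqlite3

    from kumoai.experimental.rfm.backend.sqlite import SQLiteTable, connect

    # Create a tiny example database (hypothetical data):
    db = sqlite3.connect("example.db")
    db.execute("CREATE TABLE IF NOT EXISTS users (user_id INTEGER PRIMARY KEY, name TEXT)")
    db.execute("INSERT OR REPLACE INTO users VALUES (1, 'ada')")
    db.commit()
    db.close()

    conn = connect("example.db")  # ADBC connection
    table = SQLiteTable(conn, name="users")
    # Column dtypes come from PRAGMA table_info, and the primary key is picked up
    # from the table definition, per the constructor in the next hunk.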
@@ -0,0 +1,117 @@
+import re
+from typing import Dict, List, Optional, Sequence
+
+import pyarrow as pa
+from kumoapi.typing import Dtype, Stype
+from typing_extensions import Self
+
+from kumoai.experimental.rfm import utils
+from kumoai.experimental.rfm.backend.sqlite import Connection
+from kumoai.experimental.rfm.base import Table
+
+
+class SQLiteTable(Table):
+    r"""A table backed by a :class:`sqlite` database.
+
+    Args:
+        connection: The connection to a :class:`sqlite` database.
+        name: The name of this table.
+        columns: The selected columns of this table.
+        primary_key: The name of the primary key of this table, if it exists.
+        time_column: The name of the time column of this table, if it exists.
+        end_time_column: The name of the end time column of this table, if it
+            exists.
+    """
+    def __init__(
+        self,
+        connection: Connection,
+        name: str,
+        columns: Optional[Sequence[str]] = None,
+        primary_key: Optional[str] = None,
+        time_column: Optional[str] = None,
+        end_time_column: Optional[str] = None,
+    ) -> None:
+
+        self._connection = connection
+        self._dtype_dict: Dict[str, Dtype] = {}
+
+        with connection.cursor() as cursor:
+            cursor.execute(f"PRAGMA table_info({name})")
+            for _, column, dtype, _, _, is_pkey in cursor.fetchall():
+                if bool(is_pkey):
+                    if primary_key is not None and primary_key != column:
+                        raise ValueError(f"Found duplicate primary key "
+                                         f"definition '{primary_key}' and "
+                                         f"'{column}' in table '{name}'")
+                    primary_key = column
+
+                # Determine column affinity:
+                dtype = dtype.strip().upper()
+                if re.search('INT', dtype):
+                    self._dtype_dict[column] = Dtype.int
+                elif re.search('TEXT|CHAR|CLOB', dtype):
+                    self._dtype_dict[column] = Dtype.string
+                elif re.search('REAL|FLOA|DOUB', dtype):
+                    self._dtype_dict[column] = Dtype.float
+                else:  # NUMERIC affinity.
+                    self._dtype_dict[column] = Dtype.unsupported
+
+            if len(self._dtype_dict) > 0:
+                column_names = ', '.join(self._dtype_dict.keys())
+                cursor.execute(f"SELECT {column_names} FROM {name} "
+                               f"ORDER BY rowid LIMIT 1000")
+                self._sample = cursor.fetch_arrow_table()
+
+                for column_name in list(self._dtype_dict.keys()):
+                    if self._dtype_dict[column_name] == Dtype.unsupported:
+                        dtype = self._sample[column_name].type
+                        if pa.types.is_integer(dtype):
+                            self._dtype_dict[column_name] = Dtype.int
+                        elif pa.types.is_floating(dtype):
+                            self._dtype_dict[column_name] = Dtype.float
+                        elif pa.types.is_decimal(dtype):
+                            self._dtype_dict[column_name] = Dtype.float
+                        elif pa.types.is_string(dtype):
+                            self._dtype_dict[column_name] = Dtype.string
+                        else:
+                            del self._dtype_dict[column_name]
+
+        if len(self._dtype_dict) == 0:
+            raise RuntimeError(f"Table '{name}' does not exist or does not "
+                               f"hold any column with a supported data type")
+
+        super().__init__(
+            name=name,
+            columns=columns or list(self._dtype_dict.keys()),
+            primary_key=primary_key,
+            time_column=time_column,
+            end_time_column=end_time_column,
+        )
+
+    def infer_metadata(self, verbose: bool = True) -> Self:
+        r"""Infers metadata, *i.e.*, primary keys and time columns, in the
+        table.
+
+        Args:
+            verbose: Whether to print verbose output.
+        """
+        return self
+
+    def _has_source_column(self, name: str) -> bool:
+        return name in self._dtype_dict
+
+    def _get_source_dtype(self, name: str) -> Dtype:
+        return self._dtype_dict[name]
+
+    def _get_source_stype(self, name: str, dtype: Dtype) -> Stype:
+        ser = self._sample[name].to_pandas()
+        return utils.infer_stype(ser, name, dtype)
+
+    def _infer_primary_key(self, candidates: List[str]) -> Optional[str]:
+        return None  # TODO
+
+    def _infer_time_column(self, candidates: List[str]) -> Optional[str]:
+        return None  # TODO
+
+    def _num_rows(self) -> Optional[int]:
+        return None
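The declared-type matching in `__init__` mirrors SQLite's type-affinity rules via substring checks, with NUMERIC-affinity columns resolved later from a sampled Arrow table. A condensed illustration (hypothetical helper name):

    import re

    from kumoapi.typing import Dtype

    def affinity_to_dtype_sketch(declared: str) -> Dtype:
        declared = declared.strip().upper()
        if re.search('INT', declared):
            return Dtype.int
        if re.search('TEXT|CHAR|CLOB', declared):
            return Dtype.string
        if re.search('REAL|FLOA|DOUB', declared):
            return Dtype.float
        return Dtype.unsupported  # NUMERIC affinity; refined from sampled rows

    # 'INTEGER'       -> Dtype.int
    # 'VARCHAR(80)'   -> Dtype.string
    # 'DOUBLE'        -> Dtype.float
    # 'DECIMAL(10,2)' -> Dtype.unsupported, then Dtype.float via the pyarrow decimal check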
@@ -0,0 +1,7 @@
+from .column import Column
+from .table import Table
+
+__all__ = [
+    'Column',
+    'Table',
+]
@@ -0,0 +1,66 @@
+from dataclasses import dataclass
+from typing import Any
+
+from kumoapi.typing import Dtype, Stype
+
+
+@dataclass(init=False, repr=False, eq=False)
+class Column:
+    stype: Stype
+
+    def __init__(
+        self,
+        name: str,
+        dtype: Dtype,
+        stype: Stype,
+        is_primary_key: bool = False,
+        is_time_column: bool = False,
+        is_end_time_column: bool = False,
+    ) -> None:
+        self._name = name
+        self._dtype = Dtype(dtype)
+        self._is_primary_key = is_primary_key
+        self._is_time_column = is_time_column
+        self._is_end_time_column = is_end_time_column
+        self.stype = Stype(stype)
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def dtype(self) -> Dtype:
+        return self._dtype
+
+    def __setattr__(self, key: str, val: Any) -> None:
+        if key == 'stype':
+            if isinstance(val, str):
+                val = Stype(val)
+            assert isinstance(val, Stype)
+            if not val.supports_dtype(self.dtype):
+                raise ValueError(f"Column '{self.name}' received an "
+                                 f"incompatible semantic type (got "
+                                 f"dtype='{self.dtype}' and stype='{val}')")
+            if self._is_primary_key and val != Stype.ID:
+                raise ValueError(f"Primary key '{self.name}' must have 'ID' "
+                                 f"semantic type (got '{val}')")
+            if self._is_time_column and val != Stype.timestamp:
+                raise ValueError(f"Time column '{self.name}' must have "
+                                 f"'timestamp' semantic type (got '{val}')")
+            if self._is_end_time_column and val != Stype.timestamp:
+                raise ValueError(f"End time column '{self.name}' must have "
+                                 f"'timestamp' semantic type (got '{val}')")
+
+        super().__setattr__(key, val)
+
+    def __hash__(self) -> int:
+        return hash((self.name, self.stype, self.dtype))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Column):
+            return False
+        return hash(self) == hash(other)
+
+    def __repr__(self) -> str:
+        return (f'{self.__class__.__name__}(name={self.name}, '
+                f'stype={self.stype}, dtype={self.dtype})')
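A brief usage sketch of the `Column` guard: assignments to `stype` run through `__setattr__`, so strings are coerced and incompatible combinations raise. `Stype.numerical` and `Stype.categorical` are assumed member names here (the diff itself only shows `Stype.ID`, `Stype.timestamp` and the string form "text"), and the exact compatibility matrix lives in `Stype.supports_dtype`:

    from kumoapi.typing import Dtype, Stype

    from kumoai.experimental.rfm.base import Column  # import path per base/__init__.py above

    col = Column(name="age", dtype=Dtype.int, stype=Stype.numerical)
    col.stype = "categorical"  # coerced to Stype via __setattr__

    # A primary-key column must keep the ID semantic type; any other assignment
    # raises ValueError per the checks in __setattr__.
    pk = Column(name="user_id", dtype=Dtype.int, stype=Stype.ID, is_primary_key=True)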