kumoai 2.12.0.dev202511031731__cp313-cp313-macosx_11_0_arm64.whl → 2.13.0.dev202512061731__cp313-cp313-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kumoai/__init__.py +18 -9
- kumoai/_version.py +1 -1
- kumoai/client/client.py +9 -13
- kumoai/client/endpoints.py +1 -0
- kumoai/client/rfm.py +35 -7
- kumoai/connector/utils.py +23 -2
- kumoai/experimental/rfm/__init__.py +164 -46
- kumoai/experimental/rfm/backend/__init__.py +0 -0
- kumoai/experimental/rfm/backend/local/__init__.py +42 -0
- kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +20 -30
- kumoai/experimental/rfm/backend/local/sampler.py +131 -0
- kumoai/experimental/rfm/backend/local/table.py +109 -0
- kumoai/experimental/rfm/backend/snow/__init__.py +35 -0
- kumoai/experimental/rfm/backend/snow/table.py +117 -0
- kumoai/experimental/rfm/backend/sqlite/__init__.py +30 -0
- kumoai/experimental/rfm/backend/sqlite/table.py +101 -0
- kumoai/experimental/rfm/base/__init__.py +14 -0
- kumoai/experimental/rfm/base/column.py +66 -0
- kumoai/experimental/rfm/base/sampler.py +287 -0
- kumoai/experimental/rfm/base/source.py +18 -0
- kumoai/experimental/rfm/{local_table.py → base/table.py} +139 -139
- kumoai/experimental/rfm/{local_graph.py → graph.py} +334 -79
- kumoai/experimental/rfm/infer/__init__.py +6 -0
- kumoai/experimental/rfm/infer/dtype.py +79 -0
- kumoai/experimental/rfm/infer/pkey.py +126 -0
- kumoai/experimental/rfm/infer/time_col.py +62 -0
- kumoai/experimental/rfm/local_graph_sampler.py +43 -4
- kumoai/experimental/rfm/local_pquery_driver.py +222 -27
- kumoai/experimental/rfm/pquery/__init__.py +0 -4
- kumoai/experimental/rfm/pquery/pandas_executor.py +34 -8
- kumoai/experimental/rfm/rfm.py +153 -96
- kumoai/experimental/rfm/sagemaker.py +138 -0
- kumoai/spcs.py +1 -3
- kumoai/testing/decorators.py +1 -1
- kumoai/utils/progress_logger.py +10 -4
- {kumoai-2.12.0.dev202511031731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/METADATA +12 -2
- {kumoai-2.12.0.dev202511031731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/RECORD +40 -27
- kumoai/experimental/rfm/pquery/backend.py +0 -136
- kumoai/experimental/rfm/pquery/pandas_backend.py +0 -478
- kumoai/experimental/rfm/utils.py +0 -344
- {kumoai-2.12.0.dev202511031731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/WHEEL +0 -0
- {kumoai-2.12.0.dev202511031731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/licenses/LICENSE +0 -0
- {kumoai-2.12.0.dev202511031731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/top_level.txt +0 -0

kumoai/experimental/rfm/backend/local/sampler.py
@@ -0,0 +1,131 @@
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+
+from kumoai.experimental.rfm.backend.local import LocalGraphStore
+from kumoai.experimental.rfm.base import EdgeSpec, Sampler, SamplerOutput
+from kumoai.utils import ProgressLogger
+
+if TYPE_CHECKING:
+    from kumoai.experimental.rfm import Graph
+
+
+class LocalSampler(Sampler):
+    def __init__(
+        self,
+        graph: 'Graph',
+        verbose: bool | ProgressLogger = True,
+    ) -> None:
+        super().__init__(graph=graph)
+
+        import kumoai.kumolib as kumolib
+
+        self._graph_store = LocalGraphStore(graph, verbose)
+        self._graph_sampler = kumolib.NeighborSampler(
+            list(self.table_stype_dict.keys()),
+            self.edge_types,
+            {
+                '__'.join(edge_type): colptr
+                for edge_type, colptr in self._graph_store.colptr_dict.items()
+            },
+            {
+                '__'.join(edge_type): row
+                for edge_type, row in self._graph_store.row_dict.items()
+            },
+            self._graph_store.time_dict,
+        )
+
+    def sample(
+        self,
+        entity_table_name: str,
+        entity_pkey: pd.Series,
+        anchor_time: pd.Series,
+        column_spec_dict: dict[str, list[str]],
+        edge_spec_dict: dict[tuple[str, str, str], list[EdgeSpec]],
+        drop_duplicates: bool = False,
+        return_edges: bool = False,
+    ) -> SamplerOutput:
+
+        if anchor_time.dtype != 'datetime64[ns]':
+            anchor_time = anchor_time.astype('datetime64[ns]')
+
+        num_hops = max([len(specs) for specs in edge_spec_dict.values()] + [0])
+        num_neighbors_dict: dict[str, list[int]] = {}
+
+        for edge_type, specs in edge_spec_dict.items():
+            edge_type_str = '__'.join(edge_type)
+            num_neighbors_dict[edge_type_str] = [0] * num_hops
+            for hop, spec in enumerate(specs):
+                # TODO Add support for time-based sampling.
+                assert spec.num_neighbors is not None
+                num_neighbors_dict[edge_type_str][hop] = spec.num_neighbors
+
+        (
+            row_dict,
+            col_dict,
+            node_dict,
+            batch_dict,
+            num_sampled_nodes_dict,
+            num_sampled_edges_dict,
+        ) = self._graph_sampler.sample(
+            num_neighbors_dict,
+            {},
+            entity_table_name,
+            self._graph_store.get_node_id(entity_table_name, entity_pkey),
+            anchor_time.astype(int).to_numpy() // 1000**3,  # to seconds
+        )
+
+        df_dict: dict[str, pd.DataFrame] = {}
+        inverse_dict: dict[str, np.ndarray] = {}
+        for table_name, node in node_dict.items():
+            df = self._graph_store.df_dict[table_name]
+            columns = column_spec_dict[table_name]
+            if self.end_time_column_dict.get(table_name, None) in columns:
+                df = df.iloc[node]
+            elif len(columns) > 0 and drop_duplicates:
+                # Only store unique rows in `df` above a certain threshold:
+                unique_node, inverse = np.unique(node, return_inverse=True)
+                if len(node) > 1.05 * len(unique_node):
+                    df = df.iloc[unique_node]
+                    inverse_dict[table_name] = inverse
+                else:
+                    df = df.iloc[node]
+            else:
+                df = df.iloc[node]
+            df = df.reset_index(drop=True)
+            df = df[columns]
+            df_dict[table_name] = df
+
+        num_sampled_nodes_dict = {
+            table_name: num_sampled_nodes.tolist()
+            for table_name, num_sampled_nodes in
+            num_sampled_nodes_dict.items()
+        }
+
+        if return_edges:
+            row_dict = {
+                edge_type: row_dict['__'.join(edge_type)]
+                for edge_type in edge_spec_dict.keys()
+            }
+            col_dict = {
+                edge_type: col_dict['__'.join(edge_type)]
+                for edge_type in edge_spec_dict.keys()
+            }
+            num_sampled_edges_dict = {
+                edge_type:
+                num_sampled_edges_dict['__'.join(edge_type)].tolist()
+                for edge_type in edge_spec_dict.keys()
+            }
+        else:
+            row_dict = col_dict = num_sampled_edges_dict = None
+
+        return SamplerOutput(
+            df_dict=df_dict,
+            inverse_dict=inverse_dict,
+            batch_dict=batch_dict,
+            num_sampled_nodes_dict=num_sampled_nodes_dict,
+            row_dict=row_dict,
+            col_dict=col_dict,
+            num_sampled_edges_dict=num_sampled_edges_dict,
+        )
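
For context, `LocalSampler.sample` flattens per-edge-type hop specifications into the string-keyed fan-out dict that `kumolib.NeighborSampler` consumes. Below is a minimal standalone sketch of that transformation, with a hypothetical stand-in for `EdgeSpec` (which, per the code above, exposes a `num_neighbors` attribute) and placeholder table names:

from dataclasses import dataclass
from typing import Optional

@dataclass
class EdgeSpec:  # stand-in for kumoai.experimental.rfm.base.EdgeSpec
    num_neighbors: Optional[int] = None

# Hypothetical two-table schema; edge types are (src, fkey, dst) tuples:
edge_spec_dict = {
    ('orders', 'user_id', 'users'): [EdgeSpec(8), EdgeSpec(4)],
    ('users', 'user_id', 'orders'): [EdgeSpec(16)],
}

# Same logic as `LocalSampler.sample` above: pad each edge type to the
# maximum hop count, filling missing hops with a fan-out of 0.
num_hops = max([len(specs) for specs in edge_spec_dict.values()] + [0])
num_neighbors_dict: dict[str, list[int]] = {}
for edge_type, specs in edge_spec_dict.items():
    key = '__'.join(edge_type)  # e.g. 'orders__user_id__users'
    num_neighbors_dict[key] = [0] * num_hops
    for hop, spec in enumerate(specs):
        num_neighbors_dict[key][hop] = spec.num_neighbors

print(num_neighbors_dict)
# {'orders__user_id__users': [8, 4], 'users__user_id__orders': [16, 0]}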

kumoai/experimental/rfm/backend/local/table.py
@@ -0,0 +1,109 @@
+import warnings
+from typing import List, Optional
+
+import pandas as pd
+
+from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+from kumoai.experimental.rfm.infer import infer_dtype
+
+
+class LocalTable(Table):
+    r"""A table backed by a :class:`pandas.DataFrame`.
+
+    A :class:`LocalTable` fully specifies the relevant metadata, *i.e.*
+    selected columns, column semantic types, primary keys and time columns.
+    :class:`LocalTable` is used to create a :class:`Graph`.
+
+    .. code-block:: python
+
+        import pandas as pd
+        import kumoai.experimental.rfm as rfm
+
+        # Load data from a CSV file:
+        df = pd.read_csv("data.csv")
+
+        # Create a table from a `pandas.DataFrame` and infer its metadata ...
+        table = rfm.LocalTable(df, name="my_table").infer_metadata()
+
+        # ... or create a table explicitly:
+        table = rfm.LocalTable(
+            df=df,
+            name="my_table",
+            primary_key="id",
+            time_column="time",
+            end_time_column=None,
+        )
+
+        # Verify metadata:
+        table.print_metadata()
+
+        # Change the semantic type of a column:
+        table[column].stype = "text"
+
+    Args:
+        df: The data frame to create this table from.
+        name: The name of this table.
+        primary_key: The name of the primary key of this table, if it exists.
+        time_column: The name of the time column of this table, if it exists.
+        end_time_column: The name of the end time column of this table, if it
+            exists.
+    """
+    def __init__(
+        self,
+        df: pd.DataFrame,
+        name: str,
+        primary_key: Optional[str] = None,
+        time_column: Optional[str] = None,
+        end_time_column: Optional[str] = None,
+    ) -> None:
+
+        if df.empty:
+            raise ValueError("Data frame is empty")
+        if isinstance(df.columns, pd.MultiIndex):
+            raise ValueError("Data frame must not have a multi-index")
+        if not df.columns.is_unique:
+            raise ValueError("Data frame must have unique column names")
+        if any(col == '' for col in df.columns):
+            raise ValueError("Data frame must have non-empty column names")
+
+        self._data = df.copy(deep=False)
+
+        super().__init__(
+            name=name,
+            columns=list(df.columns),
+            primary_key=primary_key,
+            time_column=time_column,
+            end_time_column=end_time_column,
+        )
+
+    def _get_source_columns(self) -> List[SourceColumn]:
+        source_columns: List[SourceColumn] = []
+        for column in self._data.columns:
+            ser = self._data[column]
+            try:
+                dtype = infer_dtype(ser)
+            except Exception:
+                warnings.warn(f"Data type inference for column '{column}' in "
+                              f"table '{self.name}' failed. Consider changing "
+                              f"the data type of the column to use it within "
+                              f"this table.")
+                continue
+
+            source_column = SourceColumn(
+                name=column,
+                dtype=dtype,
+                is_primary_key=False,
+                is_unique_key=False,
+            )
+            source_columns.append(source_column)
+
+        return source_columns
+
+    def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+        return []
+
+    def _get_sample_df(self) -> pd.DataFrame:
+        return self._data
+
+    def _get_num_rows(self) -> Optional[int]:
+        return len(self._data)
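
The docstring above already covers the happy path; the constructor additionally rejects malformed frames. A small sketch of those checks, assuming the kumoai SDK is installed:

import pandas as pd
import kumoai.experimental.rfm as rfm

frames = [
    pd.DataFrame(),                              # empty
    pd.DataFrame([[1, 2]], columns=['a', 'a']),  # duplicate column names
    pd.DataFrame({'id': [1], '': ['x']}),        # empty column name
]
for df in frames:
    try:
        rfm.LocalTable(df, name='my_table')
    except ValueError as e:
        print(e)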

kumoai/experimental/rfm/backend/snow/__init__.py
@@ -0,0 +1,35 @@
+from typing import Any, TypeAlias
+
+try:
+    import snowflake.connector
+except ImportError:
+    raise ImportError("No module named 'snowflake'. Please install Kumo SDK "
+                      "with the 'snowflake' extension via "
+                      "`pip install kumoai[snowflake]`.")
+
+Connection: TypeAlias = snowflake.connector.SnowflakeConnection
+
+
+def connect(**kwargs: Any) -> Connection:
+    r"""Opens a connection to a :class:`snowflake` database.
+
+    If available, will return a connection to the active session.
+
+    kwargs: Connection arguments, following the :class:`snowflake` protocol.
+    """
+    try:
+        from snowflake.snowpark.context import get_active_session
+        return get_active_session().connection
+    except Exception:
+        pass
+
+    return snowflake.connector.connect(**kwargs)
+
+
+from .table import SnowTable  # noqa: E402
+
+__all__ = [
+    'connect',
+    'Connection',
+    'SnowTable',
+]
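
A usage sketch for `connect` (credentials are placeholders): inside a Snowpark session the active connection is reused; elsewhere the keyword arguments are forwarded verbatim to `snowflake.connector.connect`:

from kumoai.experimental.rfm.backend.snow import connect

conn = connect(
    account='my_account',  # placeholder credentials
    user='my_user',
    password='...',
    warehouse='my_warehouse',
)
with conn.cursor() as cursor:
    cursor.execute('SELECT CURRENT_VERSION()')
    print(cursor.fetchone())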

kumoai/experimental/rfm/backend/snow/table.py
@@ -0,0 +1,117 @@
+import re
+from typing import List, Optional, Sequence
+
+import pandas as pd
+from kumoapi.typing import Dtype
+
+from kumoai.experimental.rfm.backend.snow import Connection
+from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+
+
+class SnowTable(Table):
+    r"""A table backed by a :class:`snowflake` database.
+
+    Args:
+        connection: The connection to a :class:`snowflake` database.
+        name: The name of this table.
+        database: The database.
+        schema: The schema.
+        columns: The selected columns of this table.
+        primary_key: The name of the primary key of this table, if it exists.
+        time_column: The name of the time column of this table, if it exists.
+        end_time_column: The name of the end time column of this table, if it
+            exists.
+    """
+    def __init__(
+        self,
+        connection: Connection,
+        name: str,
+        database: str | None = None,
+        schema: str | None = None,
+        columns: Optional[Sequence[str]] = None,
+        primary_key: Optional[str] = None,
+        time_column: Optional[str] = None,
+        end_time_column: Optional[str] = None,
+    ) -> None:
+
+        if database is not None and schema is None:
+            raise ValueError(f"Missing 'schema' for table '{name}' in "
+                             f"database '{database}'")
+
+        self._connection = connection
+        self._database = database
+        self._schema = schema
+
+        super().__init__(
+            name=name,
+            columns=columns,
+            primary_key=primary_key,
+            time_column=time_column,
+            end_time_column=end_time_column,
+        )
+
+    @property
+    def fqn_name(self) -> str:
+        names: List[str] = []
+        if self._database is not None:
+            assert self._schema is not None
+            names.extend([self._database, self._schema])
+        elif self._schema is not None:
+            names.append(self._schema)
+        names.append(self._name)
+        return '.'.join(names)
+
+    def _get_source_columns(self) -> List[SourceColumn]:
+        source_columns: List[SourceColumn] = []
+        with self._connection.cursor() as cursor:
+            try:
+                cursor.execute(f"DESCRIBE TABLE {self.fqn_name}")
+            except Exception as e:
+                raise ValueError(
+                    f"Table '{self.fqn_name}' does not exist") from e
+
+            for row in cursor.fetchall():
+                column, type, _, _, _, is_pkey, is_unique = row[:7]
+
+                type = type.strip().upper()
+                if type.startswith('NUMBER'):
+                    dtype = Dtype.int
+                elif type.startswith('VARCHAR'):
+                    dtype = Dtype.string
+                elif type == 'FLOAT':
+                    dtype = Dtype.float
+                elif type == 'BOOLEAN':
+                    dtype = Dtype.bool
+                elif re.search('DATE|TIMESTAMP', type):
+                    dtype = Dtype.date
+                else:
+                    continue
+
+                source_column = SourceColumn(
+                    name=column,
+                    dtype=dtype,
+                    is_primary_key=is_pkey.strip().upper() == 'Y',
+                    is_unique_key=is_unique.strip().upper() == 'Y',
+                )
+                source_columns.append(source_column)
+
+        return source_columns
+
+    def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+        source_fkeys: List[SourceForeignKey] = []
+        with self._connection.cursor() as cursor:
+            cursor.execute(f"SHOW IMPORTED KEYS IN TABLE {self.fqn_name}")
+            for row in cursor.fetchall():
+                _, _, _, dst_table, pkey, _, _, _, fkey = row[:9]
+                source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+        return source_fkeys
+
+    def _get_sample_df(self) -> pd.DataFrame:
+        with self._connection.cursor() as cursor:
+            columns = ', '.join(self._source_column_dict.keys())
+            cursor.execute(f"SELECT {columns} FROM {self.fqn_name} LIMIT 1000")
+            table = cursor.fetch_arrow_all()
+            return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+    def _get_num_rows(self) -> Optional[int]:
+        return None
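
A hedged usage sketch for `SnowTable` (all identifiers are placeholders). Note that passing `database` without `schema` raises, and that source metadata comes from `DESCRIBE TABLE` and `SHOW IMPORTED KEYS` as implemented above:

from kumoai.experimental.rfm.backend.snow import SnowTable, connect

conn = connect(account='my_account', user='my_user', password='...')
orders = SnowTable(
    connection=conn,
    name='ORDERS',       # placeholder identifiers
    database='MY_DB',    # requires `schema` to be set as well
    schema='PUBLIC',
    time_column='ORDER_DATE',
)
print(orders.fqn_name)  # 'MY_DB.PUBLIC.ORDERS'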

kumoai/experimental/rfm/backend/sqlite/__init__.py
@@ -0,0 +1,30 @@
+from pathlib import Path
+from typing import Any, TypeAlias, Union
+
+try:
+    import adbc_driver_sqlite.dbapi as adbc
+except ImportError:
+    raise ImportError("No module named 'adbc_driver_sqlite'. Please install "
+                      "Kumo SDK with the 'sqlite' extension via "
+                      "`pip install kumoai[sqlite]`.")
+
+Connection: TypeAlias = adbc.AdbcSqliteConnection
+
+
+def connect(uri: Union[str, Path, None] = None, **kwargs: Any) -> Connection:
+    r"""Opens a connection to a :class:`sqlite` database.
+
+    uri: The path to the database file to be opened.
+    kwargs: Additional connection arguments, following the
+        :class:`adbc_driver_sqlite` protocol.
+    """
+    return adbc.connect(uri, **kwargs)
+
+
+from .table import SQLiteTable  # noqa: E402
+
+__all__ = [
+    'connect',
+    'Connection',
+    'SQLiteTable',
+]
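
A usage sketch (the path and table metadata are placeholders); per the `adbc_driver_sqlite` defaults, calling `connect()` without a URI typically opens an in-memory database:

from kumoai.experimental.rfm.backend.sqlite import SQLiteTable, connect

conn = connect('data.db')  # placeholder path
orders = SQLiteTable(
    connection=conn,
    name='orders',           # placeholder table metadata
    primary_key='order_id',
    time_column='created_at',
)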

kumoai/experimental/rfm/backend/sqlite/table.py
@@ -0,0 +1,101 @@
+import re
+import warnings
+from typing import List, Optional, Sequence
+
+import pandas as pd
+from kumoapi.typing import Dtype
+
+from kumoai.experimental.rfm.backend.sqlite import Connection
+from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+from kumoai.experimental.rfm.infer import infer_dtype
+
+
+class SQLiteTable(Table):
+    r"""A table backed by a :class:`sqlite` database.
+
+    Args:
+        connection: The connection to a :class:`sqlite` database.
+        name: The name of this table.
+        columns: The selected columns of this table.
+        primary_key: The name of the primary key of this table, if it exists.
+        time_column: The name of the time column of this table, if it exists.
+        end_time_column: The name of the end time column of this table, if it
+            exists.
+    """
+    def __init__(
+        self,
+        connection: Connection,
+        name: str,
+        columns: Optional[Sequence[str]] = None,
+        primary_key: Optional[str] = None,
+        time_column: Optional[str] = None,
+        end_time_column: Optional[str] = None,
+    ) -> None:
+
+        self._connection = connection
+
+        super().__init__(
+            name=name,
+            columns=columns,
+            primary_key=primary_key,
+            time_column=time_column,
+            end_time_column=end_time_column,
+        )
+
+    def _get_source_columns(self) -> List[SourceColumn]:
+        source_columns: List[SourceColumn] = []
+        with self._connection.cursor() as cursor:
+            cursor.execute(f"PRAGMA table_info({self.name})")
+            rows = cursor.fetchall()
+
+        if len(rows) == 0:
+            raise ValueError(f"Table '{self.name}' does not exist")
+
+        for _, column, type, _, _, is_pkey in rows:
+            # Determine column affinity:
+            type = type.strip().upper()
+            if re.search('INT', type):
+                dtype = Dtype.int
+            elif re.search('TEXT|CHAR|CLOB', type):
+                dtype = Dtype.string
+            elif re.search('REAL|FLOA|DOUB', type):
+                dtype = Dtype.float
+            else:  # NUMERIC affinity.
+                ser = self._sample_df[column]
+                try:
+                    dtype = infer_dtype(ser)
+                except Exception:
+                    warnings.warn(
+                        f"Data type inference for column '{column}' in "
+                        f"table '{self.name}' failed. Consider changing "
+                        f"the data type of the column to use it within "
+                        f"this table.")
+                    continue
+
+            source_column = SourceColumn(
+                name=column,
+                dtype=dtype,
+                is_primary_key=bool(is_pkey),
+                is_unique_key=False,
+            )
+            source_columns.append(source_column)
+
+        return source_columns
+
+    def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+        source_fkeys: List[SourceForeignKey] = []
+        with self._connection.cursor() as cursor:
+            cursor.execute(f"PRAGMA foreign_key_list({self.name})")
+            for _, _, dst_table, fkey, pkey, _, _, _ in cursor.fetchall():
+                source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+        return source_fkeys
+
+    def _get_sample_df(self) -> pd.DataFrame:
+        with self._connection.cursor() as cursor:
+            cursor.execute(f"SELECT * FROM {self.name} "
+                           f"ORDER BY rowid LIMIT 1000")
+            table = cursor.fetch_arrow_table()
+            return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+    def _get_num_rows(self) -> Optional[int]:
+        return None
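
For reference, the PRAGMA introspection above can be reproduced with the stdlib `sqlite3` module alone: `table_info` rows are `(cid, name, type, notnull, dflt_value, pk)` and `foreign_key_list` rows are `(id, seq, table, from, to, on_update, on_delete, match)`, matching the tuple unpacking in `SQLiteTable`:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE users (user_id INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE orders (
        order_id INTEGER PRIMARY KEY,
        user_id INTEGER REFERENCES users(user_id),
        amount REAL,
        created_at NUMERIC
    );
""")
print(conn.execute('PRAGMA table_info(orders)').fetchall())
print(conn.execute('PRAGMA foreign_key_list(orders)').fetchall())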

kumoai/experimental/rfm/base/__init__.py
@@ -0,0 +1,14 @@
+from .source import SourceColumn, SourceForeignKey
+from .column import Column
+from .table import Table
+from .sampler import EdgeSpec, SamplerOutput, Sampler
+
+__all__ = [
+    'SourceColumn',
+    'SourceForeignKey',
+    'Column',
+    'Table',
+    'EdgeSpec',
+    'SamplerOutput',
+    'Sampler',
+]

kumoai/experimental/rfm/base/column.py
@@ -0,0 +1,66 @@
+from dataclasses import dataclass
+from typing import Any
+
+from kumoapi.typing import Dtype, Stype
+
+
+@dataclass(init=False, repr=False, eq=False)
+class Column:
+    stype: Stype
+
+    def __init__(
+        self,
+        name: str,
+        dtype: Dtype,
+        stype: Stype,
+        is_primary_key: bool = False,
+        is_time_column: bool = False,
+        is_end_time_column: bool = False,
+    ) -> None:
+        self._name = name
+        self._dtype = Dtype(dtype)
+        self._is_primary_key = is_primary_key
+        self._is_time_column = is_time_column
+        self._is_end_time_column = is_end_time_column
+        self.stype = Stype(stype)
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def dtype(self) -> Dtype:
+        return self._dtype
+
+    def __setattr__(self, key: str, val: Any) -> None:
+        if key == 'stype':
+            if isinstance(val, str):
+                val = Stype(val)
+            assert isinstance(val, Stype)
+            if not val.supports_dtype(self.dtype):
+                raise ValueError(f"Column '{self.name}' received an "
+                                 f"incompatible semantic type (got "
+                                 f"dtype='{self.dtype}' and stype='{val}')")
+            if self._is_primary_key and val != Stype.ID:
+                raise ValueError(f"Primary key '{self.name}' must have 'ID' "
+                                 f"semantic type (got '{val}')")
+            if self._is_time_column and val != Stype.timestamp:
+                raise ValueError(f"Time column '{self.name}' must have "
+                                 f"'timestamp' semantic type (got '{val}')")
+            if self._is_end_time_column and val != Stype.timestamp:
+                raise ValueError(f"End time column '{self.name}' must have "
+                                 f"'timestamp' semantic type (got '{val}')")
+
+        super().__setattr__(key, val)
+
+    def __hash__(self) -> int:
+        return hash((self.name, self.stype, self.dtype))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, Column):
+            return False
+        return hash(self) == hash(other)
+
+    def __repr__(self) -> str:
+        return (f'{self.__class__.__name__}(name={self.name}, '
+                f'stype={self.stype}, dtype={self.dtype})')