kumoai 2.13.0.dev202511261731__cp313-cp313-macosx_11_0_arm64.whl → 2.13.0.dev202512061731__cp313-cp313-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. kumoai/__init__.py +12 -0
  2. kumoai/_version.py +1 -1
  3. kumoai/connector/utils.py +23 -2
  4. kumoai/experimental/rfm/__init__.py +20 -45
  5. kumoai/experimental/rfm/backend/__init__.py +0 -0
  6. kumoai/experimental/rfm/backend/local/__init__.py +42 -0
  7. kumoai/experimental/rfm/{local_graph_store.py → backend/local/graph_store.py} +20 -30
  8. kumoai/experimental/rfm/backend/local/sampler.py +131 -0
  9. kumoai/experimental/rfm/backend/local/table.py +109 -0
  10. kumoai/experimental/rfm/backend/snow/__init__.py +35 -0
  11. kumoai/experimental/rfm/backend/snow/table.py +117 -0
  12. kumoai/experimental/rfm/backend/sqlite/__init__.py +30 -0
  13. kumoai/experimental/rfm/backend/sqlite/table.py +101 -0
  14. kumoai/experimental/rfm/base/__init__.py +14 -0
  15. kumoai/experimental/rfm/base/column.py +66 -0
  16. kumoai/experimental/rfm/base/sampler.py +287 -0
  17. kumoai/experimental/rfm/base/source.py +18 -0
  18. kumoai/experimental/rfm/{local_table.py → base/table.py} +139 -139
  19. kumoai/experimental/rfm/{local_graph.py → graph.py} +334 -79
  20. kumoai/experimental/rfm/infer/__init__.py +6 -0
  21. kumoai/experimental/rfm/infer/dtype.py +79 -0
  22. kumoai/experimental/rfm/infer/pkey.py +126 -0
  23. kumoai/experimental/rfm/infer/time_col.py +62 -0
  24. kumoai/experimental/rfm/local_graph_sampler.py +43 -2
  25. kumoai/experimental/rfm/local_pquery_driver.py +1 -1
  26. kumoai/experimental/rfm/rfm.py +7 -17
  27. kumoai/experimental/rfm/sagemaker.py +11 -3
  28. kumoai/testing/decorators.py +1 -1
  29. {kumoai-2.13.0.dev202511261731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/METADATA +9 -8
  30. {kumoai-2.13.0.dev202511261731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/RECORD +33 -19
  31. kumoai/experimental/rfm/utils.py +0 -344
  32. {kumoai-2.13.0.dev202511261731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/WHEEL +0 -0
  33. {kumoai-2.13.0.dev202511261731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/licenses/LICENSE +0 -0
  34. {kumoai-2.13.0.dev202511261731.dist-info → kumoai-2.13.0.dev202512061731.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,117 @@
+ import re
+ from typing import List, Optional, Sequence
+
+ import pandas as pd
+ from kumoapi.typing import Dtype
+
+ from kumoai.experimental.rfm.backend.snow import Connection
+ from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+
+
+ class SnowTable(Table):
+     r"""A table backed by a :class:`snowflake` database.
+
+     Args:
+         connection: The connection to a :class:`snowflake` database.
+         name: The name of this table.
+         database: The database.
+         schema: The schema.
+         columns: The selected columns of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         connection: Connection,
+         name: str,
+         database: str | None = None,
+         schema: str | None = None,
+         columns: Optional[Sequence[str]] = None,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         if database is not None and schema is None:
+             raise ValueError(f"Missing 'schema' for table '{name}' in "
+                              f"database '{database}'")
+
+         self._connection = connection
+         self._database = database
+         self._schema = schema
+
+         super().__init__(
+             name=name,
+             columns=columns,
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     @property
+     def fqn_name(self) -> str:
+         names: List[str] = []
+         if self._database is not None:
+             assert self._schema is not None
+             names.extend([self._database, self._schema])
+         elif self._schema is not None:
+             names.append(self._schema)
+         names.append(self._name)
+         return '.'.join(names)
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         with self._connection.cursor() as cursor:
+             try:
+                 cursor.execute(f"DESCRIBE TABLE {self.fqn_name}")
+             except Exception as e:
+                 raise ValueError(
+                     f"Table '{self.fqn_name}' does not exist") from e
+
+             for row in cursor.fetchall():
+                 column, type, _, _, _, is_pkey, is_unique = row[:7]
+
+                 type = type.strip().upper()
+                 if type.startswith('NUMBER'):
+                     dtype = Dtype.int
+                 elif type.startswith('VARCHAR'):
+                     dtype = Dtype.string
+                 elif type == 'FLOAT':
+                     dtype = Dtype.float
+                 elif type == 'BOOLEAN':
+                     dtype = Dtype.bool
+                 elif re.search('DATE|TIMESTAMP', type):
+                     dtype = Dtype.date
+                 else:
+                     continue
+
+                 source_column = SourceColumn(
+                     name=column,
+                     dtype=dtype,
+                     is_primary_key=is_pkey.strip().upper() == 'Y',
+                     is_unique_key=is_unique.strip().upper() == 'Y',
+                 )
+                 source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         source_fkeys: List[SourceForeignKey] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"SHOW IMPORTED KEYS IN TABLE {self.fqn_name}")
+             for row in cursor.fetchall():
+                 _, _, _, dst_table, pkey, _, _, _, fkey = row[:9]
+                 source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+         return source_fkeys
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         with self._connection.cursor() as cursor:
+             columns = ', '.join(self._source_column_dict.keys())
+             cursor.execute(f"SELECT {columns} FROM {self.fqn_name} LIMIT 1000")
+             table = cursor.fetch_arrow_all()
+             return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+     def _get_num_rows(self) -> Optional[int]:
+         return None
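
The new Snowflake table backend resolves its fully qualified name from the optional database/schema pair and discovers column metadata via `DESCRIBE TABLE` and `SHOW IMPORTED KEYS`. A minimal usage sketch (account, table, and column names are hypothetical; it assumes a `snowflake-connector-python` connection object satisfying the backend's `Connection` alias):

    import snowflake.connector

    from kumoai.experimental.rfm.backend.snow.table import SnowTable

    # Hypothetical credentials; any Snowflake DB-API connection compatible
    # with the backend's `Connection` alias should work here:
    connection = snowflake.connector.connect(
        account='MY_ACCOUNT', user='MY_USER', password='...')

    # Column metadata is introspected via DESCRIBE TABLE MY_DB.MY_SCHEMA.USERS:
    table = SnowTable(
        connection=connection,
        name='USERS',
        database='MY_DB',
        schema='MY_SCHEMA',
        primary_key='USER_ID',
    )
    print(table.fqn_name)  # MY_DB.MY_SCHEMA.USERS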
@@ -0,0 +1,30 @@
+ from pathlib import Path
+ from typing import Any, TypeAlias, Union
+
+ try:
+     import adbc_driver_sqlite.dbapi as adbc
+ except ImportError:
+     raise ImportError("No module named 'adbc_driver_sqlite'. Please install "
+                       "Kumo SDK with the 'sqlite' extension via "
+                       "`pip install kumoai[sqlite]`.")
+
+ Connection: TypeAlias = adbc.AdbcSqliteConnection
+
+
+ def connect(uri: Union[str, Path, None] = None, **kwargs: Any) -> Connection:
+     r"""Opens a connection to a :class:`sqlite` database.
+
+     Args:
+         uri: The path to the database file to be opened.
+         kwargs: Additional connection arguments, following the
+             :class:`adbc_driver_sqlite` protocol.
+     """
+     return adbc.connect(uri, **kwargs)
+
+
+ from .table import SQLiteTable  # noqa: E402
+
+ __all__ = [
+     'connect',
+     'Connection',
+     'SQLiteTable',
+ ]
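
Since `connect()` is a thin wrapper around `adbc_driver_sqlite.dbapi.connect`, the returned object follows the DB-API shape used throughout the table implementations. A short sketch (the database file name is hypothetical):

    from kumoai.experimental.rfm.backend.sqlite import connect

    connection = connect('example.db')  # Omitting `uri` opens an in-memory DB.
    with connection.cursor() as cursor:  # Cursors act as context managers.
        cursor.execute("SELECT sqlite_version()")
        print(cursor.fetchone())
    connection.close()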
@@ -0,0 +1,101 @@
+ import re
+ import warnings
+ from typing import List, Optional, Sequence
+
+ import pandas as pd
+ from kumoapi.typing import Dtype
+
+ from kumoai.experimental.rfm.backend.sqlite import Connection
+ from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey, Table
+ from kumoai.experimental.rfm.infer import infer_dtype
+
+
+ class SQLiteTable(Table):
+     r"""A table backed by a :class:`sqlite` database.
+
+     Args:
+         connection: The connection to a :class:`sqlite` database.
+         name: The name of this table.
+         columns: The selected columns of this table.
+         primary_key: The name of the primary key of this table, if it exists.
+         time_column: The name of the time column of this table, if it exists.
+         end_time_column: The name of the end time column of this table, if it
+             exists.
+     """
+     def __init__(
+         self,
+         connection: Connection,
+         name: str,
+         columns: Optional[Sequence[str]] = None,
+         primary_key: Optional[str] = None,
+         time_column: Optional[str] = None,
+         end_time_column: Optional[str] = None,
+     ) -> None:
+
+         self._connection = connection
+
+         super().__init__(
+             name=name,
+             columns=columns,
+             primary_key=primary_key,
+             time_column=time_column,
+             end_time_column=end_time_column,
+         )
+
+     def _get_source_columns(self) -> List[SourceColumn]:
+         source_columns: List[SourceColumn] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"PRAGMA table_info({self.name})")
+             rows = cursor.fetchall()
+
+         if len(rows) == 0:
+             raise ValueError(f"Table '{self.name}' does not exist")
+
+         for _, column, type, _, _, is_pkey in rows:
+             # Determine column affinity:
+             type = type.strip().upper()
+             if re.search('INT', type):
+                 dtype = Dtype.int
+             elif re.search('TEXT|CHAR|CLOB', type):
+                 dtype = Dtype.string
+             elif re.search('REAL|FLOA|DOUB', type):
+                 dtype = Dtype.float
+             else:  # NUMERIC affinity.
+                 ser = self._sample_df[column]
+                 try:
+                     dtype = infer_dtype(ser)
+                 except Exception:
+                     warnings.warn(
+                         f"Data type inference for column '{column}' in "
+                         f"table '{self.name}' failed. Consider changing "
+                         f"the data type of the column to use it within "
+                         f"this table.")
+                     continue
+
+             source_column = SourceColumn(
+                 name=column,
+                 dtype=dtype,
+                 is_primary_key=bool(is_pkey),
+                 is_unique_key=False,
+             )
+             source_columns.append(source_column)
+
+         return source_columns
+
+     def _get_source_foreign_keys(self) -> List[SourceForeignKey]:
+         source_fkeys: List[SourceForeignKey] = []
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"PRAGMA foreign_key_list({self.name})")
+             for _, _, dst_table, fkey, pkey, _, _, _ in cursor.fetchall():
+                 source_fkeys.append(SourceForeignKey(fkey, dst_table, pkey))
+         return source_fkeys
+
+     def _get_sample_df(self) -> pd.DataFrame:
+         with self._connection.cursor() as cursor:
+             cursor.execute(f"SELECT * FROM {self.name} "
+                            f"ORDER BY rowid LIMIT 1000")
+             table = cursor.fetch_arrow_table()
+             return table.to_pandas(types_mapper=pd.ArrowDtype)
+
+     def _get_num_rows(self) -> Optional[int]:
+         return None
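
The dtype mapping above follows SQLite's type-affinity rules: declared types containing `INT` map to `Dtype.int`, `TEXT`/`CHAR`/`CLOB` to `Dtype.string`, `REAL`/`FLOA`/`DOUB` to `Dtype.float`, and anything else (NUMERIC affinity) falls back to sampling-based `infer_dtype`. A sketch of how a table definition translates (table and column names hypothetical; this assumes the base `Table` runs introspection on construction):

    from kumoai.experimental.rfm.backend.sqlite import SQLiteTable, connect

    connection = connect()  # In-memory database.
    with connection.cursor() as cursor:
        cursor.execute("""
            CREATE TABLE users (
                user_id INTEGER PRIMARY KEY,  -- INT  affinity -> Dtype.int
                email   VARCHAR(255),         -- CHAR affinity -> Dtype.string
                score   DOUBLE,               -- DOUB affinity -> Dtype.float
                joined  NUMERIC               -- fallback      -> infer_dtype()
            )
        """)
    connection.commit()

    table = SQLiteTable(connection=connection, name='users',
                        primary_key='user_id')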
@@ -0,0 +1,14 @@
+ from .source import SourceColumn, SourceForeignKey
+ from .column import Column
+ from .table import Table
+ from .sampler import EdgeSpec, SamplerOutput, Sampler
+
+ __all__ = [
+     'SourceColumn',
+     'SourceForeignKey',
+     'Column',
+     'Table',
+     'EdgeSpec',
+     'SamplerOutput',
+     'Sampler',
+ ]
@@ -0,0 +1,66 @@
+ from dataclasses import dataclass
+ from typing import Any
+
+ from kumoapi.typing import Dtype, Stype
+
+
+ @dataclass(init=False, repr=False, eq=False)
+ class Column:
+     stype: Stype
+
+     def __init__(
+         self,
+         name: str,
+         dtype: Dtype,
+         stype: Stype,
+         is_primary_key: bool = False,
+         is_time_column: bool = False,
+         is_end_time_column: bool = False,
+     ) -> None:
+         self._name = name
+         self._dtype = Dtype(dtype)
+         self._is_primary_key = is_primary_key
+         self._is_time_column = is_time_column
+         self._is_end_time_column = is_end_time_column
+         self.stype = Stype(stype)
+
+     @property
+     def name(self) -> str:
+         return self._name
+
+     @property
+     def dtype(self) -> Dtype:
+         return self._dtype
+
+     def __setattr__(self, key: str, val: Any) -> None:
+         if key == 'stype':
+             if isinstance(val, str):
+                 val = Stype(val)
+             assert isinstance(val, Stype)
+             if not val.supports_dtype(self.dtype):
+                 raise ValueError(f"Column '{self.name}' received an "
+                                  f"incompatible semantic type (got "
+                                  f"dtype='{self.dtype}' and stype='{val}')")
+             if self._is_primary_key and val != Stype.ID:
+                 raise ValueError(f"Primary key '{self.name}' must have 'ID' "
+                                  f"semantic type (got '{val}')")
+             if self._is_time_column and val != Stype.timestamp:
+                 raise ValueError(f"Time column '{self.name}' must have "
+                                  f"'timestamp' semantic type (got '{val}')")
+             if self._is_end_time_column and val != Stype.timestamp:
+                 raise ValueError(f"End time column '{self.name}' must have "
+                                  f"'timestamp' semantic type (got '{val}')")
+
+         super().__setattr__(key, val)
+
+     def __hash__(self) -> int:
+         return hash((self.name, self.stype, self.dtype))
+
+     def __eq__(self, other: Any) -> bool:
+         if not isinstance(other, Column):
+             return False
+         return hash(self) == hash(other)
+
+     def __repr__(self) -> str:
+         return (f'{self.__class__.__name__}(name={self.name}, '
+                 f'stype={self.stype}, dtype={self.dtype})')
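
Because `stype` is routed through `__setattr__`, every assignment re-validates the semantic type against the column's data type and its key/time roles. For instance (using the `Dtype`/`Stype` members referenced above):

    from kumoapi.typing import Dtype, Stype

    from kumoai.experimental.rfm.base import Column

    column = Column(name='user_id', dtype=Dtype.int, stype=Stype.ID,
                    is_primary_key=True)

    try:
        column.stype = Stype.timestamp  # Rejected by the checks above.
    except ValueError as e:
        print(e)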
@@ -0,0 +1,287 @@
+ import copy
+ import re
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+ import pandas as pd
+ from kumoapi.rfm.context import EdgeLayout, Link, Subgraph, Table
+ from kumoapi.typing import Stype
+
+ if TYPE_CHECKING:
+     from kumoai.experimental.rfm import Graph
+
+
+ @dataclass
+ class EdgeSpec:
+     num_neighbors: int | None = None
+     time_offsets: tuple[
+         pd.DateOffset | None,
+         pd.DateOffset,
+     ] | None = None
+
+     def __post_init__(self) -> None:
+         if (self.num_neighbors is None) == (self.time_offsets is None):
+             raise ValueError("Exactly one of 'num_neighbors' and "
+                              "'time_offsets' must be provided")
+
+
+ @dataclass
+ class SamplerOutput:
+     df_dict: dict[str, pd.DataFrame]
+     inverse_dict: dict[str, np.ndarray]
+     batch_dict: dict[str, np.ndarray]
+     num_sampled_nodes_dict: dict[str, list[int]]
+     row_dict: dict[tuple[str, str, str], np.ndarray] | None = None
+     col_dict: dict[tuple[str, str, str], np.ndarray] | None = None
+     num_sampled_edges_dict: dict[tuple[str, str, str], list[int]] | None = None
+
+
+ class Sampler(ABC):
+     def __init__(self, graph: 'Graph') -> None:
+         self._edge_types: list[tuple[str, str, str]] = []
+         for edge in graph.edges:
+             edge_type = (edge.src_table, edge.fkey, edge.dst_table)
+             self._edge_types.append(edge_type)
+             self._edge_types.append(Subgraph.rev_edge_type(edge_type))
+
+         self._primary_key_dict: dict[str, str] = {
+             table.name: table._primary_key
+             for table in graph.tables.values()
+             if table._primary_key is not None
+         }
+
+         self._time_column_dict: dict[str, str] = {
+             table.name: table._time_column
+             for table in graph.tables.values()
+             if table._time_column is not None
+         }
+
+         self._end_time_column_dict: dict[str, str] = {
+             table.name: table._end_time_column
+             for table in graph.tables.values()
+             if table._end_time_column is not None
+         }
+
+         foreign_keys = {(edge.src_table, edge.fkey) for edge in graph.edges}
+         self._table_stype_dict: dict[str, dict[str, Stype]] = {}
+         for table in graph.tables.values():
+             self._table_stype_dict[table.name] = {}
+             for column in table.columns:
+                 if column == table.primary_key:
+                     continue
+                 if (table.name, column.name) in foreign_keys:
+                     continue
+                 self._table_stype_dict[table.name][column.name] = column.stype
+
+     @property
+     def edge_types(self) -> list[tuple[str, str, str]]:
+         return self._edge_types
+
+     @property
+     def primary_key_dict(self) -> dict[str, str]:
+         return self._primary_key_dict
+
+     @property
+     def time_column_dict(self) -> dict[str, str]:
+         return self._time_column_dict
+
+     @property
+     def end_time_column_dict(self) -> dict[str, str]:
+         return self._end_time_column_dict
+
+     @property
+     def table_stype_dict(self) -> dict[str, dict[str, Stype]]:
+         return self._table_stype_dict
+
+     def sample_subgraph(
+         self,
+         entity_table_names: tuple[str, ...],
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series,
+         num_neighbors: list[int],
+         exclude_cols_dict: dict[str, list[str]] | None = None,
+     ) -> Subgraph:
+
+         edge_spec_dict: dict[tuple[str, str, str], list[EdgeSpec]] = {
+             edge_type: [EdgeSpec(value) for value in num_neighbors]
+             for edge_type in self.edge_types
+         }
+
+         # Exclude all columns that leak target information:
+         table_stype_dict: dict[str, dict[str, Stype]] = self._table_stype_dict
+         if exclude_cols_dict is not None:
+             table_stype_dict = copy.deepcopy(table_stype_dict)
+             for table_name, exclude_cols in exclude_cols_dict.items():
+                 for column_name in exclude_cols:
+                     del table_stype_dict[table_name][column_name]
+
+         # Collect all columns being used as features:
+         column_spec_dict: dict[str, list[str]] = {
+             table_name: list(stype_dict.keys())
+             for table_name, stype_dict in table_stype_dict.items()
+         }
+         # Make sure to store primary key information for entity tables:
+         for table_name in entity_table_names:
+             column_spec_dict[table_name] = (
+                 [self.primary_key_dict[table_name]] +
+                 column_spec_dict[table_name])
+
+         if anchor_time.dtype != 'datetime64[ns]':
+             anchor_time = anchor_time.astype('datetime64[ns]')
+         out = self.sample(
+             entity_table_name=entity_table_names[0],
+             entity_pkey=entity_pkey,
+             anchor_time=anchor_time,
+             column_spec_dict=column_spec_dict,
+             edge_spec_dict=edge_spec_dict,
+             drop_duplicates=True,
+             return_edges=True,
+         )
+
+         subgraph = Subgraph(
+             anchor_time=anchor_time.astype(int).to_numpy(),
+             table_dict={},
+             link_dict={},
+         )
+
+         for table_name, batch in out.batch_dict.items():
+             if len(batch) == 0:
+                 continue
+
+             primary_key = None
+             if table_name in entity_table_names:
+                 primary_key = self.primary_key_dict.get(table_name, None)
+
+             df = out.df_dict[table_name].reset_index(drop=True)
+             if table_name in self.end_time_column_dict:
+                 # Set end time to NaT for all values greater than anchor time:
+                 end_time_column = self.end_time_column_dict[table_name]
+                 ser = df[end_time_column]
+                 if ser.dtype != 'datetime64[ns]':
+                     ser = ser.astype('datetime64[ns]')
+                 # Compare by position to avoid index alignment issues:
+                 mask = ser.to_numpy() > anchor_time.to_numpy()[batch]
+                 ser.iloc[mask] = pd.NaT
+                 df[end_time_column] = ser
+
+             stype_dict = table_stype_dict[table_name]
+             for column_name, stype in stype_dict.items():
+                 if stype == Stype.text:
+                     df[column_name] = _normalize_text(df[column_name])
+
+             subgraph.table_dict[table_name] = Table(
+                 df=df,
+                 row=out.inverse_dict.get(table_name),
+                 batch=batch,
+                 num_sampled_nodes=out.num_sampled_nodes_dict[table_name],
+                 stype_dict=stype_dict,
+                 primary_key=primary_key,
+             )
+
+         assert out.row_dict is not None
+         assert out.col_dict is not None
+         assert out.num_sampled_edges_dict is not None
+         for edge_type in out.row_dict.keys():
+             row: np.ndarray | None = out.row_dict[edge_type]
+             col: np.ndarray | None = out.col_dict[edge_type]
+
+             if row is None or col is None or len(row) == 0:
+                 continue
+
+             # Do not store reverse edge type if it is an exact replica:
+             rev_edge_type = Subgraph.rev_edge_type(edge_type)
+             if (rev_edge_type in subgraph.link_dict
+                     and np.array_equal(row, out.col_dict[rev_edge_type])
+                     and np.array_equal(col, out.row_dict[rev_edge_type])):
+                 subgraph.link_dict[edge_type] = Link(
+                     layout=EdgeLayout.REV,
+                     row=None,
+                     col=None,
+                     num_sampled_edges=out.num_sampled_edges_dict[edge_type],
+                 )
+                 continue
+
+             # Do not store non-informative edges:
+             layout = EdgeLayout.COO
+             if np.array_equal(row, np.arange(len(row))):
+                 row = None
+             if np.array_equal(col, np.arange(len(col))):
+                 col = None
+
+             # Store in compressed representation if more efficient:
+             num_cols = subgraph.table_dict[edge_type[2]].num_rows
+             if col is not None and len(col) > num_cols + 1:
+                 layout = EdgeLayout.CSC
+                 colcount = np.bincount(col, minlength=num_cols)
+                 col = np.empty(num_cols + 1, dtype=col.dtype)
+                 col[0] = 0
+                 np.cumsum(colcount, out=col[1:])
+
+             subgraph.link_dict[edge_type] = Link(
+                 layout=layout,
+                 row=row,
+                 col=col,
+                 num_sampled_edges=out.num_sampled_edges_dict[edge_type],
+             )
+
+         return subgraph
+
+     # Abstract Methods ####################################################
+
+     @abstractmethod
+     def sample(
+         self,
+         entity_table_name: str,
+         entity_pkey: pd.Series,
+         anchor_time: pd.Series,
+         column_spec_dict: dict[str, list[str]],
+         edge_spec_dict: dict[tuple[str, str, str], list[EdgeSpec]],
+         drop_duplicates: bool = False,
+         return_edges: bool = False,
+     ) -> SamplerOutput:
+         pass
+
+
+ # Helper Functions ############################################################
+
+ PUNCTUATION = re.compile(r"[\'\"\.,\(\)\!\?\;\:]")
+ MULTISPACE = re.compile(r"\s+")
+
+
+ def _normalize_text(
+     ser: pd.Series,
+     max_words: int | None = 50,
+ ) -> pd.Series:
+     r"""Normalizes text into a list of lower-case words.
+
+     Args:
+         ser: The :class:`pandas.Series` to normalize.
+         max_words: The maximum number of words to return.
+             This will auto-shrink any large text column to avoid blowing up
+             context size.
+     """
+     if len(ser) == 0 or pd.api.types.is_list_like(ser.iloc[0]):
+         return ser
+
+     def normalize_fn(line: str) -> list[str]:
+         line = PUNCTUATION.sub(" ", line)
+         line = re.sub(r"<br\s*/?>", " ", line)  # Handle <br /> or <br>
+         line = MULTISPACE.sub(" ", line)
+         words = line.split()
+         if max_words is not None:
+             words = words[:max_words]
+         return words
+
+     ser = ser.fillna('').astype(str)
+
+     if max_words is not None:
+         # We estimate the number of words as 5 characters + 1 space in an
+         # English text on average. We need this pre-filter here, as word
+         # splitting on a giant text can be very expensive:
+         ser = ser.str[:6 * max_words]
+
+     ser = ser.str.lower()
+     ser = ser.map(normalize_fn)
+
+     return ser
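
Two behaviors in this module are worth a worked example: `EdgeSpec` enforces that exactly one of the two sampling strategies is set, and `_normalize_text` lower-cases, strips punctuation and `<br>` tags, and truncates to `max_words`. A quick sketch, grounded in the code above:

    import pandas as pd

    from kumoai.experimental.rfm.base import EdgeSpec
    from kumoai.experimental.rfm.base.sampler import _normalize_text

    EdgeSpec(num_neighbors=8)  # OK: hop-based sampling.
    EdgeSpec(time_offsets=(None, pd.DateOffset(days=7)))  # OK: time-based.
    try:
        EdgeSpec()  # Raises: neither strategy is given.
    except ValueError as e:
        print(e)

    ser = pd.Series(["Hello, World!<br />Great value; would buy again."])
    print(_normalize_text(ser)[0])
    # ['hello', 'world', 'great', 'value', 'would', 'buy', 'again']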
@@ -0,0 +1,18 @@
+ from dataclasses import dataclass
+
+ from kumoapi.typing import Dtype
+
+
+ @dataclass
+ class SourceColumn:
+     name: str
+     dtype: Dtype
+     is_primary_key: bool
+     is_unique_key: bool
+
+
+ @dataclass
+ class SourceForeignKey:
+     name: str
+     dst_table: str
+     primary_key: str
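
These two dataclasses form the backend-agnostic contract that the table implementations above fill in: SQLite's `PRAGMA` queries and Snowflake's `DESCRIBE`/`SHOW IMPORTED KEYS` results both reduce to lists of these records. For example (values hypothetical):

    from kumoapi.typing import Dtype

    from kumoai.experimental.rfm.base import SourceColumn, SourceForeignKey

    # A primary-key column as a table backend might report it:
    column = SourceColumn(name='user_id', dtype=Dtype.int,
                          is_primary_key=True, is_unique_key=False)

    # An 'orders.user_id' column referencing 'users.user_id':
    fkey = SourceForeignKey(name='user_id', dst_table='users',
                            primary_key='user_id')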