wedata-feature-engineering 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff compares the contents of two package versions as publicly released to their registry. It is provided for informational purposes only.
- {feature_store → wedata}/__init__.py +1 -1
- {feature_store → wedata/feature_store}/client.py +113 -41
- {feature_store → wedata/feature_store}/constants/constants.py +19 -0
- {feature_store → wedata/feature_store}/entities/column_info.py +4 -4
- {feature_store → wedata/feature_store}/entities/feature_lookup.py +5 -1
- {feature_store → wedata/feature_store}/entities/feature_spec.py +46 -46
- wedata/feature_store/entities/feature_table.py +107 -0
- {feature_store → wedata/feature_store}/entities/training_set.py +13 -12
- {feature_store → wedata/feature_store}/feature_table_client/feature_table_client.py +85 -30
- {feature_store → wedata/feature_store}/spark_client/spark_client.py +30 -56
- wedata/feature_store/training_set_client/training_set_client.py +367 -0
- wedata/feature_store/utils/__init__.py +0 -0
- feature_store/utils/utils.py → wedata/feature_store/utils/common_utils.py +108 -54
- {feature_store → wedata/feature_store}/utils/feature_lookup_utils.py +6 -6
- {feature_store → wedata/feature_store}/utils/feature_spec_utils.py +6 -6
- {feature_store → wedata/feature_store}/utils/feature_utils.py +5 -5
- wedata/feature_store/utils/on_demand_utils.py +107 -0
- {feature_store → wedata/feature_store}/utils/schema_utils.py +1 -1
- wedata/feature_store/utils/signature_utils.py +205 -0
- {feature_store → wedata/feature_store}/utils/training_set_utils.py +18 -19
- {feature_store → wedata/feature_store}/utils/uc_utils.py +1 -1
- {wedata_feature_engineering-0.1.4.dist-info → wedata_feature_engineering-0.1.6.dist-info}/METADATA +1 -1
- wedata_feature_engineering-0.1.6.dist-info/RECORD +43 -0
- wedata_feature_engineering-0.1.6.dist-info/top_level.txt +1 -0
- feature_store/entities/feature_table.py +0 -164
- feature_store/training_set_client/training_set_client.py +0 -196
- feature_store/utils/common_utils.py +0 -96
- wedata_feature_engineering-0.1.4.dist-info/RECORD +0 -41
- wedata_feature_engineering-0.1.4.dist-info/top_level.txt +0 -1
- {feature_store/constants → wedata/feature_store}/__init__.py +0 -0
- {feature_store/entities → wedata/feature_store/constants}/__init__.py +0 -0
- {feature_store/feature_table_client → wedata/feature_store/entities}/__init__.py +0 -0
- {feature_store → wedata/feature_store}/entities/data_type.py +0 -0
- {feature_store → wedata/feature_store}/entities/environment_variables.py +0 -0
- {feature_store → wedata/feature_store}/entities/feature.py +0 -0
- {feature_store → wedata/feature_store}/entities/feature_column_info.py +0 -0
- {feature_store → wedata/feature_store}/entities/feature_function.py +0 -0
- {feature_store → wedata/feature_store}/entities/feature_spec_constants.py +0 -0
- {feature_store → wedata/feature_store}/entities/feature_table_info.py +0 -0
- {feature_store → wedata/feature_store}/entities/function_info.py +0 -0
- {feature_store → wedata/feature_store}/entities/on_demand_column_info.py +0 -0
- {feature_store → wedata/feature_store}/entities/source_data_column_info.py +0 -0
- {feature_store/spark_client → wedata/feature_store/feature_table_client}/__init__.py +0 -0
- {feature_store/training_set_client → wedata/feature_store/spark_client}/__init__.py +0 -0
- {feature_store/utils → wedata/feature_store/training_set_client}/__init__.py +0 -0
- {feature_store → wedata/feature_store}/utils/topological_sort.py +0 -0
- {feature_store → wedata/feature_store}/utils/validation_utils.py +0 -0
- {wedata_feature_engineering-0.1.4.dist-info → wedata_feature_engineering-0.1.6.dist-info}/WHEEL +0 -0
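The defining change in 0.1.6 is that every module moves from the top-level `feature_store` package into the `wedata.feature_store` namespace (note the replaced `top_level.txt`). Downstream code only needs its import paths rewritten; the paths below are taken verbatim from the hunks that follow:

```python
# 0.1.4 import paths (top-level package "feature_store"):
# from feature_store.constants.constants import APPEND, DEFAULT_WRITE_STREAM_TRIGGER
# from feature_store.entities.feature import Feature

# 0.1.6 import paths (same modules, now under the "wedata" namespace):
from wedata.feature_store.constants.constants import APPEND, DEFAULT_WRITE_STREAM_TRIGGER
from wedata.feature_store.entities.feature import Feature
from wedata.feature_store.entities.feature_table import FeatureTable
```

Two of the modified files are shown hunk by hunk below.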
{feature_store → wedata/feature_store}/feature_table_client/feature_table_client.py:

```diff
@@ -8,15 +8,18 @@ from pyspark.sql.streaming import StreamingQuery
 from pyspark.sql.types import StructType
 import os
 
-from feature_store.constants.constants import APPEND, DEFAULT_WRITE_STREAM_TRIGGER
+from wedata.feature_store.constants.constants import APPEND, DEFAULT_WRITE_STREAM_TRIGGER
+from wedata.feature_store.entities.feature_table import FeatureTable
+from wedata.feature_store.spark_client.spark_client import SparkClient
+from wedata.feature_store.utils import common_utils
 
 
 class FeatureTableClient:
     """特征表操作类"""
 
     def __init__(
-            self,
-            spark: SparkSession
+        self,
+        spark: SparkSession
     ):
         self._spark = spark
 
```
```diff
@@ -46,12 +49,6 @@ class FeatureTableClient:
                 f"DataFrame与schema不匹配。差异字段: {diff_fields if diff_fields else '字段类型不一致'}"
             )
 
-    @staticmethod
-    def _validate_table_name(name: str):
-        """验证特征表命名规范"""
-        if name.count('.') < 2:
-            raise ValueError("特征表名称需符合<catalog>.<schema>.<table>格式")
-
     @staticmethod
     def _validate_key_conflicts(primary_keys: List[str], timestamp_keys: List[str]):
         """校验主键与时间戳键是否冲突"""
```
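The removed inline check demanded a fully qualified `<catalog>.<schema>.<table>` name. Validation now lives in `common_utils.validate_table_name`, and the hunks below always pair it with `common_utils.build_full_table_name`, which suggests callers pass a short name that gets qualified from session context. Neither helper's body appears in this diff, so the following is only a speculative sketch of that split; the qualification rule and defaults are assumptions:

```python
# Speculative sketch -- the real helpers live in wedata/feature_store/utils/common_utils.py,
# whose body is not part of this diff.
def validate_table_name(name: str) -> None:
    # Assumed: reject obviously malformed names before any catalog lookup.
    if not name or any(ch.isspace() for ch in name):
        raise ValueError(f"Invalid table name: {name!r}")

def build_full_table_name(name: str, catalog: str = "spark_catalog", database: str = "default") -> str:
    # Assumed: pass through already-qualified names, otherwise qualify as <catalog>.<database>.<table>.
    return name if name.count(".") >= 2 else f"{catalog}.{database}.{name}"
```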
```diff
@@ -75,7 +72,8 @@ class FeatureTableClient:
             schema: Optional[StructType] = None,
             description: Optional[str] = None,
             tags: Optional[Dict[str, str]] = None
-    ):
+    ) -> FeatureTable:
+
         """
         创建特征表(支持批流数据写入)
 
@@ -85,6 +83,7 @@ class FeatureTableClient:
             df: 初始数据(可选,用于推断schema)
             timestamp_keys: 时间戳键(用于时态特征)
             partition_columns: 分区列(优化存储查询)
+            schema: 表结构定义(可选,当不提供df时必需)
             description: 业务描述
             tags: 业务标签
 
@@ -94,6 +93,7 @@ class FeatureTableClient:
         Raises:
             ValueError: 当schema与数据不匹配时
         """
+
        # 参数标准化
         primary_keys = self._normalize_params(primary_keys)
         timestamp_keys = self._normalize_params(timestamp_keys)
@@ -101,23 +101,25 @@ class FeatureTableClient:
 
         # 元数据校验
         self._validate_schema(df, schema)
-        #self._validate_table_name(name)
         self._validate_key_conflicts(primary_keys, timestamp_keys)
 
-        #…
-
+        # 表名校验
+        common_utils.validate_table_name(name)
+
+        # 构建完整表名
+        table_name = common_utils.build_full_table_name(name)
 
         # 检查表是否存在
         try:
             if self._spark.catalog.tableExists(table_name):
                 raise ValueError(
-                    f"…
-                    "…
-                    "1.…
-                    "2.…
+                    f"Table '{table_name}' already exists\n"
+                    "Solutions:\n"
+                    "1. Use a different table name\n"
+                    "2. Drop the existing table: spark.sql(f'DROP TABLE {name}')\n"
                 )
         except Exception as e:
-            raise ValueError(f"…
+            raise ValueError(f"Error checking table existence: {str(e)}") from e
 
         # 推断表schema
         table_schema = schema or df.schema
@@ -126,7 +128,7 @@ class FeatureTableClient:
         timestamp_keys_ddl = []
         for timestamp_key in timestamp_keys:
             if timestamp_key not in primary_keys:
-                raise ValueError(f"…
+                raise ValueError(f"Timestamp key '{timestamp_key}' must be a primary key")
             timestamp_keys_ddl.append(f"`{timestamp_key}` TIMESTAMP")
 
         #从环境变量获取额外标签
```
```diff
@@ -185,7 +187,19 @@ class FeatureTableClient:
             if df is not None:
                 df.write.insertInto(table_name)
         except Exception as e:
-            raise ValueError(f"…
+            raise ValueError(f"Failed to create table: {str(e)}") from e
+
+        # 构建并返回FeatureTable对象
+        return FeatureTable(
+            name=name,
+            table_id=table_name,
+            description=description or "",
+            primary_keys=primary_keys,
+            partition_columns=partition_columns or [],
+            features=[field.name for field in table_schema.fields],
+            timestamp_keys=timestamp_keys or [],
+            tags=dict(**tags or {}, **env_tags)
+        )
 
     def write_table(
             self,
```
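As of this hunk, `create_table` no longer returns `None`: it builds and returns the `FeatureTable` it just registered, with the feature list taken from the inferred schema. A usage sketch; the import path comes from the file list above, while the table and column names are invented for illustration:

```python
from pyspark.sql import SparkSession, functions as F
from wedata.feature_store.feature_table_client.feature_table_client import FeatureTableClient

spark = SparkSession.builder.getOrCreate()
client = FeatureTableClient(spark)

df = (
    spark.createDataFrame(
        [("u1", "2024-01-01 00:00:00", 0.42)],
        "user_id STRING, event_time STRING, score DOUBLE",
    )
    .withColumn("event_time", F.to_timestamp("event_time"))
)

table = client.create_table(
    name="user_features",                   # short name, qualified via build_full_table_name
    primary_keys=["user_id", "event_time"],
    timestamp_keys=["event_time"],          # must also be a primary key, per the check above
    df=df,                                  # schema inferred from df when none is given
    description="demo feature table",
)
print(table.features)  # e.g. ['user_id', 'event_time', 'score']
```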
```diff
@@ -195,6 +209,7 @@ class FeatureTableClient:
             checkpoint_location: Optional[str] = None,
             trigger: Optional[Dict[str, Any]] = DEFAULT_WRITE_STREAM_TRIGGER
     ) -> Optional[StreamingQuery]:
+
         """
         写入特征表数据(支持批处理和流式写入)
 
@@ -215,10 +230,13 @@ class FeatureTableClient:
         # 验证写入模式
         valid_modes = ["append", "overwrite"]
         if mode not in valid_modes:
-            raise ValueError(f"…
+            raise ValueError(f"Invalid write mode '{mode}', valid options: {valid_modes}")
+
+        # 表名校验
+        common_utils.validate_table_name(name)
 
-        #…
-        table_name = …
+        # 构建完整表名
+        table_name = common_utils.build_full_table_name(name)
 
         # 判断是否是流式DataFrame
         is_streaming = df.isStreaming
@@ -227,7 +245,7 @@ class FeatureTableClient:
         if is_streaming:
             # 流式写入
             if not checkpoint_location:
-                raise ValueError("…
+                raise ValueError("Streaming write requires checkpoint_location parameter")
 
             writer = df.writeStream \
                 .format("parquet") \
```
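The write path now enforces its contract up front: only `append` or `overwrite`, table-name validation before qualification, and a hard requirement on `checkpoint_location` for streaming input. A hedged sketch of both branches, continuing the example above (the positional order of `name` and `df` is assumed; the checkpoint path is illustrative):

```python
# Batch write: mode must be "append" or "overwrite".
client.write_table("user_features", df, mode="append")

# Streaming write: checkpoint_location is now mandatory, and a StreamingQuery is returned.
stream_df = spark.readStream.format("rate").load()  # stand-in source; a real one must match the table schema
query = client.write_table(
    "user_features",
    stream_df,
    mode="append",
    checkpoint_location="/tmp/checkpoints/user_features",
)
query.awaitTermination(10)  # or manage the query lifecycle elsewhere
```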
```diff
@@ -252,6 +270,7 @@ class FeatureTableClient:
             self,
             name: str
     ) -> DataFrame:
+
         """
         从特征表中读取数据
 
@@ -264,8 +283,12 @@ class FeatureTableClient:
         Raises:
             ValueError: 当表不存在或读取失败时抛出
         """
+
+        # 表名校验
+        common_utils.validate_table_name(name)
+
         # 构建完整表名
-        table_name = …
+        table_name = common_utils.build_full_table_name(name)
 
         try:
             # 检查表是否存在
@@ -278,10 +301,8 @@ class FeatureTableClient:
         except Exception as e:
             raise ValueError(f"读取表 '{table_name}' 失败: {str(e)}") from e
 
-    def drop_table(
-            self,
-            name: str
-    ) -> None:
+    def drop_table(self, name: str):
+
         """
         删除特征表(表不存在时抛出异常)
 
@@ -296,8 +317,12 @@ class FeatureTableClient:
             # 基本删除
             drop_table("user_features")
         """
+
+        # 表名校验
+        common_utils.validate_table_name(name)
+
         # 构建完整表名
-        table_name = …
+        table_name = common_utils.build_full_table_name(name)
 
         try:
             # 检查表是否存在
```
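`read_table` and `drop_table` gain the same validate-then-qualify preamble, so every public entry point now resolves identical short names. Continuing the hedged example:

```python
features_df = client.read_table("user_features")  # ValueError if the table does not exist
client.drop_table("user_features")                # likewise raises on a missing table
```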
```diff
@@ -311,3 +336,33 @@ class FeatureTableClient:
             raise # 直接抛出已知的ValueError
         except Exception as e:
             raise RuntimeError(f"删除表 '{table_name}' 失败: {str(e)}") from e
+
+    def get_table(
+        self,
+        name: str,
+        spark_client: SparkClient
+    ) -> FeatureTable:
+
+        """获取特征表元数据信息
+
+        参数:
+            name: 特征表名称
+            spark_client: Spark客户端
+
+        返回:
+            FeatureTable对象
+
+        异常:
+            ValueError: 当表不存在或获取失败时抛出
+        """
+
+        # 表名校验
+        common_utils.validate_table_name(name)
+
+        # 构建完整表名
+        table_name = common_utils.build_full_table_name(name)
+
+        try:
+            return spark_client.get_feature_table(table_name)
+        except Exception as e:
+            raise ValueError(f"获取表'{name}'元数据失败: {str(e)}") from e
```
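The new `get_table` is deliberately thin: validate the name, qualify it, delegate to `SparkClient.get_feature_table`, and wrap any failure in `ValueError`. A hedged call sketch:

```python
from wedata.feature_store.spark_client.spark_client import SparkClient

spark_client = SparkClient(spark)
feature_table = client.get_table("user_features", spark_client)
print(feature_table.primary_keys, feature_table.timestamp_keys)
```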
{feature_store → wedata/feature_store}/spark_client/spark_client.py:

```diff
@@ -6,73 +6,52 @@ from pyspark.sql.catalog import Column
 from pyspark.sql.functions import when, isnull
 from pyspark.sql.types import StructType, StringType, StructField
 
-from feature_store.entities.feature import Feature
-from feature_store.entities.feature_table import FeatureTable
-from feature_store.entities.function_info import FunctionParameterInfo, FunctionInfo
-from feature_store.utils.common_utils import unsanitize_identifier
-from feature_store.utils.utils import sanitize_multi_level_name
+from wedata.feature_store.entities.feature import Feature
+from wedata.feature_store.entities.feature_table import FeatureTable
+from wedata.feature_store.entities.function_info import FunctionParameterInfo, FunctionInfo
+from wedata.feature_store.utils.common_utils import unsanitize_identifier, sanitize_multi_level_name
 
 
 class SparkClient:
     def __init__(self, spark: SparkSession):
         self._spark = spark
 
-    def createDataFrame(self, data, schema) -> DataFrame:
-        return self._spark.createDataFrame(data, schema)
-
-    def read_table(
-        self, qualified_table_name, as_of_delta_timestamp=None, streaming=False
-    ):
-        """
-        Reads a Delta table, optionally as of some timestamp.
-        """
-        if streaming and as_of_delta_timestamp:
-            raise ValueError(
-                "Internal error: as_of_delta_timestamp cannot be specified when"
-                " streaming=True."
-            )
-
-        base_reader = (
-            # By default, Structured Streaming only handles append operations. Because
-            # we have a notion of primary keys, most offline feature store operations
-            # are not appends. For example, FeatureStoreClient.write_table(mode=MERGE)
-            # will issue a MERGE operation.
-            # In order to propagate the non-append operations to the
-            # readStream, we set ignoreChanges to "true".
-            # For more information,
-            # see https://docs.databricks.com/delta/delta-streaming.html#ignore-updates-and-deletes
-            self._spark.readStream.format("delta").option("ignoreChanges", "true")
-            if streaming
-            else self._spark.read.format("delta")
-        )
-
-        if as_of_delta_timestamp:
-            return base_reader.option("timestampAsOf", as_of_delta_timestamp).table(
-                sanitize_multi_level_name(qualified_table_name)
-            )
-        else:
-            return base_reader.table(sanitize_multi_level_name(qualified_table_name))
 
     def get_current_catalog(self):
         """
-        …
+        获取当前Spark会话的catalog名称(使用spark.catalog.currentCatalog属性)
+
+        返回:
+            str: 当前catalog名称,如果未设置则返回None
         """
         try:
-            …
-            …
-        except Exception as e:
+            return unsanitize_identifier(self._spark.catalog.currentCatalog())
+        except Exception:
             return None
 
     def get_current_database(self):
         """
-        …
+        获取Spark上下文中当前设置的数据库名称
+
+        返回:
+            str: 当前数据库名称,如果获取失败则返回None
         """
         try:
-            …
-            …
-            …
+            # 使用Spark SQL查询当前数据库
+            df = self._spark.sql("SELECT CURRENT_DATABASE()")
+            # 获取第一行第一列的值并去除特殊字符
+            return unsanitize_identifier(df.first()[0])
+        except Exception:
+            # 捕获所有异常并返回None
             return None
 
+
+
+
+    def createDataFrame(self, data, schema) -> DataFrame:
+        return self._spark.createDataFrame(data, schema)
+
+
     def read_table(self, table_name):
         """读取Spark表数据
 
```
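Besides the import rewrite, this hunk drops the Delta-specific `read_table` (with its `ignoreChanges`/`timestampAsOf` handling) and rewrites the two session accessors to degrade to `None` instead of raising. What the new bodies boil down to, reproduced standalone; note that `spark.catalog.currentCatalog()` requires PySpark 3.4+:

```python
# Equivalent of the new get_current_catalog body:
catalog = spark.catalog.currentCatalog()                      # PySpark 3.4+ API

# Equivalent of the new get_current_database body:
database = spark.sql("SELECT CURRENT_DATABASE()").first()[0]  # plain SQL, works on older Spark too
```

Both values are additionally passed through `unsanitize_identifier` in the real methods to strip identifier quoting.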
```diff
@@ -134,11 +113,6 @@ class SparkClient:
             ) for row in columns
         ]
 
-    def get_online_stores(self, table_name):
-        return None
-
-
-
     def get_feature_table(self, table_name):
 
         # 获取表元数据
@@ -170,19 +144,19 @@ class SparkClient:
         return FeatureTable(
             name=table_name,
             table_id=table_properties.get("table_id", table_name),
-            description=table.description or table_properties.get("…
+            description=table.description or table_properties.get("comment", table_name),
             primary_keys=table_properties.get("primaryKeys", "").split(",") if table_properties.get("primaryKeys") else [],
             partition_columns=table.partitionColumnNames if hasattr(table, 'partitionColumnNames') else [],
             features=features,
             creation_timestamp=None, # Spark表元数据不包含创建时间戳
-            online_stores=…
+            online_stores=None,
             notebook_producers=None,
             job_producers=None,
             table_data_sources=None,
             path_data_sources=None,
             custom_data_sources=None,
             timestamp_keys=table_properties.get("timestamp_keys"),
-            tags=table_properties
+            tags=table_properties
         )
 
     def _get_routines_with_parameters(self, full_routine_names: List[str]) -> DataFrame:
```
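`get_feature_table` builds its result from plain Spark table metadata, so fields the metastore cannot supply are now filled explicitly: the description falls back to the `comment` table property, and the online-store and producer fields are hard-coded `None`, consistent with dropping `get_online_stores` above. A hedged view of what a caller gets back:

```python
ft = spark_client.get_feature_table("default.user_features")  # hypothetical qualified name
print(ft.description)    # table comment, falling back to the table name
print(ft.online_stores)  # always None in 0.1.6, since get_online_stores was removed
```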