kele 0.0.1a1__cp314-cp314-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. kele/__init__.py +38 -0
  2. kele/_version.py +1 -0
  3. kele/config.py +243 -0
  4. kele/control/README_metrics.md +102 -0
  5. kele/control/__init__.py +20 -0
  6. kele/control/callback.py +255 -0
  7. kele/control/grounding_selector/__init__.py +5 -0
  8. kele/control/grounding_selector/_rule_strategies/README.md +13 -0
  9. kele/control/grounding_selector/_rule_strategies/__init__.py +24 -0
  10. kele/control/grounding_selector/_rule_strategies/_sequential_strategy.py +42 -0
  11. kele/control/grounding_selector/_rule_strategies/strategy_protocol.py +51 -0
  12. kele/control/grounding_selector/_selector_utils.py +123 -0
  13. kele/control/grounding_selector/_term_strategies/__init__.py +24 -0
  14. kele/control/grounding_selector/_term_strategies/_exhausted_strategy.py +34 -0
  15. kele/control/grounding_selector/_term_strategies/strategy_protocol.py +50 -0
  16. kele/control/grounding_selector/rule_selector.py +98 -0
  17. kele/control/grounding_selector/term_selector.py +89 -0
  18. kele/control/infer_path.py +306 -0
  19. kele/control/metrics.py +357 -0
  20. kele/control/status.py +286 -0
  21. kele/egg_equiv.pyd +0 -0
  22. kele/egg_equiv.pyi +11 -0
  23. kele/equality/README.md +8 -0
  24. kele/equality/__init__.py +4 -0
  25. kele/equality/_egg_equiv/src/lib.rs +267 -0
  26. kele/equality/_equiv_elem.py +67 -0
  27. kele/equality/_utils.py +36 -0
  28. kele/equality/equivalence.py +141 -0
  29. kele/executer/__init__.py +4 -0
  30. kele/executer/executing.py +139 -0
  31. kele/grounder/README.md +83 -0
  32. kele/grounder/__init__.py +17 -0
  33. kele/grounder/grounded_rule_ds/__init__.py +6 -0
  34. kele/grounder/grounded_rule_ds/_nodes/__init__.py +24 -0
  35. kele/grounder/grounded_rule_ds/_nodes/_assertion.py +353 -0
  36. kele/grounder/grounded_rule_ds/_nodes/_conn.py +116 -0
  37. kele/grounder/grounded_rule_ds/_nodes/_op.py +57 -0
  38. kele/grounder/grounded_rule_ds/_nodes/_root.py +71 -0
  39. kele/grounder/grounded_rule_ds/_nodes/_rule.py +119 -0
  40. kele/grounder/grounded_rule_ds/_nodes/_term.py +390 -0
  41. kele/grounder/grounded_rule_ds/_nodes/_tftable.py +15 -0
  42. kele/grounder/grounded_rule_ds/_nodes/_tupletable.py +444 -0
  43. kele/grounder/grounded_rule_ds/_nodes/_typing_polars.py +26 -0
  44. kele/grounder/grounded_rule_ds/grounded_class.py +461 -0
  45. kele/grounder/grounded_rule_ds/grounded_ds_utils.py +91 -0
  46. kele/grounder/grounded_rule_ds/rule_check.py +373 -0
  47. kele/grounder/grounding.py +118 -0
  48. kele/knowledge_bases/README.md +112 -0
  49. kele/knowledge_bases/__init__.py +6 -0
  50. kele/knowledge_bases/builtin_base/__init__.py +1 -0
  51. kele/knowledge_bases/builtin_base/builtin_concepts.py +13 -0
  52. kele/knowledge_bases/builtin_base/builtin_facts.py +43 -0
  53. kele/knowledge_bases/builtin_base/builtin_operators.py +105 -0
  54. kele/knowledge_bases/builtin_base/builtin_rules.py +14 -0
  55. kele/knowledge_bases/fact_base.py +158 -0
  56. kele/knowledge_bases/ontology_base.py +67 -0
  57. kele/knowledge_bases/rule_base.py +194 -0
  58. kele/main.py +464 -0
  59. kele/py.typed +0 -0
  60. kele/syntax/CONCEPT_README.md +117 -0
  61. kele/syntax/__init__.py +40 -0
  62. kele/syntax/_cnf_converter.py +161 -0
  63. kele/syntax/_sat_solver.py +116 -0
  64. kele/syntax/base_classes.py +1482 -0
  65. kele/syntax/connectives.py +20 -0
  66. kele/syntax/dnf_converter.py +145 -0
  67. kele/syntax/external.py +17 -0
  68. kele/syntax/sub_concept.py +87 -0
  69. kele/syntax/syntacticsugar.py +201 -0
  70. kele-0.0.1a1.dist-info/METADATA +166 -0
  71. kele-0.0.1a1.dist-info/RECORD +74 -0
  72. kele-0.0.1a1.dist-info/WHEEL +4 -0
  73. kele-0.0.1a1.dist-info/licenses/LICENSE +28 -0
  74. kele-0.0.1a1.dist-info/licenses/licensecheck.json +20 -0
@@ -0,0 +1,444 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, ClassVar, Any
4
+ from bidict import bidict
5
+ import numpy as np
6
+ import polars as pl
7
+ from polars.type_aliases import PolarsDataType
8
+
9
+ if TYPE_CHECKING:
10
+ from kele.equality import Equivalence
11
+ from ._typing_polars import PolarsDataType
12
+ from kele.syntax.base_classes import Constant, CompoundTerm, Variable
13
+ from collections.abc import Generator
14
+ from numpy.typing import NDArray
15
+
16
+
17
+ class NameToObject:
18
+ """
19
+ 记录了Variable到其uuid(或term、constant到其uuid)的双向映射
20
+ """
21
+ _item_to_name: ClassVar[bidict[Any, int]] = bidict()
22
+ _item_counter: ClassVar[int] = 0
23
+
24
+ @classmethod
25
+ def register_item(cls, var: Constant | CompoundTerm) -> None:
26
+ if var not in cls._item_to_name:
27
+ var_id = cls._item_counter
28
+ cls._item_to_name[var] = var_id
29
+ cls._item_counter += 1
30
+
31
+ @classmethod
32
+ def get_item_by_name(cls, name: int | str) -> Any: # noqa: ANN401 # HACK: 列名必须str,值必须int。为了后续改动不容易出bug
33
+ # 的角度,这里最好拆开两个get_item,分值和列名。或者列名直接从variable的name去生成,这个没必要单独用item
34
+ return cls._item_to_name.inverse[int(name)]
35
+
36
+ @classmethod
37
+ def get_item_name(cls, item: Any) -> int: # noqa: ANN401
38
+ cls.register_item(item)
39
+ return cls._item_to_name[item]
40
+
41
+ @classmethod
42
+ def len(cls) -> int: return cls._item_counter
43
+
44
+ @classmethod
45
+ def reset(cls) -> None:
46
+ cls._item_to_name.clear()
47
+ cls._item_counter = 0
48
+
49
+
50
+ class _TupleTable: # HACK: 此结构储存非常容易导致爆内存,后续应当考虑是否修改
51
+ """
52
+ 这是用于记录实例化结果的一个类。
53
+ 它本质上是一个dict[Variable, list[Constant]],但是特别之处在于,index相同的元素可以视作一个元组,
54
+ 也就是说,删除list中某个index的元素,那么所有的list都要删除这个index的元素。
55
+ 同时还实现了合并功能,合并的时候,按照:相同元素取交集,全新元素基于乘法法则生成新的组来合并
56
+ """
57
+ def __init__(self, column_name: tuple[Variable, ...]) -> None:
58
+ self.column_name = [str(NameToObject.get_item_name(var)) for var in column_name]
59
+ self._raw_column_name: tuple[Variable, ...] = column_name
60
+ self._add_row_cache: dict[str, list[int]] = {}
61
+ self._base_df: pl.DataFrame
62
+ self._is_deduped = False
63
+
64
+ def __len__(self) -> int:
65
+ """
66
+ 返回行数。
67
+ """
68
+ return self.height
69
+
70
+ def __getitem__(self, index: int) -> dict[Variable, Constant | CompoundTerm]:
71
+ """
72
+ 按行索引返回一条记录,格式与 iter_rows一致。仅支持 int 索引(正/负),不支持切片。
73
+ raise: TypeError: 暂时只允许通过数字访问行
74
+ raise: IndexError:数字只允许[-n~n-1]的范围
75
+ """ # noqa: DOC501
76
+ self.make_table_ready()
77
+
78
+ n = self.height
79
+ if not isinstance(index, int):
80
+ raise TypeError("Index must be an int.")
81
+ if n == 0:
82
+ raise IndexError("The table is empty.")
83
+ # 负索引处理
84
+ if index < 0:
85
+ index += n
86
+ if index < 0 or index >= n:
87
+ raise IndexError("Row index out of range.")
88
+
89
+ row_np = self.base_df.row(index) # 使用缓存的 numpy 数组以获得更快的随机访问
90
+ # 将字符串名恢复成原对象;键用原始 Variable 对象
91
+ result = {}
92
+
93
+ for name, val in zip(self.raw_column_name, row_np, strict=True):
94
+ val_obj = NameToObject.get_item_by_name(val)
95
+ result[name] = val_obj
96
+
97
+ return result
98
+
99
+ def __iter__(self) -> Generator[dict[Variable, Constant | CompoundTerm]]:
100
+ yield from self.iter_rows()
101
+
102
+ def set_base_df(self, df: pl.DataFrame, *, is_unique: bool | None = None) -> None:
103
+
104
+ """
105
+ 从外界的一个lazyframe初始化此TupleTable,这主要是用于join过程
106
+
107
+ :param df: 初始化数据
108
+ :type df: pl.DataFrame
109
+ """
110
+ # TODO: 有些调用方可确定已去重;未来可以更细化地决定是否执行 unique 以降低开销。
111
+ if is_unique is None:
112
+ self._base_df = df.unique()
113
+ self._is_deduped = True
114
+ elif is_unique:
115
+ self._base_df = df
116
+ self._is_deduped = True
117
+ else:
118
+ self._base_df = df
119
+ self._is_deduped = False
120
+ self.column_name = df.columns
121
+ self.__dict__.pop('_np_list_store', None)
122
+
123
+ def get_small_table(self, column_name: tuple[Any, ...]) -> _TupleTable:
124
+ """
125
+ 获取一个子表,只包含column_name中的列
126
+
127
+ :param column_name: 子表的列名
128
+ :type column_name: tuple[Any, ...]
129
+ :return: 子表
130
+ :rtype: _TupleTable
131
+ """
132
+ self.make_table_ready() # 使用的一定是准备好后的table
133
+
134
+ column_name = tuple(set(column_name))
135
+ small_column_name = [str(NameToObject.get_item_name(var)) for var in column_name]
136
+ small_table = _TupleTable(column_name)
137
+ small_table.set_base_df(self.base_df.select(pl.col(small_column_name)))
138
+ return small_table
139
+
140
+ def get_true_false_table(self, data: list[bool], *, keep_table: bool | None = None) -> tuple[_TupleTable, _TupleTable]:
141
+ """
142
+ 基于真值列获取新表true_table/false_table
143
+ :param column_name: 列名
144
+ :type column_name: Assertion
145
+ :param data: 真值列
146
+ :type data: list[str]
147
+ :return: 真值表/假值表
148
+ :rtype: tuple[_TupleTable, _TupleTable]
149
+ """
150
+ self.make_table_ready()
151
+
152
+ mask_series = pl.Series(data, dtype=pl.Boolean)
153
+ true_table = _TupleTable(self.raw_column_name)
154
+ false_table = _TupleTable(self.raw_column_name)
155
+ if keep_table is None or keep_table:
156
+ df_true_lazy = self.base_df.filter(mask_series)
157
+ true_table.set_base_df(df_true_lazy)
158
+ if keep_table is None or not keep_table:
159
+ df_false_lazy = self.base_df.filter(~mask_series)
160
+ false_table.set_base_df(df_false_lazy)
161
+
162
+ return true_table, false_table
163
+
164
+ def concat_table(self, *tables: _TupleTable) -> _TupleTable:
165
+ """
166
+ 合并此表与多个表,返回合并后的 table
167
+
168
+ :param tables: 需要合并的其他表
169
+ :type tables: _TupleTable
170
+ :return: 合并后的表
171
+ :rtype: _TupleTable
172
+ """
173
+ # 确保所有表的cache都被合并为df了
174
+ for table in (self, *tables):
175
+ table.make_table_ready()
176
+
177
+ valid_tables = [t for t in tables if t.height > 0]
178
+
179
+ # 新建表,列名取 self 的
180
+ new_table = _TupleTable(self.raw_column_name)
181
+
182
+ # 拼接所有表
183
+ all_lfs = []
184
+ columns = self.base_df.columns
185
+ for t in valid_tables:
186
+ df = t.base_df.select(columns)
187
+ all_lfs.append(df)
188
+
189
+ # 这里不做去重:union/anti_join 已确保输入表按需去重;如需去重,应在合并后统一处理以避免重复开销。
190
+ new_df = pl.concat([self.base_df, *all_lfs])
191
+ new_table.set_base_df(new_df)
192
+
193
+ return new_table
194
+
195
+ def update_equiv_element(self, equivalence: Equivalence) -> _TupleTable:
196
+ """
197
+ 将当前table的所有元素更新为他们的等价类代表元
198
+
199
+ :param equivalence: 等价类
200
+ :type equivalence: Equivalence
201
+ :return: 新的table
202
+ :rtype: _TupleTable
203
+ """
204
+ # 1) 取所有列的唯一值
205
+ unique_values = (
206
+ self.base_df.select(pl.concat_list(pl.all()).explode().unique())
207
+ .to_series()
208
+ .to_list()
209
+ )
210
+
211
+ # 2) 计算代表元映射
212
+ mapping = {}
213
+ for old in unique_values:
214
+ old_elem = NameToObject.get_item_by_name(old)
215
+ rep_elem = equivalence.get_represent_elem(old_elem)
216
+ if old_elem != rep_elem:
217
+ mapping[old] = NameToObject.get_item_name(rep_elem)
218
+
219
+ new_df = self.base_df.with_columns(
220
+ pl.all().map_elements(lambda a: mapping.get(a, a), return_dtype=self._smallest_unsigned_int_dtype())
221
+ ) # FIXME: 换replace、unique也可以做一定优化
222
+ new_table = _TupleTable(self.raw_column_name)
223
+ # 映射可能让不同值收敛到同一代表元,因此仍需去重。
224
+ new_table.set_base_df(new_df)
225
+ return new_table
226
+
227
+ def union_table(self, another_table: _TupleTable) -> _TupleTable:
228
+ """
229
+ 将此表与另外一个表按照inner方式合并,返回合并后的table
230
+
231
+ :param another_table: 另外一个表
232
+ :type another_table: _TupleTable
233
+ :return: 合并后的表
234
+ :rtype: _TupleTable
235
+ """
236
+ for table in (self, another_table):
237
+ table.make_table_ready(ensure_unique=True)
238
+
239
+ common_columns: set[str] = set(self.column_name) & set(another_table.column_name)
240
+ if common_columns == set():
241
+ result_df = self.base_df.join(another_table.base_df, how='cross')
242
+ # 没有任何公共列,二者按照乘法原则合并
243
+ else:
244
+ result_df = self.base_df.join(another_table.base_df, on=list(common_columns), how='inner')
245
+ # 有公共列:二者按照"inner"方式合并
246
+
247
+ new_column_name = tuple(NameToObject.get_item_by_name(name) for name in result_df.columns)
248
+ new_table = _TupleTable(new_column_name)
249
+ new_table.set_base_df(result_df)
250
+ return new_table
251
+
252
+ def anti_join(self, another_table: _TupleTable) -> _TupleTable:
253
+ """
254
+ 移除掉此table中与another_table中相同的行,返回一个新的table
255
+ """
256
+ same_column_name = set(another_table.column_name) & set(self.column_name)
257
+ result_df = self.base_df.join(another_table.base_df, on=list(same_column_name), how='anti')
258
+ new_table = _TupleTable(self.raw_column_name)
259
+ new_table.set_base_df(result_df, is_unique=self._is_deduped)
260
+ return new_table
261
+
262
+ def copy(self) -> _TupleTable:
263
+ """
264
+ 复制此表
265
+ """
266
+ new_table = _TupleTable(self.raw_column_name)
267
+ new_table.set_base_df(self.base_df)
268
+ return new_table
269
+
270
+ def iter_rows(self) -> Generator[dict[Variable, Constant | CompoundTerm]]:
271
+ """
272
+ 获取所有行
273
+ :yield: 所有行
274
+ :rtype: Generator[tuple[Any, ...]]
275
+ """ # noqa: DOC402
276
+ for i in self.base_df.iter_rows(): # XXX: 似乎换rows等好一点
277
+ temp_dict: dict[Variable, Constant | CompoundTerm] = {}
278
+ for j in range(len(i)):
279
+ temp_dict[self.raw_column_name[j]] = NameToObject.get_item_by_name(i[j])
280
+ yield temp_dict
281
+
282
+ def make_table_ready(self, *, ensure_unique: bool = False) -> None:
283
+ """
284
+ 确保表已经准备好,实质就是调用内部的_merge_to_lazy_block方法
285
+ """
286
+ if not hasattr(self, "_base_df"):
287
+ self._base_df = pl.DataFrame(data=self._add_row_cache, schema=self._column_schema())
288
+ self._is_deduped = False
289
+ if ensure_unique and not self._is_deduped:
290
+ self._base_df = self._base_df.unique()
291
+ self._is_deduped = True
292
+ self.__dict__.pop('_np_list_store', None)
293
+
294
+ def clear(self) -> None:
295
+ """
296
+ 清空dataframe
297
+ """
298
+ self._add_row_cache = {}
299
+ self.__dict__.pop('_base_df', None)
300
+ self.__dict__.pop('_np_list_store', None)
301
+ self.__dict__.pop('table_represent', None)
302
+ self.__dict__.pop('raw_columns_name_str', None)
303
+ self._is_deduped = False
304
+
305
+ def add_row(self, row: dict[Variable, Constant | CompoundTerm]) -> None:
306
+ """
307
+ 添加一行
308
+ :param row: 一行数据
309
+ :type row: dict[Variable, Constant | CompoundTerm]
310
+ :raise: RuntimeError: 由于设计问题,目前add row实际上是进行缓存的,在被使用的时候将会生成一个真正的dataframe。而这之后是不可以继续
311
+ add row的。 # TODO: 后期修改为可以支持
312
+ """ # noqa: DOC501
313
+ if hasattr(self, "_base_df"):
314
+ raise RuntimeError("Cannot add rows after the base DataFrame is materialized (automatically when used to union or execute sth.).")
315
+ for k, v in row.items():
316
+ key_name = str(NameToObject.get_item_name(k))
317
+ var_name = NameToObject.get_item_name(v)
318
+ if key_name in self._add_row_cache:
319
+ self._add_row_cache[key_name].append(var_name)
320
+ else:
321
+ self._add_row_cache[key_name] = [var_name]
322
+
323
+ def add_column(self, new_columns: dict[Variable, list[Constant | CompoundTerm]]) -> _TupleTable:
324
+ """添加一列。TODO: 暂时还没有加入列对应的cache以实现lazy"""
325
+ self.make_table_ready()
326
+
327
+ base_df = self.base_df
328
+
329
+ series_to_add: list[pl.Series] = []
330
+ for var, values in new_columns.items():
331
+ col_name = NameToObject.get_item_name(var)
332
+ str_values = [NameToObject.get_item_name(v) for v in values]
333
+ series_to_add.append(pl.Series(name=col_name, values=str_values, dtype=self._smallest_unsigned_int_dtype()))
334
+
335
+ new_base_df = base_df.hstack(series_to_add)
336
+
337
+ new_raw_column_name = (*new_columns.keys(), *self.raw_column_name)
338
+
339
+ new_table = _TupleTable(new_raw_column_name)
340
+ new_table.set_base_df(new_base_df)
341
+ return new_table
342
+
343
+ @classmethod
344
+ def create_empty_table_with_emptyset(cls) -> _TupleTable:
345
+ df = pl.DataFrame([[]], orient="row")
346
+ table = _TupleTable(())
347
+ table.set_base_df(df)
348
+ return table
349
+
350
+ @property
351
+ def height(self) -> int:
352
+ """
353
+ 获取表的高度
354
+
355
+ :return: 表的高度
356
+ :rtype: int
357
+ """
358
+ return self.base_df.height
359
+
360
+ @property
361
+ def base_df(self) -> pl.DataFrame:
362
+ if not hasattr(self, '_base_df'):
363
+ self.make_table_ready()
364
+ if self._base_df.width == 0: # 没有任何列的空表直接to_numpy会报错,通过强行判断来避免这件事
365
+ # 这种没有任何列的空表来源于ConstantNode,一般grounding不会涉及到它们,但是pytest有可能涉及,因此这里再
366
+ # 强行判断一下
367
+ self._np_list_store = np.empty((0, 0))
368
+ else:
369
+ self._np_list_store = self._base_df.to_numpy()
370
+ return self._base_df
371
+
372
+ @property
373
+ def _np_list(self) -> NDArray[np.float64]:
374
+ """
375
+ 转换为numpy数组,主要用于取单列
376
+ """
377
+ if not hasattr(self, '_np_list_store'):
378
+ _ = self.base_df # 如果在触发base_df之前触发_np_list,则先触发一次base_bf来计算_np_list_store
379
+ return self._np_list_store
380
+
381
+ @property
382
+ def table_represent(self) -> list[dict[str, str]]:
383
+ """
384
+ 用于pytest,相当于将table转化为list[dict],同时将变量名和常量名转化为字符串
385
+
386
+ :return: 转化后的table
387
+ :rtype: list[dict[str, str]]
388
+ """
389
+ list_dict = self.base_df.unique(maintain_order=True).to_dicts()
390
+ return [{str(NameToObject.get_item_by_name(u)): str(NameToObject.get_item_by_name(v)) for u, v in s_dict.items()} for s_dict in list_dict]
391
+
392
+ def debug_summary(self, *, sample_size: int = 5) -> dict[str, Any]:
393
+ """
394
+ 返回用于日志的表摘要信息。
395
+ """
396
+ self.make_table_ready()
397
+ return {
398
+ "columns": [str(name) for name in self.raw_column_name],
399
+ "rows": self.height,
400
+ "sample": self.table_represent[:sample_size],
401
+ }
402
+
403
+ def unique_height(self) -> int:
404
+ """
405
+ 用于日志或调试输出的去重行数统计。
406
+ """
407
+ return len(self.table_represent)
408
+
409
+ @property
410
+ def raw_column_name(self) -> tuple[Any, ...]:
411
+ """
412
+ 获取原始列名(对象)
413
+
414
+ :return: 原始列名(对象)
415
+ :rtype: list[Any]
416
+ """
417
+ if not hasattr(self, "_raw_column_name"):
418
+ self._raw_column_name = tuple(NameToObject.get_item_by_name(u) for u in self.column_name)
419
+ return self._raw_column_name
420
+
421
+ @property
422
+ def raw_columns_name_str(self) -> list[str]:
423
+ """
424
+ 获取原始列名(字符串),用于绘图
425
+
426
+ :return: 原始列名(字符串)
427
+ :rtype: list[str]
428
+ """
429
+ return [str(NameToObject.get_item_by_name(u)) for u in self.column_name]
430
+
431
+ @staticmethod
432
+ def _smallest_unsigned_int_dtype() -> PolarsDataType:
433
+ max_val = NameToObject.len()
434
+
435
+ if max_val <= ((1 << 8) - 1):
436
+ return pl.UInt8
437
+ if max_val <= ((1 << 16) - 1):
438
+ return pl.UInt16
439
+ if max_val <= ((1 << 32) - 1):
440
+ return pl.UInt32
441
+ return pl.UInt64
442
+
443
+ def _column_schema(self) -> dict[str, PolarsDataType]:
444
+ return dict.fromkeys(self.column_name, self._smallest_unsigned_int_dtype())
@@ -0,0 +1,26 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import TYPE_CHECKING, Any
4
+
5
+ if TYPE_CHECKING:
6
+ # 1) Prefer the public module if/when Polars exposes it (polars.typing)
7
+ try:
8
+ from polars.typing import PolarsDataType
9
+ except Exception:
10
+ # 2) For now, fall back to the private module (polars._typing)
11
+ # This avoids importing the deprecated polars.type_aliases.
12
+ try:
13
+ from polars._typing import PolarsDataType
14
+
15
+ except Exception:
16
+ # 3) Last resort: define the alias
17
+ from polars.type_aliases import PolarsDataType as _PolarsDataType
18
+ import polars as pl
19
+
20
+ PolarsDataType = _PolarsDataType | pl.DataType
21
+ else:
22
+ # At runtime we don't need the type alias; keep a lightweight placeholder.
23
+ PolarsDataType = Any
24
+
25
+
26
+ __all__ = ['PolarsDataType']