kele 0.0.1a1__cp313-cp313-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. kele/__init__.py +38 -0
  2. kele/_version.py +1 -0
  3. kele/config.py +243 -0
  4. kele/control/README_metrics.md +102 -0
  5. kele/control/__init__.py +20 -0
  6. kele/control/callback.py +255 -0
  7. kele/control/grounding_selector/__init__.py +5 -0
  8. kele/control/grounding_selector/_rule_strategies/README.md +13 -0
  9. kele/control/grounding_selector/_rule_strategies/__init__.py +24 -0
  10. kele/control/grounding_selector/_rule_strategies/_sequential_strategy.py +42 -0
  11. kele/control/grounding_selector/_rule_strategies/strategy_protocol.py +51 -0
  12. kele/control/grounding_selector/_selector_utils.py +123 -0
  13. kele/control/grounding_selector/_term_strategies/__init__.py +24 -0
  14. kele/control/grounding_selector/_term_strategies/_exhausted_strategy.py +34 -0
  15. kele/control/grounding_selector/_term_strategies/strategy_protocol.py +50 -0
  16. kele/control/grounding_selector/rule_selector.py +98 -0
  17. kele/control/grounding_selector/term_selector.py +89 -0
  18. kele/control/infer_path.py +306 -0
  19. kele/control/metrics.py +357 -0
  20. kele/control/status.py +286 -0
  21. kele/egg_equiv.pyd +0 -0
  22. kele/egg_equiv.pyi +11 -0
  23. kele/equality/README.md +8 -0
  24. kele/equality/__init__.py +4 -0
  25. kele/equality/_egg_equiv/src/lib.rs +267 -0
  26. kele/equality/_equiv_elem.py +67 -0
  27. kele/equality/_utils.py +36 -0
  28. kele/equality/equivalence.py +141 -0
  29. kele/executer/__init__.py +4 -0
  30. kele/executer/executing.py +139 -0
  31. kele/grounder/README.md +83 -0
  32. kele/grounder/__init__.py +17 -0
  33. kele/grounder/grounded_rule_ds/__init__.py +6 -0
  34. kele/grounder/grounded_rule_ds/_nodes/__init__.py +24 -0
  35. kele/grounder/grounded_rule_ds/_nodes/_assertion.py +353 -0
  36. kele/grounder/grounded_rule_ds/_nodes/_conn.py +116 -0
  37. kele/grounder/grounded_rule_ds/_nodes/_op.py +57 -0
  38. kele/grounder/grounded_rule_ds/_nodes/_root.py +71 -0
  39. kele/grounder/grounded_rule_ds/_nodes/_rule.py +119 -0
  40. kele/grounder/grounded_rule_ds/_nodes/_term.py +390 -0
  41. kele/grounder/grounded_rule_ds/_nodes/_tftable.py +15 -0
  42. kele/grounder/grounded_rule_ds/_nodes/_tupletable.py +444 -0
  43. kele/grounder/grounded_rule_ds/_nodes/_typing_polars.py +26 -0
  44. kele/grounder/grounded_rule_ds/grounded_class.py +461 -0
  45. kele/grounder/grounded_rule_ds/grounded_ds_utils.py +91 -0
  46. kele/grounder/grounded_rule_ds/rule_check.py +373 -0
  47. kele/grounder/grounding.py +118 -0
  48. kele/knowledge_bases/README.md +112 -0
  49. kele/knowledge_bases/__init__.py +6 -0
  50. kele/knowledge_bases/builtin_base/__init__.py +1 -0
  51. kele/knowledge_bases/builtin_base/builtin_concepts.py +13 -0
  52. kele/knowledge_bases/builtin_base/builtin_facts.py +43 -0
  53. kele/knowledge_bases/builtin_base/builtin_operators.py +105 -0
  54. kele/knowledge_bases/builtin_base/builtin_rules.py +14 -0
  55. kele/knowledge_bases/fact_base.py +158 -0
  56. kele/knowledge_bases/ontology_base.py +67 -0
  57. kele/knowledge_bases/rule_base.py +194 -0
  58. kele/main.py +464 -0
  59. kele/py.typed +0 -0
  60. kele/syntax/CONCEPT_README.md +117 -0
  61. kele/syntax/__init__.py +40 -0
  62. kele/syntax/_cnf_converter.py +161 -0
  63. kele/syntax/_sat_solver.py +116 -0
  64. kele/syntax/base_classes.py +1482 -0
  65. kele/syntax/connectives.py +20 -0
  66. kele/syntax/dnf_converter.py +145 -0
  67. kele/syntax/external.py +17 -0
  68. kele/syntax/sub_concept.py +87 -0
  69. kele/syntax/syntacticsugar.py +201 -0
  70. kele-0.0.1a1.dist-info/METADATA +166 -0
  71. kele-0.0.1a1.dist-info/RECORD +74 -0
  72. kele-0.0.1a1.dist-info/WHEEL +4 -0
  73. kele-0.0.1a1.dist-info/licenses/LICENSE +28 -0
  74. kele-0.0.1a1.dist-info/licenses/licensecheck.json +20 -0
kele/main.py ADDED
@@ -0,0 +1,464 @@
1
+ import logging
2
+ from collections.abc import Sequence
3
+ from typing import Literal, Any, ClassVar
4
+ from collections.abc import Mapping
5
+
6
+ from pydantic import BaseModel, ConfigDict
7
+
8
+ from kele.syntax import Assertion, _QuestionRule
9
+ from kele.config import init_config_logger, Config
10
+ from kele.control.grounding_selector import GroundingFlatTermWithWildCardSelector
11
+ from kele.control.status import InferenceStatus, QuerySolutionManager
12
+ from kele.executer import Executor
13
+ from kele.grounder import Grounder, GroundedRule, GroundedRuleDS
14
+ from kele.knowledge_bases import FactBase, RuleBase, load_ontologies
15
+ from kele.control.metrics import PhaseTimer, observe_counts, init_metrics, \
16
+ measure, end_run, start_run, inc_iter
17
+ from kele.syntax import FACT_TYPE, Rule, SankuManagementSystem, Question, Constant, CompoundTerm, Variable
18
+ from kele.equality import Equivalence
19
+ from kele.control import create_main_loop_manager, GroundingRuleSelector, InferencePath
20
+ from kele.control.infer_path import FactStep
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
+ class QueryStructure(BaseModel):
26
+ """Query structure used as input when calling the inference engine."""
27
+ premises: Sequence[Assertion]
28
+ question: Sequence[FACT_TYPE]
29
+
30
+ model_config: ClassVar = ConfigDict(
31
+ arbitrary_types_allowed=True,
32
+ extra="forbid",
33
+ )
34
+
35
+
36
+ class EngineRunResult(BaseModel):
37
+ """Return structure from the inference engine."""
38
+ model_config: ClassVar = ConfigDict(
39
+ arbitrary_types_allowed=True,
40
+ extra="forbid",
41
+ )
42
+
43
+ status: InferenceStatus
44
+ final_facts: list[FACT_TYPE]
45
+ fact_num: int
46
+ include_final_facts: bool
47
+ question: Question
48
+ iterations: int
49
+ execute_steps: int
50
+ terminated_by: Literal["initial_check", "executor", "main_loop", "unknown"]
51
+ solution_count: int # Number of solutions found
52
+
53
+ # Detailed solution bindings (empty list if save_solutions=False in config)
54
+ solutions: list[Mapping[Variable, Constant | CompoundTerm]]
55
+
56
+ @property
57
+ def has_solution(self) -> bool:
58
+ """Return whether any solution exists."""
59
+ return self.solution_count > 0
60
+
61
+ @property
62
+ def is_success(self) -> bool | None: # None means unknown/undetermined
63
+ """
64
+ - SUCCESS -> success
65
+ - FIXPOINT_REACHED with at least one solution -> success
66
+ - MAX_* / EXTERNALLY_INTERRUPTED with at least one solution -> at most a partial success
67
+ """
68
+ if self.status == InferenceStatus.SUCCESS:
69
+ return True
70
+
71
+ if self.status == InferenceStatus.FIXPOINT_REACHED:
72
+ return self.has_solution
73
+
74
+ return False
75
+
76
+ @property
77
+ def is_partial_success(self) -> bool | None: # None means unknown/undetermined
78
+ """
79
+ Has solutions, but stopped early due to resource limits or external interruption.
80
+ There may be more solutions; no solutions does not imply failure.
81
+ """
82
+ return self.has_solution and self.status in {
83
+ InferenceStatus.MAX_STEPS_REACHED,
84
+ InferenceStatus.MAX_ITERATIONS_REACHED,
85
+ InferenceStatus.EXTERNALLY_INTERRUPTED,
86
+ }
87
+
88
+ def log_message(self) -> str:
89
+ """Build a log-friendly message."""
90
+ msg = (f"Inference finished.\n"
91
+ f"status={self.status}, success={self.is_success}, partial_success=={self.is_partial_success}, "
92
+ f"terminated_by={self.terminated_by}, iterations={self.iterations}, facts_num={self.fact_num}, "
93
+ f"has_solution={self.has_solution}, solution_count={self.solution_count}")
94
+
95
+ # Show detailed solutions if stored
96
+ if self.solutions:
97
+ for i, sol in enumerate(self.solutions):
98
+ msg += f"\n Solution {i + 1}: " + ", ".join(f"{var.display_name}={val}" for var, val in sol.items())
99
+
100
+ return msg
101
+
102
+ def to_dict(self, *, include_final_facts: bool | None = None) -> dict[str, Any]:
103
+ """Serialize the result (solutions may be empty if save_solutions=False)."""
104
+ if include_final_facts is None:
105
+ include_final_facts = self.include_final_facts
106
+ if include_final_facts:
107
+ return self.model_dump()
108
+ return self.model_dump(exclude={"final_facts"})
109
+
110
+
111
+ class InferenceEngine:
112
+ """Inference engine main program that wraps grounding + executing."""
113
+
114
+ def __init__(self, # noqa: PLR0913
115
+ facts: Sequence[FACT_TYPE] | str | None,
116
+ rules: Sequence[Rule] | str | None,
117
+ *,
118
+ concept_dir_or_path: str = 'knowledge_bases/builtin_base/builtin_concepts.py',
119
+ operator_dir_or_path: str = 'knowledge_bases/builtin_base/builtin_operators.py',
120
+ user_config: Config | None = None,
121
+ config_file_path: str | None = None, # TODO: Consider moving custom log file into Config.
122
+ ) -> None:
123
+ """
124
+ Initialize the inference engine with initial facts and rules.
125
+ If facts and rules are None, use the default initial facts and rules.
126
+ """
127
+ self.args = init_config_logger(user_config, config_file_path)
128
+
129
+ def _get_source_info(obj: Sequence[FACT_TYPE] | Sequence[Rule] | str | None, name: str) -> str:
130
+ if isinstance(obj, str): # Note that str is also a Sequence.
131
+ return f"{name} from file: {obj}"
132
+ if isinstance(obj, Sequence):
133
+ return f"{name} from list, length={len(obj)}"
134
+ if obj is None:
135
+ return f"{name} is None"
136
+
137
+ raise TypeError(f"Unsupported type for obj: {type(obj).__name__}")
138
+
139
+ logger.info("Initializing inference engine: Load %s; Load %s",
140
+ _get_source_info(facts, "facts"),
141
+ _get_source_info(rules, "rules"))
142
+
143
+ self.equivalence = Equivalence(args=self.args)
144
+ sk_system_handler = SankuManagementSystem()
145
+ # TODO: Knowledge base declarations may require db_url from args; not implemented yet.
146
+
147
+ facts = self.args.path.fact_dir if facts is None else facts
148
+ rules = self.args.path.rule_dir if rules is None else rules
149
+
150
+ try:
151
+ load_ontologies(concept_dir_or_path=concept_dir_or_path,
152
+ operator_dir_or_path=operator_dir_or_path)
153
+
154
+ # selector
155
+ self.rule_selector = GroundingRuleSelector(strategy=self.args.strategy.grounding_rule_strategy,
156
+ question_rule_interval=self.args.strategy.question_rule_interval)
157
+
158
+ self.term_selector = GroundingFlatTermWithWildCardSelector(equivalence=self.equivalence,
159
+ args=self.args)
160
+
161
+ # knowledge base
162
+ self.fact_base = FactBase(initial_facts_or_dir_or_path=facts,
163
+ equivalence_handler=self.equivalence,
164
+ term_selector=self.term_selector,
165
+ sk_system_handler=sk_system_handler,
166
+ args=self.args.engineering)
167
+ # only one global fact_base is maintained.
168
+
169
+ self.rule_base = RuleBase(rules, args=self.args.engineering)
170
+
171
+ if logger.isEnabledFor(logging.DEBUG):
172
+ logger.debug("Fact base created with %s facts", len(self.fact_base.facts))
173
+
174
+ if logger.isEnabledFor(logging.DEBUG):
175
+ logger.debug("Rule base created with %s rules", len(self.rule_base.rules))
176
+ logger.info("Inference engine created successfully.")
177
+
178
+ except Exception:
179
+ logger.exception("Initialization failed: ontologies_path=(concept=%s, operator=%s)\n(facts=%s, rules=%s)",
180
+ concept_dir_or_path,
181
+ operator_dir_or_path,
182
+ facts[:2] if facts else None,
183
+ rules[:2] if rules else None)
184
+ raise
185
+
186
+ # Create the solution manager.
187
+ self.solution_manager = QuerySolutionManager(
188
+ interactive_query_mode=self.args.run.interactive_query_mode,
189
+ store_solutions=self.args.run.save_solutions
190
+ )
191
+
192
+ # Create the main loop manager.
193
+ self.main_loop_manager = create_main_loop_manager(
194
+ self.equivalence,
195
+ sk_system_handler,
196
+ max_iterations=self.args.run.iteration_limit
197
+ )
198
+
199
+ # Create inference path dealer
200
+ self.inference_path = InferencePath(self.args.run, self.equivalence)
201
+
202
+ # Create the Grounder.
203
+ grounded_structure = GroundedRuleDS(equivalence=self.equivalence, sk_system_handler=sk_system_handler,
204
+ args=self.args, inference_path=self.inference_path)
205
+ # FIXME: Extract DS into a standalone component.
206
+ self.grounder = Grounder(fact_base=self.fact_base,
207
+ rule_base=self.rule_base,
208
+ rule_selector=self.rule_selector,
209
+ term_selector=self.term_selector,
210
+ grounded_structure=grounded_structure,
211
+ rules_num_every_step=self.args.grounder.grounding_rules_num_every_step, # TODO: Can
212
+ # wrap these into args as a grounder config type; keep separate to avoid conflicts.
213
+ facts_num_for_each_rule=self.args.grounder.grounding_facts_num_for_each_rule)
214
+
215
+ self.executor = Executor(equivalence=self.equivalence,
216
+ sk_system_handler=sk_system_handler,
217
+ fact_base=self.fact_base,
218
+ main_loop_manager=self.main_loop_manager,
219
+ solution_manager=self.solution_manager,
220
+ inference_path=self.inference_path,
221
+ select_num=self.args.executor.executing_rule_num,
222
+ max_steps=self.args.executor.executing_max_steps)
223
+
224
+ # Track whether the engine has completed at least one inference run.
225
+ self._has_previous_run: bool = False
226
+
227
+ # Initialize metrics monitoring.
228
+ init_metrics(job="al_inference", grouping={"env": "dev"})
229
+
230
+ def _infer(self, question: Question) -> EngineRunResult:
231
+ """Run a full forward-chaining inference cycle."""
232
+ mod = __name__
233
+ # Initial snapshot.
234
+ observe_counts(facts_count=len(self.fact_base.get_facts()))
235
+
236
+ logger.info("InferenceEngine: Starting full inference...")
237
+
238
+ # Check whether the question can be answered before the loop starts.
239
+ initial_status, result = self._check_initial_status(question)
240
+ if initial_status is not None and result is not None:
241
+ return result
242
+
243
+ final_status: InferenceStatus | None = None
244
+ terminated_by: Literal['initial_check', 'executor', 'main_loop', 'unknown']
245
+
246
+ while True:
247
+ logger.info("Inference iteration %s...", self.main_loop_manager.iteration)
248
+
249
+ # The grounding process produces instantiated rules (based on current facts).
250
+ with PhaseTimer("grounding", module=mod):
251
+ grounded_rules: Sequence[GroundedRule] = self.grounder.grounding_process(question=question)
252
+ observe_counts(grounded_rules=len(grounded_rules), facts_count=len(self.fact_base.get_facts()))
253
+
254
+ selection_only_question_rules = self.grounder.selected_only_question_rules()
255
+ if not grounded_rules:
256
+ if not selection_only_question_rules:
257
+ inc_iter(mod)
258
+ logger.info("Inference iteration %s: No new groundings found.", self.main_loop_manager.iteration)
259
+ continue
260
+ if not selection_only_question_rules:
261
+ inc_iter(mod)
262
+
263
+ with PhaseTimer("execute", module=mod):
264
+ exec_status = self.executor.execute(grounded_rules=grounded_rules, question=question)
265
+
266
+ if exec_status.is_terminal_for_main_loop():
267
+ logger.result("Inference terminated due to executor: %s", exec_status.log_message()) # type: ignore[attr-defined]
268
+ terminated_by = "executor"
269
+ final_status = exec_status
270
+
271
+ logger.info("Executing: %i rules", len(grounded_rules)) # Placeholder: may be grounding/executing.
272
+ if logger.isEnabledFor(logging.DEBUG):
273
+ logger.debug("Executing rules: %s", [str(r.rule) for r in grounded_rules])
274
+
275
+ break
276
+
277
+ with PhaseTimer("main_check", module=mod): # Unified check for all termination conditions.
278
+ main_status = self.main_loop_manager.check_status([], question)
279
+ # main checks facts before the loop, executor checks new facts, so pass an empty fact list here.
280
+
281
+ if main_status.is_terminal_for_main_loop():
282
+ logger.result("Main loop terminating: %s", main_status.log_message()) # type: ignore[attr-defined]
283
+ terminated_by = "main_loop"
284
+ final_status = main_status
285
+ self._handle_fixpoint(final_status=final_status, question=question)
286
+ break
287
+
288
+ # Move to the next iteration.
289
+ if not selection_only_question_rules:
290
+ self.main_loop_manager.next_iteration()
291
+
292
+ facts = self.fact_base.get_facts()
293
+ observe_counts(facts_count=len(facts))
294
+ logger.result("Total facts when terminal: %s", len(facts)) # type: ignore[attr-defined]
295
+
296
+ solution_count = self.solution_manager.get_solution_count()
297
+ all_solutions = self.solution_manager.get_all_solutions()
298
+
299
+ include_final_facts = self.args.run.include_final_facts
300
+ final_facts = facts if include_final_facts else []
301
+ return EngineRunResult(
302
+ status=final_status,
303
+ solution_count=solution_count,
304
+ solutions=all_solutions,
305
+ final_facts=final_facts,
306
+ fact_num=len(facts),
307
+ include_final_facts=include_final_facts,
308
+ question=question,
309
+ iterations=self.main_loop_manager.iteration,
310
+ execute_steps=self.executor.executor_manager.step_num,
311
+ terminated_by=terminated_by,
312
+ )
313
+
314
+ @measure("infer_query", module="inference")
315
+ def infer_query(self, query: QueryStructure, *, resume: bool = False) -> EngineRunResult: # TODO: Between runs,
316
+ # EngineRunResult is still returned per call; last result can be treated as authoritative.
317
+ """
318
+ Public interface for the inference engine: accept QueryStructure and return results.
319
+ :param resume: Set True to continue a previous run after injecting new facts externally.
320
+ HACK: logs are split into two files, so timing stats will be inaccurate.
321
+ :raise: ValueError: The first call must have resume=False.
322
+ If resume=True is used before any inference run, ValueError is raised.
323
+ """ # noqa: DOC501
324
+ start_run(log_dir="metrics_logs") # Start a new metrics record per outer call.
325
+
326
+ try:
327
+ if not resume:
328
+ self._reset()
329
+ elif not self._has_previous_run:
330
+ # Attempting resume without any prior run is invalid.
331
+ raise ValueError(
332
+ "Invalid use of `resume=True` when"
333
+ "no previous inference run is available to continue from. "
334
+ "Please set resume=False when calling infer_query(...) first."
335
+ )
336
+
337
+ self._has_previous_run = True # At least one inference run completed.
338
+
339
+ premises = query.premises
340
+ question = Question(premises=premises, question=query.question) # TODO: Consider internal-only Question
341
+ # and avoid storing premises to reduce duplication with QueryStructure.
342
+
343
+ if not resume: # Redundant check, but keeps the flow clearer.
344
+ self._initial_engine(question=question, premises=premises)
345
+ else:
346
+ self.fact_base.add_facts(premises, check_free_variables=True)
347
+ self.main_loop_manager.initial_manager(normal_rules=None, resume=resume) # If continue_infer is added
348
+ # everywhere, this branch could be omitted.
349
+
350
+ engine_result = self._infer(question=question)
351
+ logger.result(engine_result.log_message()) # type: ignore[attr-defined]
352
+
353
+ return engine_result
354
+
355
+ finally:
356
+ end_run(extra_meta={
357
+ "facts_final": len(self.fact_base.get_facts()),
358
+ "rules_total": len(self.rule_base.rules),
359
+ })
360
+
361
+ def get_facts(self) -> list[FACT_TYPE]:
362
+ """Return facts used (selected by initial_fact_base) and all derived facts."""
363
+ return self.fact_base.get_facts()
364
+
365
+ def get_infer_path(self, terminal_fact: FACT_TYPE) -> tuple[list[FactStep], FACT_TYPE | None]:
366
+ """
367
+ Get the inference path information; it is returned as a tuple:
368
+ the first element is the inference path (a list of FactStep), the second element is the terminal fact.
369
+
370
+ :param terminal_fact: the terminal fact
371
+ :type terminal_fact: FACT_TYPE
372
+ :return: the inference path (a list of FactStep) and the terminal fact.
373
+ :rtype: tuple[list[FactStep], FACT_TYPE | None]
374
+ """
375
+ return self.inference_path.get_infer_graph(terminal_fact)
376
+
377
+ def generate_infer_path_graph(self, infer_path: list[FactStep]) -> None:
378
+ """
379
+ Generate a graph from the inference path information.
380
+ Important: call the "get_infer_path" method first to obtain the inference path.
381
+
382
+ :param infer_path: the inference path (a list of FactStep)
383
+ :type infer_path: list[FactStep]
384
+ """
385
+ self.inference_path.gennerate_infer_path_graph(infer_path)
386
+
387
+ def _reset(self) -> None:
388
+ self.fact_base.reset_fact_base()
389
+ self.rule_base.reset_rule_base()
390
+
391
+ self.equivalence.clear()
392
+ self.grounder.reset()
393
+ self.executor.reset()
394
+
395
+ self.main_loop_manager.reset()
396
+ self.solution_manager.reset()
397
+ self.inference_path.reset()
398
+
399
+ # Reset resume flag.
400
+ self._has_previous_run = False
401
+
402
+ def _initial_engine(self, question: Question, premises: Sequence[Assertion]) -> None:
403
+ self.fact_base.initial_fact_base(question=question, topn=self.args.strategy.select_facts_num)
404
+ self.fact_base.add_facts(facts=premises, force_add=True, check_free_variables=True)
405
+
406
+ self.rule_base.initial_rule_base(question=question, topn=self.args.strategy.select_rules_num)
407
+
408
+ self._initialize_term_selector()
409
+
410
+ question_rules = self.rule_base.get_question_rules()
411
+ normal_rules = [r for r in self.rule_base.get_rules() if not isinstance(r, _QuestionRule)]
412
+
413
+ self.rule_selector.set_rules(normal_rules=normal_rules,
414
+ question_rules=question_rules) # HACK: Not linked to fact base.
415
+
416
+ self.main_loop_manager.initial_manager(normal_rules=normal_rules)
417
+
418
+ if self.args.run.trace:
419
+ for f in self.fact_base.get_facts():
420
+ self.inference_path.add_infer_edge(consequent=f) # FIXME: Keep change small for this PR; later use list
421
+ # types and revert to Assertion, or at least include a CNF split.
422
+
423
+ def _check_initial_status(self, question: Question) -> tuple[InferenceStatus | None, EngineRunResult | None]:
424
+ """Check whether the question can be answered before the loop starts."""
425
+ current_facts = self.fact_base.get_facts()
426
+ initial_status = self.main_loop_manager.check_status(current_facts, question)
427
+ if initial_status.is_terminal_for_main_loop():
428
+ logger.info("Initial check result: %s", initial_status.log_message())
429
+ include_final_facts = self.args.run.include_final_facts
430
+ final_facts = current_facts if include_final_facts else []
431
+ result = EngineRunResult(
432
+ status=initial_status,
433
+ solution_count=1,
434
+ solutions=[{}], # The question already exists in facts; treat {} as a "true" solution for display.
435
+ final_facts=final_facts,
436
+ fact_num=len(current_facts),
437
+ include_final_facts=include_final_facts,
438
+ question=question,
439
+ iterations=self.main_loop_manager.iteration,
440
+ execute_steps=self.executor.executor_manager.step_num,
441
+ terminated_by="initial_check",
442
+ )
443
+ return initial_status, result
444
+ return None, None
445
+
446
+ def _handle_fixpoint(self, final_status: InferenceStatus, question: Question) -> None:
447
+ """Handle actions when a FIXPOINT_REACHED status is detected."""
448
+ if final_status == InferenceStatus.FIXPOINT_REACHED:
449
+ self.rule_selector.set_at_fixpoint(at_fixpoint=True)
450
+ grounded_rules = self.grounder.grounding_process(question=question)
451
+ if grounded_rules:
452
+ self.executor.execute(grounded_rules=grounded_rules, question=question)
453
+
454
+ def _initialize_term_selector(self) -> None:
455
+ """Initialize term candidates from facts and rule/question ground terms."""
456
+ self.term_selector.update_terms(facts=self.fact_base.get_facts())
457
+
458
+ rules = self.rule_base.get_rules()
459
+ question_rules = self.rule_base.get_question_rules()
460
+ self.term_selector.update_terms_from_rules([*rules, *question_rules])
461
+
462
+
463
+ if __name__ == '__main__':
464
+ logger.info("Inference Engine Started")
kele/py.typed ADDED
File without changes
kele/syntax/CONCEPT_README.md ADDED
@@ -0,0 +1,117 @@
1
+ # Concept Subsumption (Subset) Relations and Type Checking
2
+
3
+ The engine supports **concept subsumption (subset) relations**, and type checking in Term (CompoundTerm), Operator, and related places automatically takes subsumption into account.
4
+
5
+ ## 1. Why subsumption (subset) relations?
6
+ In practice, concepts form a hierarchy, e.g. `int ⊆ real` and `rational ⊆ real`. When an operator parameter expects `real`, passing an `int` should be treated as **type compatible**.
7
+
8
+ ## 2. Core features
9
+ - **O(1) queries**: the transitive closures `_ancestors_inclusive` / `_descendants_inclusive` are maintained internally, so a `c1 ⊆ c2` check is approximately O(1) (see the sketch after this list).
10
+ - **Incremental updates**: after a `child ⊆ parent` relation is recorded, the closure sets of all affected ancestors/descendants are updated automatically.
11
+ - **Wildcard concept**: if `FREEVARANY_CONCEPT` exists, comparisons against it are treated as compatible.
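To make the closure bookkeeping concrete, here is a minimal standalone sketch; only the two dictionary names come from the bullet above, everything else is an illustrative assumption rather than the package's actual implementation:

```python
# Illustrative only -- not the package's real data structures.
from collections import defaultdict

_ancestors_inclusive: dict[str, set[str]] = defaultdict(set)    # concept -> {itself, all superconcepts}
_descendants_inclusive: dict[str, set[str]] = defaultdict(set)  # concept -> {itself, all subconcepts}

def _ensure(c: str) -> None:
    _ancestors_inclusive[c].add(c)
    _descendants_inclusive[c].add(c)

def add_subsumption(child: str, parent: str) -> None:
    """Record child ⊆ parent and incrementally update every affected closure."""
    _ensure(child)
    _ensure(parent)
    if child in _ancestors_inclusive[parent]:
        raise ValueError(f"'{parent} ⊆ {child}' already holds; mutual subsumption is rejected")
    for d in _descendants_inclusive[child]:            # every subconcept of `child` ...
        _ancestors_inclusive[d] |= _ancestors_inclusive[parent]      # ... inherits parent's ancestors
    for a in _ancestors_inclusive[parent]:             # every superconcept of `parent` ...
        _descendants_inclusive[a] |= _descendants_inclusive[child]   # ... gains child's descendants

def is_subconcept(c1: str, c2: str) -> bool:
    """Approximately O(1): a set-membership test against the precomputed closure."""
    return c2 in _ancestors_inclusive.get(c1, {c1})

add_subsumption("int", "real")
add_subsumption("positive_int", "int")
assert is_subconcept("positive_int", "real")
```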
12
+
13
+ ## 3. User-friendly ways to register relations (use any one, or combine them)
14
+ 1) **Single relation**:
15
+ A function maintained by the `Concept` class
16
+ ```python
17
+ Concept.add_subsumption("int", "real")
18
+ ```
19
+ 2) **Batch list**:
20
+ An external wrapper that calls add_subsumption
21
+ ```python
22
+ add_subsumptions([
23
+ ("int", "real"),
24
+ ("rational", "real"),
25
+ ])
26
+ ```
27
+ 3) **Mapping (child -> list of parents)**:
28
+ An external wrapper that calls add_subsumption
29
+ ```python
30
+ add_subsumptions_from_mapping({
31
+ "int": ["real"],
32
+ "rational": ["real"],
33
+ })
34
+ ```
35
+ 4) **String DSL** (supports `⊆` and `<=`; separators: comma / semicolon / newline; a toy parse is sketched after this list):
36
+ An external wrapper that calls add_subsumption
37
+ ```python
38
+ add_subsumptions_from_string("""
39
+ int ⊆ real, rational <= real;
40
+ positive_int <= int
41
+ """)
42
+ ```
43
+ 5) **Specify parent concepts at construction time**:
44
+ ```python
45
+ Concept("int", parents=["real"])
46
+ ```
47
+ 6) **Set parent concepts via chaining**:
48
+ ```python
49
+ Concept("int").set_parents(["real"])
50
+ ```
51
+
52
+ > Note: the APIs above can be freely mixed; duplicate declarations are deduplicated automatically.
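Read literally, the DSL from item 4 amounts to splitting on the listed separators and on either relation symbol. A hypothetical toy parse (not kele's actual parser) of the example string:

```python
# Hypothetical toy parse of the string DSL described in item 4 above;
# kele's real add_subsumptions_from_string may behave differently.
import re

def parse_relations(dsl: str) -> list[tuple[str, str]]:
    pairs: list[tuple[str, str]] = []
    for chunk in re.split(r"[,;\n]", dsl):        # separators: comma / semicolon / newline
        chunk = chunk.strip()
        if not chunk:
            continue
        child, parent = re.split(r"⊆|<=", chunk)  # both relation symbols are accepted
        pairs.append((child.strip(), parent.strip()))
    return pairs

assert parse_relations("int ⊆ real, rational <= real;\npositive_int <= int") == [
    ("int", "real"),
    ("rational", "real"),
    ("positive_int", "int"),
]
```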
53
+
54
+ ## 4. How does type checking take effect for Term/Operator?
55
+ - **Operator** declares its input/output concepts:
56
+ ```python
57
+ plus = Operator("plus", input_concepts=["real", "real"], output_concept="real")
58
+ ```
59
+ - **CompoundTerm** is checked at construction time (see the standalone sketch after this list):
60
+ - if an argument is a `Constant`, check `arg.belong_concepts ⊆ expected concept`;
61
+ - if an argument is a `CompoundTerm`, check `arg.operator.output_concept ⊆ expected concept`;
62
+ - if it is a `HashableAndStringable` (e.g. a raw literal), it is converted to a `Constant` and then checked.
63
+ - **Subsumption applies automatically**: if `real` is expected, passing `int` or `positive_int` (given `positive_int ⊆ int ⊆ real`) is legal in both cases.
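The worked example in the next section exercises the `Constant` branch; the nested-term branch from the second sub-point can be illustrated with a standalone mock (`MockOp`, `MockTerm`, and the `ANCESTORS` table are invented for illustration and are not kele APIs):

```python
# Standalone illustration of judging a nested CompoundTerm argument by its
# operator's output_concept; mock classes only, not kele's real ones.
from dataclasses import dataclass

ANCESTORS = {  # toy closure: concept -> {itself and all superconcepts}
    "positive_int": {"positive_int", "int", "real"},
    "int": {"int", "real"},
    "real": {"real"},
}

def is_sub(c1: str, c2: str) -> bool:
    return c2 in ANCESTORS[c1]

@dataclass
class MockOp:
    input_concepts: list[str]
    output_concept: str

@dataclass
class MockTerm:
    operator: MockOp

plus = MockOp(["real", "real"], "real")
to_real = MockOp(["int"], "real")
nested = MockTerm(to_real)                # a term whose operator outputs `real`

# Accepted as the first argument of `plus`, because real ⊆ real:
assert is_sub(nested.operator.output_concept, plus.input_concepts[0])
# Rejected where an `int` is expected, because real ⊄ int:
assert not is_sub(nested.operator.output_concept, to_real.input_concepts[0])
```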
64
+
65
+ ## 5. Example
66
+ ```python
67
+ Real = Concept("real")
68
+ Int = Concept("int", parents=["real"])
69
+ PosInt = Concept("positive_int", parents=["int"])
70
+
71
+ to_real = Operator("to_real", input_concepts=["int"], output_concept="real")
72
+
73
+ # int is expected; positive_int is also fine (since positive_int ⊆ int)
74
+ t1 = CompoundTerm("to_real", [Constant(5, "positive_int")]) # passes
75
+
76
+ t2 = CompoundTerm("to_real", [Constant(5, "real")]) # raises an exception
77
+
78
+ register_concept_relations("int ⊆ real")
79
+
80
+ # Attempting to register the reverse edge raises an error
81
+ try:
82
+ Concept.add_subsumption("real", "int")
83
+ except ValueError as e:
84
+ print("阻止互为子集:", e)
85
+ ```
86
+
87
+ ## 6. Registration entry points across modules/functions (not limited to Concept declarations)
88
+
89
+ Besides calling on `Concept`, you can also register subsumption relations anywhere via the following entry points:
90
+
91
+ ### 1) Global functions
92
+ ```python
93
+ register_concept_subsumptions([("rational","real")])
94
+
95
+ register_concept_relations({
96
+ "positive_int": ["int"], # 映射:子 -> [父...]
97
+ "int": ["real"],
98
+ })
99
+
100
+ register_concept_relations("int ⊆ real; positive_int <= int") # 字符串 DSL
101
+ ```
102
+
103
+ ### 2) Decorator (register at definition time)
104
+ ```python
105
+ @with_concept_relations("int ⊆ real; rational <= real")
106
+ def setup_domain(): ...
107
+ ```
108
+
109
+ ### 3) Context manager (register on entering the scope)
110
+ ```python
111
+ from base_classes import concept_relation_scope
112
+
113
+ with concept_relation_scope("""int ⊆ real
114
+ rational <= real"""):
115
+ # Immediately usable within this scope (registration is global and idempotent)
116
+ ...
117
+ ```
kele/syntax/__init__.py ADDED
@@ -0,0 +1,40 @@
1
+ """断言逻辑和推理引擎所需要的语法结构"""
2
+
3
+ # Import the core types from base_classes.py
4
+ from .base_classes import (
5
+ Constant,
6
+ Variable,
7
+ Concept,
8
+ Operator,
9
+ CompoundTerm,
10
+ TERM_TYPE,
11
+ Assertion,
12
+ ConceptConstraintMismatchError,
13
+ Formula,
14
+ FACT_TYPE,
15
+ Rule,
16
+ Intro,
17
+ Question,
18
+ _QuestionRule,
19
+ FlatCompoundTerm,
20
+ FLATTERM_TYPE,
21
+ ATOM_TYPE,
22
+ GROUNDED_TYPE_FOR_UNIFICATION,
23
+ vf
24
+ )
25
+
26
+ # Import the external-system structures from external.py
27
+ from .external import (
28
+ SankuManagementSystem,
29
+ )
30
+
31
+ __all__ = [ # noqa: RUF022
32
+ # base_classes
33
+ "Constant", "Variable", "Concept", "Operator", "CompoundTerm", "TERM_TYPE",
34
+ "Assertion", "ConceptConstraintMismatchError", "Formula", "FACT_TYPE", "Rule", "Question", "_QuestionRule",
35
+ "FlatCompoundTerm", "FLATTERM_TYPE", "ATOM_TYPE", "GROUNDED_TYPE_FOR_UNIFICATION",
36
+ "vf", "Intro",
37
+
38
+ # external
39
+ "SankuManagementSystem",
40
+ ]
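For reference, the re-exported names above line up with the constructor shapes shown in CONCEPT_README.md earlier in this diff; the snippet below reuses only those shapes and should be read as a sketch, since the exact signatures live in `kele.syntax.base_classes`:

```python
# Sketch based on the constructor shapes shown in CONCEPT_README.md above;
# the exact signatures in kele.syntax.base_classes may differ.
from kele.syntax import Concept, Operator, Constant, CompoundTerm

Real = Concept("real")
Int = Concept("int", parents=["real"])

plus = Operator("plus", input_concepts=["real", "real"], output_concept="real")

# int ⊆ real, so integer constants are accepted where `real` is expected.
term = CompoundTerm("plus", [Constant(1, "int"), Constant(2, "int")])
```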