ripple-down-rules 0.6.1__py3-none-any.whl → 0.6.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ripple_down_rules/__init__.py +21 -1
- ripple_down_rules/datastructures/callable_expression.py +24 -7
- ripple_down_rules/datastructures/case.py +12 -11
- ripple_down_rules/datastructures/dataclasses.py +135 -14
- ripple_down_rules/datastructures/enums.py +29 -86
- ripple_down_rules/datastructures/field_info.py +177 -0
- ripple_down_rules/datastructures/tracked_object.py +208 -0
- ripple_down_rules/experts.py +141 -50
- ripple_down_rules/failures.py +4 -0
- ripple_down_rules/helpers.py +75 -8
- ripple_down_rules/predicates.py +97 -0
- ripple_down_rules/rdr.py +712 -96
- ripple_down_rules/rdr_decorators.py +164 -112
- ripple_down_rules/rules.py +351 -114
- ripple_down_rules/user_interface/gui.py +66 -41
- ripple_down_rules/user_interface/ipython_custom_shell.py +46 -9
- ripple_down_rules/user_interface/prompt.py +80 -60
- ripple_down_rules/user_interface/template_file_creator.py +13 -8
- ripple_down_rules/utils.py +537 -53
- {ripple_down_rules-0.6.1.dist-info → ripple_down_rules-0.6.6.dist-info}/METADATA +4 -1
- ripple_down_rules-0.6.6.dist-info/RECORD +28 -0
- ripple_down_rules-0.6.1.dist-info/RECORD +0 -24
- {ripple_down_rules-0.6.1.dist-info → ripple_down_rules-0.6.6.dist-info}/WHEEL +0 -0
- {ripple_down_rules-0.6.1.dist-info → ripple_down_rules-0.6.6.dist-info}/licenses/LICENSE +0 -0
- {ripple_down_rules-0.6.1.dist-info → ripple_down_rules-0.6.6.dist-info}/top_level.txt +0 -0
ripple_down_rules/rdr_decorators.py
@@ -3,110 +3,187 @@ This file contains decorators for the RDR (Ripple Down Rules) framework. Where e
 that can be used with any python function such that this function can benefit from the incremental knowledge acquisition
 of the RDRs.
 """
+import inspect
 import os.path
+from dataclasses import dataclass, field
 from functools import wraps
+from typing import get_origin
 
 from typing_extensions import Callable, Optional, Type, Tuple, Dict, Any, Self, get_type_hints, List, Union, Sequence
 
-from …
-from …
-from …
-from …
+from .datastructures.case import Case
+from .datastructures.dataclasses import CaseQuery, CaseFactoryMetaData
+from .experts import Expert, Human
+from .failures import RDRLoadError
+from .rdr import GeneralRDR
+from .utils import get_type_from_type_hint
+
 try:
-    from …
+    from .user_interface.gui import RDRCaseViewer
 except ImportError:
     RDRCaseViewer = None
-from …
-    get_method_class_if_exists, …
+from .utils import get_method_args_as_dict, get_func_rdr_model_name, make_set, \
+    get_method_class_if_exists, make_list
+from .helpers import create_case_from_method
 
 
+@dataclass(unsafe_hash=True)
 class RDRDecorator:
[… 46 removed lines (old 24-69) are not rendered in this view …]
+    models_dir: str
+    """
+    The directory to save the RDR models in.
+    """
+    output_type: Tuple[Type, ...]
+    """
+    The type(s) of the output produced by the RDR model (The type(s) of the queried attribute).
+    """
+    mutual_exclusive: bool
+    """
+    Whether the queried attribute is mutually exclusive (i.e. allows for only one possible value) or not.
+    """
+    fit: bool = field(default=True)
+    """
+    Whether to run in fitting mode and prompt the expert or just classify using existing rules.
+    """
+    expert: Optional[Expert] = field(default=None)
+    """
+    The expert instance, this is used by the rdr to prompt for answers.
+    """
+    update_existing_rules: bool = field(default=True)
+    """
+    When in fitting mode, whether to ask for answers for existing rules as well or not.
+    """
+    package_name: Optional[str] = field(default=None)
+    """
+    The name of the user python package where the RDR model will be saved, this is useful for generating relative
+    imports in the generated rdr model files.
+    """
+    use_generated_classifier: bool = field(default=False)
+    """
+    Whether to use the generated classifier files of the rdr model instead of the RDR instance itself, this is useful
+    when you want to debug inside the rules.
+    """
+    ask_now: Callable[Dict[str, Any], bool] = field(default=lambda _: True)
+    """
+    A user provided callable function that outputs a boolean indicating when to ask the expert for an answer. The input
+    to the `ask_now` function is a dictionary with the original function arguments, while arguments like `self` and
+    `cls` are passed as a special key `self_` or `cls_` respectively.
+    """
+    fitting_decorator: Optional[Callable] = field(default=lambda f: f)
+    """
+    A user provided decorator that wraps the `py:meth:ripple_down_rules.rdr.RippleDownRules.fit_case` method which is
+    used when in fitting mode, this is useful when you want special logic pre and post the fitting operation, you can
+    for example freeze your system during fitting such that you have a stable state that you can query and use while
+    writing and testing your answer/rule.
+    """
+    generate_dot_file: bool = field(default=False)
+    """
+    Whether to generate a dynamic dot file representing the state of the rule tree each time the rdr is queried, showing
+    which rules fired and which rules didn't get evaluated, ...etc.
+    """
+    model_name: Optional[str] = field(default=None)
+    """
+    The name of the rdr model, this gets auto generated from the function signature and the class/file it is contained
+    in.
+    """
+    rdr: GeneralRDR = field(init=False, default=None)
+    """
+    The ripple down rules instance of the decorator class.
+    """
+    parsed_output_type: List[Type] = field(init=False, default_factory=list)
+    """
+    The output of a post processing done on the output types, for example converting typing module types
+    (i.e. type hints) to python types.
+    """
+    origin_type: Optional[Type] = field(init=False, default=None)
+    """
+    The origin of the type hint of the attribute, useful in the case of not mutually exclusive attributes to map the
+    result to the specified container type (e.g. a list instead of a set which is the default container type for rdr
+    output).
+    """
+    output_name: str = field(init=False, default='output_')
+    """
+    This is used to refer to the output value of the decorated function, this is used as part of the case as input to
+    the rdr model, but is never used in the rule logic to prevent cycles from happening. The correct way to use the
+    output of an rdr is through refinement rules which happens automatically by the rdr prompting for refinements.
+    """
+    _not_none_output_found: bool = field(init=False, default=False)
+    """
+    This is a flag that indicates that a not None output for the rdr has been inferred, this is used to update the
+    generated dot file if it is set to `True`.
+    """
+    case_factory_metadata: CaseFactoryMetaData = field(init=False, default_factory=CaseFactoryMetaData)
+    """
+    Metadata that contains the case factory method, and the scenario that is being run during the case query.
+    """
 
     def decorator(self, func: Callable) -> Callable:
 
         @wraps(func)
         def wrapper(*args, **kwargs) -> Optional[Any]:
 
+            original_kwargs = {pname: p for pname, p in inspect.signature(func).parameters.items() if
+                               p.default != inspect._empty}
+            for og_kwarg in original_kwargs:
+                if og_kwarg not in kwargs:
+                    kwargs[og_kwarg] = original_kwargs[og_kwarg].default
+
             if self.model_name is None:
                 self.initialize_rdr_model_name_and_load(func)
+            if self.origin_type is None and not self.mutual_exclusive:
+                self.origin_type = get_origin(get_type_hints(func)['return'])
+                if self.origin_type:
+                    self.origin_type = get_type_from_type_hint(self.origin_type)
 
             func_output = {self.output_name: func(*args, **kwargs)}
 
-            case, case_dict = …
[… 2 removed lines (old 82-83) are not rendered in this view …]
+            case, case_dict = create_case_from_method(func, func_output, *args, **kwargs)
+
+            @self.fitting_decorator
+            def fit():
                 if len(self.parsed_output_type) == 0:
                     self.parsed_output_type = self.parse_output_type(func, self.output_type, *args)
                 if self.expert is None:
-                    self.expert = Human(…
[… 5 removed lines (old 88-92) are not rendered in this view …]
+                    self.expert = Human(answers_save_path=self.models_dir + f'/{self.model_name}/expert_answers')
+                case_query = self.create_case_query_from_method(
+                    func, func_output,
+                    self.parsed_output_type,
+                    self.mutual_exclusive,
+                    args, kwargs,
+                    case=case, case_dict=case_dict,
+                    scenario=self.case_factory_metadata.scenario,
+                    this_case_target_value=self.case_factory_metadata.this_case_target_value)
                 output = self.rdr.fit_case(case_query, expert=self.expert,
-                                           update_existing_rules=self.update_existing_rules…
[… 1 removed line (old 95) is not rendered in this view …]
+                                           update_existing_rules=self.update_existing_rules)
+                return output
+
+            if self.fit and not self.use_generated_classifier and self.ask_now(case_dict):
+                output = fit()
             else:
                 if self.use_generated_classifier:
                     if self.generated_classifier is None:
-                        model_path = os.path.join(self.…
+                        model_path = os.path.join(self.models_dir, self.model_name)
                         self.generated_classifier = self.rdr.get_rdr_classifier_from_python_file(model_path)
                     output = self.generated_classifier(case)
                 else:
                     output = self.rdr.classify(case)
+                    if self.generate_dot_file:
+                        eval_rule_tree = self.rdr.get_evaluated_rule_tree()
+                        if not self._not_none_output_found or (eval_rule_tree and len(eval_rule_tree) > 1):
+                            self.rdr.render_evaluated_rule_tree(self.models_dir + f'/{self.model_name}',
+                                                                show_full_tree=True)
+                        if eval_rule_tree and len(eval_rule_tree) > 1:
+                            self._not_none_output_found = True
 
             if self.output_name in output:
+                if self.origin_type == list:
+                    return make_list(output[self.output_name])
                 return output[self.output_name]
             else:
                 return func_output[self.output_name]
 
+        wrapper._rdr_decorator_instance = self
+
         return wrapper
 
     @staticmethod
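For orientation, here is a minimal usage sketch of the decorator API this hunk modifies. Only `RDRDecorator`, its three required fields (`models_dir`, `output_type`, `mutual_exclusive`), and the `decorator` method are taken from the diff above; the model directory, the `is_speed_limited` function, and its argument are hypothetical.

from ripple_down_rules.rdr_decorators import RDRDecorator

# The three required dataclass fields shown in the diff: where to store models,
# the output type(s) of the queried attribute, and whether the output is mutually exclusive.
speed_rdr = RDRDecorator("./rdr_models", (bool,), True)

@speed_rdr.decorator
def is_speed_limited(area_type: str) -> bool:
    # The plain function result is only a fallback; in fitting mode the wrapper
    # prompts the expert, and once rules exist it returns the RDR's classification.
    return False

Per the new wrapper logic, when the output is not mutually exclusive and the return annotation is a container such as `List[...]`, `get_origin` records the container type so the RDR's set-valued output can be converted back via `make_list`.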
@@ -114,7 +191,11 @@ class RDRDecorator:
                                            func_output: Dict[str, Any],
                                            output_type: Sequence[Type],
                                            mutual_exclusive: bool,
[… 1 removed line (old 117) is not rendered in this view …]
+                                           func_args: Tuple[Any, ...], func_kwargs: Dict[str, Any],
+                                           case: Optional[Case] = None,
+                                           case_dict: Optional[Dict[str, Any]] = None,
+                                           scenario: Optional[Callable] = None,
+                                           this_case_target_value: Optional[Any] = None,) -> CaseQuery:
         """
         Create a CaseQuery from the function and its arguments.
 
@@ -122,42 +203,28 @@ class RDRDecorator:
         :param func_output: The output of the function as a dictionary, where the key is the output name.
         :param output_type: The type of the output as a sequence of types.
         :param mutual_exclusive: If True, the output types are mutually exclusive.
-        :param …
-        :param …
+        :param func_args: The positional arguments of the function.
+        :param func_kwargs: The keyword arguments of the function.
+        :param case: The case to create.
+        :param case_dict: The dictionary of the case.
+        :param scenario: The scenario that produced the given case.
+        :param this_case_target_value: The target value for the case.
         :return: A CaseQuery object representing the case.
         """
         output_type = make_set(output_type)
-        case …
+        if case is None or case_dict is None:
+            case, case_dict = create_case_from_method(func, func_output, *func_args, **func_kwargs)
         scope = func.__globals__
         scope.update(case_dict)
         func_args_type_hints = get_type_hints(func)
         output_name = list(func_output.keys())[0]
         func_args_type_hints.update({output_name: Union[tuple(output_type)]})
         return CaseQuery(case, output_name, tuple(output_type),
-                         mutual_exclusive, scope=scope,
+                         mutual_exclusive, scope=scope, scenario=scenario, this_case_target_value=this_case_target_value,
                          is_function=True, function_args_type_hints=func_args_type_hints)
 
-    @staticmethod
-    def create_case_from_method(func: Callable,
-                                func_output: Dict[str, Any],
-                                *args, **kwargs) -> Tuple[Case, Dict[str, Any]]:
-        """
-        Create a Case from the function and its arguments.
-
-        :param func: The function to create a case from.
-        :param func_output: A dictionary containing the output of the function, where the key is the output name.
-        :param args: The positional arguments of the function.
-        :param kwargs: The keyword arguments of the function.
-        :return: A Case object representing the case.
-        """
-        case_dict = get_method_args_as_dict(func, *args, **kwargs)
-        case_dict.update(func_output)
-        case_name = get_func_rdr_model_name(func)
-        return Case(dict, id(case_dict), case_name, case_dict, **case_dict), case_dict
-
     def initialize_rdr_model_name_and_load(self, func: Callable) -> None:
[… 1 removed line (old 159) is not rendered in this view …]
-        self.model_name = str_to_snake_case(model_file_name)
+        self.model_name = get_func_rdr_model_name(func, include_file_name=True)
         self.load()
 
     @staticmethod
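Note that the static method `create_case_from_method` removed here has not disappeared from the package: the first hunk adds `from .helpers import create_case_from_method`, and the wrapper now calls that imported helper. A hedged migration sketch for any external caller, assuming the helper keeps the call signature and default output name (`output_`) used in this diff; the `area` function and its arguments are illustrative.

from ripple_down_rules.helpers import create_case_from_method

def area(width: float, height: float) -> float:
    return width * height

# Build the case from the function arguments plus its named output, the same way
# the new wrapper does with {self.output_name: func(*args, **kwargs)}.
case, case_dict = create_case_from_method(area, {'output_': area(2.0, 3.0)}, 2.0, 3.0)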
@@ -175,28 +242,13 @@ class RDRDecorator:
             parsed_output_type.append(ot)
         return parsed_output_type
 
-    def save(self):
-        """
-        Save the RDR model to the specified directory.
-        """
-        self.rdr.save(self.rdr_models_dir, self.model_name, package_name=self.package_name)
-
     def load(self):
         """
-        Load the RDR model from the specified directory.
-        """
-        self.rdr = None
-        if self.model_name is not None:
-            model_path = os.path.join(self.rdr_models_dir, self.model_name + f"/rdr_metadata/{self.model_name}.json")
-            if os.path.exists(os.path.join(self.rdr_models_dir, model_path)):
-                self.rdr = GeneralRDR.load(self.rdr_models_dir, self.model_name, package_name=self.package_name)
-                self.rdr.set_viewer(self.viewer)
-        if self.rdr is None:
-            self.rdr = GeneralRDR(save_dir=self.rdr_models_dir, model_name=self.model_name,
-                                  viewer=self.viewer)
-
-    def update_from_python(self):
+        Load the RDR model from the specified directory, otherwise create a new one.
         """
[… 3 removed lines (old 200-202) are not rendered in this view …]
+        self.rdr = GeneralRDR(save_dir=self.models_dir, model_name=self.model_name)
+
+
+def fit_rdr_func(scenario: Callable, rdr_decorated_func: Callable, *func_args, **func_kwargs) -> None:
+    rdr_decorated_func._rdr_decorator_instance.case_factory_metadata = CaseFactoryMetaData(scenario=scenario)
+    rdr_decorated_func(*func_args, **func_kwargs)
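Finally, the new module-level `fit_rdr_func` attaches a scenario to the decorator instance before invoking the decorated function, so fitted cases record which scenario produced them. A minimal sketch of how it could be driven, reusing the hypothetical `is_speed_limited` from the earlier sketch; the scenario name and argument are likewise illustrative.

from ripple_down_rules.rdr_decorators import fit_rdr_func

def narrow_corridor_scenario():
    # The scenario callable is stored as CaseFactoryMetaData(scenario=...) on the
    # decorator instance; fit_rdr_func then calls the decorated function as usual,
    # so fitting and classification proceed exactly as in a direct call.
    fit_rdr_func(narrow_corridor_scenario, is_speed_limited, "narrow_corridor")

narrow_corridor_scenario()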