user-simulator 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- metamorphic/__init__.py +9 -0
- metamorphic/results.py +62 -0
- metamorphic/rule_utils.py +482 -0
- metamorphic/rules.py +231 -0
- metamorphic/tests.py +83 -0
- metamorphic/text_comparison_utils.py +31 -0
- technologies/__init__.py +0 -0
- technologies/chatbot_connectors.py +567 -0
- technologies/chatbots.py +80 -0
- technologies/taskyto.py +110 -0
- user_sim/cli/sensei_chat.py +1 -1
- user_sim/core/role_structure.py +1 -1
- user_sim/handlers/pdf_parser_module.py +1 -1
- user_sim/utils/register_management.py +1 -1
- {user_simulator-0.1.1.dist-info → user_simulator-0.1.2.dist-info}/METADATA +1 -1
- {user_simulator-0.1.1.dist-info → user_simulator-0.1.2.dist-info}/RECORD +20 -10
- user_simulator-0.1.2.dist-info/entry_points.txt +6 -0
- user_simulator-0.1.2.dist-info/top_level.txt +3 -0
- user_simulator-0.1.1.dist-info/entry_points.txt +0 -6
- user_simulator-0.1.1.dist-info/top_level.txt +0 -1
- {user_simulator-0.1.1.dist-info → user_simulator-0.1.2.dist-info}/WHEEL +0 -0
- {user_simulator-0.1.1.dist-info → user_simulator-0.1.2.dist-info}/licenses/LICENSE.txt +0 -0
metamorphic/rules.py
ADDED
@@ -0,0 +1,231 @@
from pydantic import BaseModel, Field, model_validator
from typing import Optional, List, Any
from types import SimpleNamespace

from . import get_filtered_tests, empty_filtered_tests
from .rule_utils import *

# Do not remove this import, it is used to dynamically import the functions
from .rule_utils import filtered_tests, _conversation_length, extract_float, _only_talks_about
from .rule_utils import _utterance_index, _chatbot_returns, _repeated_answers, _data_collected, _missing_slots, _responds_in_same_language
from .rule_utils import _responds_in_same_language, semantic_content

from metamorphic.tests import Test


class Rule(BaseModel):
    name: str
    description: str
    conversations: int = 1
    active: Optional[bool] = True
    when: Optional[str] = "True"
    if_: Optional[str] = Field("True", alias="if")
    then: str
    yields: Optional[str] = None

    @model_validator(mode='before')
    @classmethod
    def check_aliases(cls, values: Any) -> Any:
        if 'oracle' in values:  # Handle 'oracle' or 'then' interchangeably
            values['then'] = values.pop('oracle')
        if 'conversations' in values and values['conversations'] == 'all':  # handle global rules ('all')
            values['conversations'] = -1
        return values

    def test(self, tests: List[Test], verbose: bool = False) -> dict:
        print(f" - Checking rule {self.name} [conversations: {self.conversations if self.conversations!=-1 else 'all'}]")
        if self.conversations == 1:
            return self.__property_test(tests, verbose)
        elif self.conversations == -1:  # global rules
            return self.__global_test(tests, verbose)
        else:  # by now we assume just 2 conversations...
            return self.__metamorphic_test(tests, verbose)

    def __global_test(self, tests: List[Test], verbose: bool) -> dict:
        results = {'pass': [], 'fail': [], 'not_applicable': []}
        # filter the conversations, to select only those satisfying when and if
        empty_filtered_tests()
        filtered = get_filtered_tests()
        for test in tests:
            test_dict = test.to_dict()
            conv = [SimpleNamespace(**test_dict)]
            test_dict['conv'] = conv
            test_dict.update(util_functions_to_dict())
            if self.applies(test_dict) and self.if_eval(test_dict):
                filtered.append(test)
            else:  # does not apply
                results['not_applicable'].append(test.file_name)
                if verbose:
                    print(f" - On file {test.file_name}")
                    print(f" -> Does not apply.")

        try:
            if self.then_eval(test_dict):
                results['pass'].append(filtered)
                if verbose:
                    print(f" - On files {', '.join([test.file_name for test in filtered])}")
                    print(f" -> Satisfied!")
            else:
                results['fail'].append(filtered)
        except Exception:
            results['not_applicable'].append(filtered)
            if verbose:
                print(f" - On files {', '.join([test.file_name for test in filtered])}")
                print(f" -> Satisfied!")

        return results

    def __property_test(self, tests: List[Test], verbose: bool) -> dict:
        results = {'pass': [], 'fail': [], 'not_applicable': []}
        for test in tests:
            test_dict = test.to_dict()
            conv = [SimpleNamespace(**test_dict)]
            test_dict['conv'] = conv
            test_dict.update(util_functions_to_dict())
            if verbose:
                print(f" - On file {test.file_name}")
            if self.applies(test_dict):
                if self.if_eval(test_dict):
                    try:
                        return_value = self.then_eval(test_dict)
                    except Exception:
                        self.__handle_not_applicable(verbose, results, test)
                        continue
                    if return_value == True:  # can be a boolean or another value to signal an error
                        self.__handle_pass(verbose, results, test)
                    else:
                        self.__handle_fail(verbose, results, return_value, test_dict, test)
                else:
                    self.__handle_not_applicable(verbose, results, test)
            else:
                self.__handle_not_applicable(verbose, results, test)
        return results


    def __handle_not_applicable(self, verbose, results, *tests):
        if len(tests)==1:
            results['not_applicable'].append(tests[0].file_name)
        else:
            results['not_applicable'].append(tuple(test.file_name for test in tests))
        if verbose: print(f" -> Does not apply.")

    def __handle_pass(self, verbose, results, *tests):
        if len(tests)==1:
            results['pass'].append(tests[0].file_name)
        else:
            results['pass'].append(tuple(test.file_name for test in tests))
        if verbose:
            print(f" -> Satisfied!")

    def __handle_fail(self, verbose, results, return_value, test_dict, *tests):
        if len(tests)==1:
            results['fail'].append(tests[0].file_name)
        else:
            results['fail'].append(tuple(test.file_name for test in tests))
        if verbose:
            message = ""
            if self.yields is not None:
                message = ". "+self.yield_eval(test_dict)
            if return_value != False:  # return_value can be a boolean or something else
                print(f" -> NOT Satisfied!. Reason: {return_value}{message}.")
            else:
                print(f" -> NOT Satisfied!. Reason: oracle violated{message}.")

    def __metamorphic_test(self, tests: List[Test], verbose: bool) -> dict:
        results = {'pass': [], 'fail': [], 'not_applicable': []}
        for test1 in tests:
            test_dict1 = test1.to_dict()
            sns = SimpleNamespace(**test_dict1)
            conv = [sns, sns]
            test_dict = {'conv': conv, 'interaction': []}  # just add a dummy interaction variable
            test_dict.update(util_functions_to_dict())
            for test2 in tests:
                if test1 == test2:
                    continue
                test_dict2 = test2.to_dict()
                conv[1] = SimpleNamespace(**test_dict2)
                if verbose:
                    print(f" - On files: {test1.file_name}, {test2.file_name}")
                if self.applies(test_dict):
                    if self.if_eval(test_dict):
                        try:
                            return_value = self.then_eval(test_dict)
                        except Exception:
                            self.__handle_fail(verbose, results, return_value, test_dict, test1, test2)
                            continue
                        if return_value == True:
                            self.__handle_pass(verbose, results, test1, test2)
                        else:
                            self.__handle_fail(verbose, results, return_value, test_dict, test1, test2)
                    else:
                        self.__handle_not_applicable(verbose, results, test1, test2)
                else:
                    self.__handle_not_applicable(verbose, results, test1, test2)
        return results

    def applies(self, test_dict: dict):
        try:
            return eval(self.when, test_dict)
        except Exception:
            return False

    def if_eval(self, test_dict: dict):
        try:
            return eval(self.if_, test_dict)
        except Exception:
            return False

    def then_eval(self, test_dict: dict):
        code = f"""
def _eval(**kwargs):
    # unpack parameters
    interaction = kwargs['interaction']
    conv = kwargs['conv']
{self.__unpack_dict(test_dict)}

    #wrappers for functions with implicit parameters
{self.__wrapper_functions()}

    return {self.then}
"""
        local_namespace = {}
        exec(code, globals(), local_namespace)
        self._eval = local_namespace['_eval']
        return self._eval(**test_dict)
        #return eval(self.then, test_dict)

    def yield_eval(self, test_dict: dict):
        code = f"""
def _eval(**kwargs):
    # unpack parameters
    interaction = kwargs['interaction']
    conv = kwargs['conv']
{self.__unpack_dict(test_dict)}

    #wrappers for functions with implicit parameters
{self.__wrapper_functions()}

    return {self.yields}
"""
        local_namespace = {}

        exec(code, globals(), local_namespace)
        self._eval = local_namespace['_eval']
        try:
            return str(self._eval(**test_dict))
        except Exception:
            return ""

    def __wrapper_functions(self):
        result = "\n".join(func for func in util_to_wrapper_dict().values())
        return result

    def __unpack_dict(self, dict):
        reserved = ['conv', 'interaction', '__builtins__']
        reserved += util_functions_to_dict().keys()
        code = ""
        for key, value in dict.items():
            if key in reserved:
                continue
            code += "    " + key + f"= kwargs['{key}']\n"
        return code
metamorphic/tests.py
ADDED
@@ -0,0 +1,83 @@
from pydantic import BaseModel, Field
from typing import List, Optional


class Test(BaseModel):
    ask_about: list
    conversation: list
    data_output: Optional[list] = []
    interaction: Optional[list] = []
    language: Optional[str] = None
    serial: str
    file_name: Optional[str] = None
    conversation_time: Optional[str] = None
    errors: Optional[list] = []
    assistant_times: Optional[list] = []

    @staticmethod
    def build_test(file, documents):
        test_metadata = next(documents)  # unpack the 1st YAML document: meta_data
        test = Test(**test_metadata)
        test.file_name = file
        times_doc = next(documents)
        test.conversation_time = times_doc['conversation time']  # 2nd YAML document is time
        if 'assistant response time' in times_doc:
            test.assistant_times = times_doc['assistant response time']
        test.interaction = next(documents)['interaction']  # 3rd YAML document is the conversation
        return test

    def to_dict(self):
        variable_dict = self.__get_ask_about_dict()
        variable_dict.update(self.__get_parameters_dict(self.conversation, 'conversation'))
        variable_dict.update(self.__get_parameters_dict(self.data_output, 'data_output'))
        variable_dict.update(self.__get_interactions_dict(self.interaction))
        variable_dict.update({'data_output': self.data_output})
        #print(f"Dict = {variable_dict}")
        return variable_dict


    def __get_interactions_dict(self, interactions_dict):
        """
        return a dictionary with, the interactions, the chatbot_phrases and the user_phrases
        :param interactions_dict:
        :return:
        """
        clean_dict = dict()
        clean_dict.update({'interaction': interactions_dict})
        chatbot_phrases = []
        for phrase in interactions_dict:
            if 'Assistant' in phrase:
                chatbot_phrases.append(phrase['Assistant'])
        clean_dict.update({'chatbot_phrases': chatbot_phrases})
        user_phrases = []
        for phrase in interactions_dict:
            if 'User' in phrase:
                user_phrases.append(phrase['User'])
        clean_dict.update({'user_phrases': user_phrases})
        return clean_dict


    def __get_ask_about_dict(self):
        clean_dict = dict()
        for item in self.ask_about:
            if isinstance(item, dict):
                for key in item:
                    clean_dict[key] = item[key]
        return clean_dict

    def __get_parameters_dict(self, attribute, name):
        clean_dict = dict()
        for item in attribute:
            if isinstance(item, dict):
                clean_dict.update(self.__flatten_dict(name, item))
        return clean_dict

    def __flatten_dict(self, name, map):
        flatten_dict = dict()
        for key in map:
            if not isinstance(map[key], dict):
                flatten_dict[name + '_' + key] = map[key]
                flatten_dict[key] = map[key]
            else:
                flatten_dict.update(self.__flatten_dict(name + '_' + key, map[key]))
        return flatten_dict
metamorphic/text_comparison_utils.py
ADDED
@@ -0,0 +1,31 @@
from difflib import SequenceMatcher
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def sequence_similarity(text1, text2):
    return SequenceMatcher(None, text1, text2).ratio()

def exact_similarity(text1, text2):
    if text1 == text2:
        return 1.0
    return 0.0

def jaccard_similarity(text1, text2):
    # Split the texts into sets of words
    set1 = set(text1.lower().split())
    set2 = set(text2.lower().split())

    # Calculate the Jaccard similarity
    intersection = set1.intersection(set2)
    union = set1.union(set2)

    return len(intersection) / len(union)

def tf_idf_cosine_similarity(text1, text2):
    # Convert the texts into TF-IDF vectors
    vectorizer = TfidfVectorizer()
    tfidf_matrix = vectorizer.fit_transform([text1, text2])

    # Compute cosine similarity
    cosine_sim = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])
    return cosine_sim[0][0]
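These helpers compare two strings at different granularities: exact equality, difflib character-sequence ratio, Jaccard overlap of word sets, and cosine similarity of TF-IDF vectors (scikit-learn). A short usage sketch:

from metamorphic.text_comparison_utils import (
    exact_similarity, sequence_similarity, jaccard_similarity, tf_idf_cosine_similarity
)

a = "the order was delivered on time"
b = "the order arrived on time"

print(exact_similarity(a, b))          # 0.0, the strings differ
print(sequence_similarity(a, b))       # character-level ratio from difflib
print(jaccard_similarity(a, b))        # |intersection| / |union| of the word sets, here 4/7
print(tf_idf_cosine_similarity(a, b))  # cosine similarity of the two TF-IDF vectors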
technologies/__init__.py
ADDED
File without changes