lybic-guiagents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lybic-guiagents might be problematic. Click here for more details.
- desktop_env/__init__.py +1 -0
- desktop_env/actions.py +203 -0
- desktop_env/controllers/__init__.py +0 -0
- desktop_env/controllers/python.py +471 -0
- desktop_env/controllers/setup.py +882 -0
- desktop_env/desktop_env.py +509 -0
- desktop_env/evaluators/__init__.py +5 -0
- desktop_env/evaluators/getters/__init__.py +41 -0
- desktop_env/evaluators/getters/calc.py +15 -0
- desktop_env/evaluators/getters/chrome.py +1774 -0
- desktop_env/evaluators/getters/file.py +154 -0
- desktop_env/evaluators/getters/general.py +42 -0
- desktop_env/evaluators/getters/gimp.py +38 -0
- desktop_env/evaluators/getters/impress.py +126 -0
- desktop_env/evaluators/getters/info.py +24 -0
- desktop_env/evaluators/getters/misc.py +406 -0
- desktop_env/evaluators/getters/replay.py +20 -0
- desktop_env/evaluators/getters/vlc.py +86 -0
- desktop_env/evaluators/getters/vscode.py +35 -0
- desktop_env/evaluators/metrics/__init__.py +160 -0
- desktop_env/evaluators/metrics/basic_os.py +68 -0
- desktop_env/evaluators/metrics/chrome.py +493 -0
- desktop_env/evaluators/metrics/docs.py +1011 -0
- desktop_env/evaluators/metrics/general.py +665 -0
- desktop_env/evaluators/metrics/gimp.py +637 -0
- desktop_env/evaluators/metrics/libreoffice.py +28 -0
- desktop_env/evaluators/metrics/others.py +92 -0
- desktop_env/evaluators/metrics/pdf.py +31 -0
- desktop_env/evaluators/metrics/slides.py +957 -0
- desktop_env/evaluators/metrics/table.py +585 -0
- desktop_env/evaluators/metrics/thunderbird.py +176 -0
- desktop_env/evaluators/metrics/utils.py +719 -0
- desktop_env/evaluators/metrics/vlc.py +524 -0
- desktop_env/evaluators/metrics/vscode.py +283 -0
- desktop_env/providers/__init__.py +35 -0
- desktop_env/providers/aws/__init__.py +0 -0
- desktop_env/providers/aws/manager.py +278 -0
- desktop_env/providers/aws/provider.py +186 -0
- desktop_env/providers/aws/provider_with_proxy.py +315 -0
- desktop_env/providers/aws/proxy_pool.py +193 -0
- desktop_env/providers/azure/__init__.py +0 -0
- desktop_env/providers/azure/manager.py +87 -0
- desktop_env/providers/azure/provider.py +207 -0
- desktop_env/providers/base.py +97 -0
- desktop_env/providers/gcp/__init__.py +0 -0
- desktop_env/providers/gcp/manager.py +0 -0
- desktop_env/providers/gcp/provider.py +0 -0
- desktop_env/providers/virtualbox/__init__.py +0 -0
- desktop_env/providers/virtualbox/manager.py +463 -0
- desktop_env/providers/virtualbox/provider.py +124 -0
- desktop_env/providers/vmware/__init__.py +0 -0
- desktop_env/providers/vmware/manager.py +455 -0
- desktop_env/providers/vmware/provider.py +105 -0
- gui_agents/__init__.py +0 -0
- gui_agents/agents/Action.py +209 -0
- gui_agents/agents/__init__.py +0 -0
- gui_agents/agents/agent_s.py +832 -0
- gui_agents/agents/global_state.py +610 -0
- gui_agents/agents/grounding.py +651 -0
- gui_agents/agents/hardware_interface.py +129 -0
- gui_agents/agents/manager.py +568 -0
- gui_agents/agents/translator.py +132 -0
- gui_agents/agents/worker.py +355 -0
- gui_agents/cli_app.py +560 -0
- gui_agents/core/__init__.py +0 -0
- gui_agents/core/engine.py +1496 -0
- gui_agents/core/knowledge.py +449 -0
- gui_agents/core/mllm.py +555 -0
- gui_agents/tools/__init__.py +0 -0
- gui_agents/tools/tools.py +727 -0
- gui_agents/unit_test/__init__.py +0 -0
- gui_agents/unit_test/run_tests.py +65 -0
- gui_agents/unit_test/test_manager.py +330 -0
- gui_agents/unit_test/test_worker.py +269 -0
- gui_agents/utils/__init__.py +0 -0
- gui_agents/utils/analyze_display.py +301 -0
- gui_agents/utils/common_utils.py +263 -0
- gui_agents/utils/display_viewer.py +281 -0
- gui_agents/utils/embedding_manager.py +53 -0
- gui_agents/utils/image_axis_utils.py +27 -0
- lybic_guiagents-0.1.0.dist-info/METADATA +416 -0
- lybic_guiagents-0.1.0.dist-info/RECORD +85 -0
- lybic_guiagents-0.1.0.dist-info/WHEEL +5 -0
- lybic_guiagents-0.1.0.dist-info/licenses/LICENSE +201 -0
- lybic_guiagents-0.1.0.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,665 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import datetime
|
|
3
|
+
import difflib
|
|
4
|
+
import functools
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
import operator
|
|
8
|
+
import os
|
|
9
|
+
import re
|
|
10
|
+
import sqlite3
|
|
11
|
+
from numbers import Number
|
|
12
|
+
from typing import Callable, Any, Union
|
|
13
|
+
from typing import Dict, List, Pattern
|
|
14
|
+
|
|
15
|
+
import lxml.etree
|
|
16
|
+
import pdfplumber
|
|
17
|
+
import yaml
|
|
18
|
+
from docx import Document
|
|
19
|
+
from lxml.cssselect import CSSSelector
|
|
20
|
+
from lxml.etree import _Element
|
|
21
|
+
from rapidfuzz import fuzz
|
|
22
|
+
|
|
23
|
+
from desktop_env.evaluators.metrics.utils import _match_record, _match_value_to_rule
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger("desktopenv.metric.general")
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def check_include_exclude(result: str, rules: Dict[str, List[str]]) -> float:
    """Score 1.0 iff every "include" substring occurs in ``result`` and no
    "exclude" substring does; 0.0 otherwise (including when ``result`` is None).
    """
    if result is None:
        return 0.

    print(result, rules)
    must_have = rules.get("include", [])
    must_lack = rules.get("exclude", [])
    ok = all(s in result for s in must_have) and not any(s in result for s in must_lack)
    return 1. if ok else 0.
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def exact_match(result, rules) -> float:
    """Return 1.0 iff ``result`` equals ``rules["expected"]`` exactly, else 0.0."""
    reference = rules["expected"]
    print(result, reference)

    return 1. if result == reference else 0.
|
|
49
|
+
|
|
50
|
+
def match_in_list(result, rules) -> float:
    """Return 1.0 iff ``result`` is one of the values in ``rules["expected"]``."""
    candidates = rules["expected"]
    print(result, candidates)

    return 1. if result in candidates else 0.
|
|
58
|
+
|
|
59
|
+
def literal_match(result: Any, expected: Any, **options) -> float:
    """Compare ``result`` to ``expected`` literally after stringification.

    Args:
        result: observed value.
        expected: reference value.
        **options:
            type (str): 'str' (default) or 'list'.
            ignore_case (bool): case-insensitive comparison; defaults to False.

    Returns:
        float: 1.0 on a match, 0.0 otherwise.

    Raises:
        NotImplementedError: when the ``type`` option is unsupported.
    """
    literal_type = options.get('type', 'str')
    if literal_type == 'str':
        ignore_case = options.get('ignore_case', False)
        score = str(result) == str(expected) if not ignore_case else str(result).lower() == str(expected).lower()
        return float(score)
    elif literal_type == 'list':
        # Both sides must be sequences of equal length; anything else fails fast.
        if type(result) not in [list, tuple] or type(expected) not in [list, tuple] or len(result) != len(expected):
            return .0
        ignore_case = options.get('ignore_case', False)
        result = [str(s) for s in result] if not ignore_case else [str(s).lower() for s in result]
        expected = [str(s) for s in expected] if not ignore_case else [str(s).lower() for s in expected]
        return float(result == expected)
    else:
        # FIX: the message previously interpolated the *builtin* `type`
        # (rendering "<class 'type'>") instead of the requested option value.
        raise NotImplementedError(f"Type {literal_type} not supported")
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def is_in_list(result, rules) -> float:
    """Return 1.0 when ``rules["expected"]`` occurs as an element of ``result``."""
    needle = rules["expected"]
    return 1. if needle in result else 0.
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def diff_text_file(result: str, expect: str) -> float:
    """Return the line-based similarity ratio (0..1) between two text files.

    Both files are read, split into lines, and compared with
    ``difflib.SequenceMatcher``. Returns 0.0 when ``result`` is None.
    """
    if result is None:
        return 0.

    with open(result) as result_file:
        observed_lines = result_file.read().splitlines()
    with open(expect) as expect_file:
        reference_lines = expect_file.read().splitlines()
    return difflib.SequenceMatcher(a=observed_lines, b=reference_lines).ratio()
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def fuzzy_match(result, rules) -> float:
    """Return the rapidfuzz similarity (0..1) between ``result`` and ``rules["expected"]``."""
    reference = rules["expected"]
    return fuzz.ratio(result, reference) / 100.
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def fuzzy_place_math(result_file_path, rules) -> float:
    """Score a .docx answer file against a list of acceptable answers.

    Every whitespace-separated token in the document is fuzzily compared to
    each candidate in ``rules["expected"]``, keeping its best ratio. Exactly
    three tokens are expected — otherwise the score is 0 — and the final
    score is the mean of the three best-match ratios.
    """
    if result_file_path is None:
        return 0.
    candidates = rules["expected"]  # a list of possible answers
    document = Document(result_file_path)
    tokens = []
    for paragraph in document.paragraphs:
        tokens.extend(paragraph.text.split())
    best_scores = [
        max((fuzz.ratio(token, candidate) / 100 for candidate in candidates), default=0)
        for token in tokens
    ]
    if len(best_scores) != 3:
        return 0.
    return sum(best_scores) / 3
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def check_csv(result: str, rules: Dict[str, List[Dict[str, str]]]) -> float:
    """Evaluate a CSV file against record-matching rules.

    Args:
        result (str): path to the csv file (None scores 0).
        rules (Dict[str, List[Dict[str, str]]]): dict like
            {
                "expect": [{key: value}],   # each must match >= 1 record
                "unexpect": [{key: value}]  # none may match any record
            }

    Returns:
        float: 1.0 when every "expect" rule matched some row and no
        "unexpect" rule matched any row, else 0.0.
    """
    if result is None:
        return 0.

    expect_rules = rules.get("expect", [])
    unexpect_rules = rules.get("unexpect", [])
    expect_hits = [False for _ in expect_rules]
    saw_unexpected = False
    with open(result) as csv_file:
        for record in csv.DictReader(csv_file):
            for idx, rule in enumerate(expect_rules):
                if _match_record(rule, record):
                    expect_hits[idx] = True
            if any(_match_record(rule, record) for rule in unexpect_rules):
                saw_unexpected = True
    return float(all(expect_hits) and not saw_unexpected)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def check_list(result: str, rules: Dict[str, List[str]]) -> float:
    """Evaluate a line-oriented text file against regex rules.

    Args:
        result (str): path to the file (None scores 0).
        rules (Dict[str, List[str]]): dict like
            {
                "expect": regexes that must each match at least one line,
                "unexpect": regexes that must match no line
            }

    Returns:
        float: 1.0 when every constraint holds, else 0.0.
    """
    if result is None:
        return 0.

    must_match: List[Pattern[str]] = [re.compile(p) for p in rules.get("expect", [])]
    must_not_match: List[Pattern[str]] = [re.compile(p) for p in rules.get("unexpect", [])]

    matched = [False] * len(must_match)
    clean = True
    with open(result) as fobj:
        for line in fobj:
            matched = [seen or (pat.search(line) is not None)
                       for seen, pat in zip(matched, must_match)]
            if any(pat.search(line) is not None for pat in must_not_match):
                clean = False
    return float(all(matched) and clean)
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
# XML namespace prefix -> URI maps used by check_accessibility_tree() when
# resolving XPath / CSS selectors against an accessibility-tree dump, keyed
# by OS name. Note the per-OS differences: "windows" adds a "class"
# namespace, "macos" adds "role" and has no "docattr" entry.
_accessibility_ns_map = {
    "ubuntu": {
        "st": "https://accessibility.ubuntu.example.org/ns/state",
        "attr": "https://accessibility.ubuntu.example.org/ns/attributes",
        "cp": "https://accessibility.ubuntu.example.org/ns/component",
        "doc": "https://accessibility.ubuntu.example.org/ns/document",
        "docattr": "https://accessibility.ubuntu.example.org/ns/document/attributes",
        "txt": "https://accessibility.ubuntu.example.org/ns/text",
        "val": "https://accessibility.ubuntu.example.org/ns/value",
        "act": "https://accessibility.ubuntu.example.org/ns/action",
    },
    "windows": {
        "st": "https://accessibility.windows.example.org/ns/state",
        "attr": "https://accessibility.windows.example.org/ns/attributes",
        "cp": "https://accessibility.windows.example.org/ns/component",
        "doc": "https://accessibility.windows.example.org/ns/document",
        "docattr": "https://accessibility.windows.example.org/ns/document/attributes",
        "txt": "https://accessibility.windows.example.org/ns/text",
        "val": "https://accessibility.windows.example.org/ns/value",
        "act": "https://accessibility.windows.example.org/ns/action",
        "class": "https://accessibility.windows.example.org/ns/class"
    },
    "macos": {
        "st": "https://accessibility.macos.example.org/ns/state",
        "attr": "https://accessibility.macos.example.org/ns/attributes",
        "cp": "https://accessibility.macos.example.org/ns/component",
        "doc": "https://accessibility.macos.example.org/ns/document",
        "txt": "https://accessibility.macos.example.org/ns/text",
        "val": "https://accessibility.macos.example.org/ns/value",
        "act": "https://accessibility.macos.example.org/ns/action",
        "role": "https://accessibility.macos.example.org/ns/role",
    }

}
|
|
215
|
+
|
|
216
|
+
def check_accessibility_tree(result: str, rules: List[Dict[str, Any]], osname: str = "ubuntu") -> float:
    """
    Args:
        result (str): XML of GNOME Accessibility Tree
        rules (List[Dict[str, Any]]): list of dict like
            {
                "selectors": list of str as CSS selectors, will be connected by ", "
                  to form a composite selector. Only one from `selectors` and
                  `xpath` is needed. If both are present, `xpath` takes the
                  priority.
                "xpath": str as xpath. Only one from `selectors` and `xpath` is
                  needed. If both are present, `xpath` takes the priority.
                "text": str as the expected text content of the selected element.
                "exact": bool specifying whether exact match or fuzzy match should
                  be performed. defaults to True.
            }
        osname (str): "ubuntu" | "windows" | "macos". "ubuntu" by default.

    Returns:
        float: product of per-rule scores; 0.0 as soon as a rule selects
        no elements.
    """

    a11y_ns_map = _accessibility_ns_map[osname]

    at: _Element = lxml.etree.fromstring(result)
    total_match_score = 1.
    for r in rules:
        # `xpath` takes priority over `selectors`, as documented above.
        if "xpath" in r:
            elements: List[_Element] = at.xpath(r["xpath"], namespaces=a11y_ns_map)
        elif "selectors" in r:
            selector = CSSSelector(", ".join(r["selectors"]), namespaces=a11y_ns_map)
            elements: List[_Element] = selector(at)
        else:
            raise ValueError("At least one of xpath and selectors is required")

        if len(elements) == 0:
            logger.info("No elements: %s", r["xpath"] if "xpath" in r else r["selectors"])
            return 0.

        if "text" in r:
            # FIX: "exact" is documented as defaulting to True, but the code
            # previously did r["exact"] and raised KeyError when it was absent.
            exact_mode = r.get("exact", True)
            match_func: Callable[[str], Number] = functools.partial(
                operator.eq if exact_mode
                else (lambda a, b: fuzz.ratio(a, b) / 100.),
                r["text"],
            )
            # Keep the best score across all selected elements.
            match_score: Number = 0
            for elm in elements:
                match_score = max(match_score, match_func(elm.text or None))
        else:
            # A rule without "text" only asserts that the element exists.
            match_score = 1.
        total_match_score *= match_score

    return float(total_match_score)
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
# def check_existence(result: str, *args) -> float:
|
|
271
|
+
# return 1. - (result is None)
|
|
272
|
+
|
|
273
|
+
def run_sqlite3(result: str, rules: Dict[str, Any]) -> float:
    """Run ``rules["sql"]`` against the SQLite database at ``result``.

    Args:
        result (str): path to the SQLite database file.
        rules (Dict[str, Any]): must contain "sql", a single SELECT whose
            first cell of the first row is the score.

    Returns:
        float: first cell of the first row, with falsy/NULL values (and a
        query returning no rows) mapped to 0.0.
    """
    connection: sqlite3.Connection = sqlite3.connect(result)
    try:
        cursor: sqlite3.Cursor = connection.execute(rules["sql"])
        row = cursor.fetchone()
        # FIX: a query returning no rows previously crashed with TypeError
        # on `None[0]`; treat it as a zero score instead.
        return float((row[0] if row is not None else 0) or 0)
    finally:
        # FIX: the connection was previously leaked (never closed).
        connection.close()
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def check_json(result: str, rules: Dict[str, List[Dict[str, Union[List[str], str]]]], is_yaml: bool = False) -> float:
    """
    Args:
        result (str): path to json file
        rules (Dict[str, List[Dict[str, Union[List[str], str]]]]): dict like
            {
                "expect": [
                    {
                        "key": list of str
                        "method": str
                        "ref": something
                    }
                ],
                "unexpect": <the same as `expect`
            }
        is_yaml (bool): yaml rather than json

    Returns:
        float: 1.0 when every "expect" rule matches and no "unexpect" rule
        matches; 0.0 on any mismatch, missing key, or read/parse failure.
    """

    if result is None:
        logger.warning("Result file path is None, returning 0.0")
        return 0.

    # Check if file exists
    if not os.path.exists(result):
        logger.warning(f"Result file does not exist: {result}, returning 0.0")
        return 0.

    # Parse the file; every failure mode is logged and scored 0 rather than
    # propagated, so a malformed result never crashes the evaluator.
    try:
        with open(result, 'r', encoding='utf-8') as f:
            if is_yaml:
                try:
                    # Use SafeLoader instead of Loader for better security and error handling
                    result_data: Dict[str, Any] = yaml.safe_load(f)
                    if result_data is None:
                        logger.warning(f"YAML file {result} is empty or contains only null values, returning 0.0")
                        return 0.
                except yaml.YAMLError as e:
                    logger.error(f"YAML parsing error in file {result}: {e}")
                    logger.error(f"File content might be corrupted or have invalid YAML syntax")
                    return 0.
                except Exception as e:
                    logger.error(f"Unexpected error parsing YAML file {result}: {e}")
                    return 0.
            else:
                try:
                    result_data: Dict[str, Any] = json.load(f)
                except json.JSONDecodeError as e:
                    logger.error(f"JSON parsing error in file {result}: {e}")
                    return 0.
                except Exception as e:
                    logger.error(f"Unexpected error parsing JSON file {result}: {e}")
                    return 0.
    except IOError as e:
        logger.error(f"IO error reading file {result}: {e}")
        return 0.
    except Exception as e:
        logger.error(f"Unexpected error reading file {result}: {e}")
        return 0.

    expect_rules = rules.get("expect", {})
    unexpect_rules = rules.get("unexpect", {})

    metric = True
    # "expect" rules: walk each rule's key path into the parsed data; any
    # missing/non-indexable step fails the whole check immediately.
    for r in expect_rules:
        value = result_data
        try:
            for k in r["key"]:
                try:
                    value = value[k]
                except KeyError:
                    logger.debug(f"Key '{k}' not found in result data, returning 0.0")
                    return 0.
                except TypeError:
                    logger.debug(f"Cannot access key '{k}' - value is not a dictionary, returning 0.0")
                    return 0.
            metric = metric and _match_value_to_rule(value, r)
        except Exception as e:
            logger.error(f"Error processing expect rule {r}: {e}")
            return 0.

    # "unexpect" rules: a missing key path is fine here — the value is simply
    # treated as None, which the rule is then required NOT to match.
    for r in unexpect_rules:
        value = result_data
        try:
            for k in r["key"]:
                try:
                    value = value[k]
                except KeyError:
                    value = None
                    break
                except TypeError:
                    value = None
                    break
            metric = metric and not _match_value_to_rule(value, r)
        except Exception as e:
            logger.error(f"Error processing unexpect rule {r}: {e}")
            return 0.

    return float(metric)
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
def check_direct_json_object(result, rules) -> float:
    """
    One of the most commonly used function to evalute.
    Compare two json objects directly.

    Two modes, selected by rules["expect_in_result"]:
      * False (default): every key of rules["expected"] must compare equal
        to the corresponding key in `result`.
      * True: for list-valued expectations, at least one candidate must be
        found in the result value; for string-valued expectations, the
        expected string must be a substring of the result string.

    Returns 1.0 on success, 0.0 on any mismatch or error.
    """
    logger.info(f"[DEBUG] check_direct_json_object called with result: {result}")
    logger.info(f"[DEBUG] check_direct_json_object called with rules: {rules}")

    if isinstance(result, str):
        # remove blanks before and after result
        result = result.strip()
        # replace all ' with "
        # NOTE(review): this naive quote swap breaks payloads whose values
        # contain apostrophes (e.g. "it's") — presumably inputs never do;
        # confirm against callers.
        result = result.replace("'", '"')
        # load json object
        result = json.loads(result)

    logger.info(f"[DEBUG] Processed result: {result}")

    if result is None:
        logger.info("[DEBUG] Result is None, returning 0.0")
        return 0.

    # Check if expected value contains evaluation failure indicator
    try:
        expected_json = rules.get("expected", {})
        if expected_json:
            for key, value in expected_json.items():
                if value == "__EVALUATION_FAILED__":
                    logger.error(f"[DEBUG] Expected value for key '{key}' indicates evaluation failure, returning 0.0")
                    return 0.
    except Exception as e:
        logger.error(f"[DEBUG] Error checking for evaluation failure indicator: {e}")
        return 0.
    try:
        expect_in_result = rules.get("expect_in_result", False)
        logger.info(f"[DEBUG] expect_in_result: {expect_in_result}")

        if not expect_in_result:
            # Strict mode: each expected key must compare equal in `result`.
            expected_json = rules["expected"]
            logger.info(f"[DEBUG] Expected JSON: {expected_json}")

            for key in expected_json.keys():
                expected_value = expected_json.get(key)
                actual_value = result.get(key)
                logger.info(f"[DEBUG] Checking key '{key}': expected='{expected_value}', actual='{actual_value}'")

                # NOTE(review): "ignore_list_order" is read from expected_json
                # itself (a sibling key of the data keys) and re-applied on
                # every iteration — including to its own key; verify that
                # configs never set it alongside non-list values.
                if expected_json.get("ignore_list_order", False):
                    expected_value = sorted(expected_value)
                    result_value = sorted(result.get(key))
                    logger.info(f"[DEBUG] Comparing lists (sorted): expected={expected_value}, actual={result_value}")
                    if expected_value != result_value:
                        logger.info(f"[DEBUG] List comparison failed for key '{key}', returning 0.0")
                        return 0.
                else:
                    if expected_value != actual_value:
                        logger.info(f"[DEBUG] Value comparison failed for key '{key}': expected='{expected_value}', actual='{actual_value}', returning 0.0")
                        return 0.
                    else:
                        logger.info(f"[DEBUG] Value comparison passed for key '{key}'")

            logger.info("[DEBUG] All comparisons passed, returning 1.0")
            return 1.0
        else:
            # Containment mode: list expectations are "any of these present",
            # string expectations are substring checks.
            expected_json = rules["expected"]
            logger.info(f"[DEBUG] Expected JSON (expect_in_result mode): {expected_json}")

            for key in expected_json.keys():
                if isinstance(expected_json.get(key), list):
                    flag = 0
                    expected_value_list = expected_json.get(key)
                    logger.info(f"[DEBUG] Checking list key '{key}': expected_list={expected_value_list}, actual='{result.get(key)}'")
                    for each_expected_value in expected_value_list:
                        # Handle both list and string cases
                        if isinstance(result.get(key), list) and each_expected_value in result.get(key):
                            flag = 1
                            logger.info(f"[DEBUG] Found expected value '{each_expected_value}' in result list for key '{key}'")
                            break
                        elif isinstance(result.get(key), str) and each_expected_value == result.get(key):
                            flag = 1
                            logger.info(f"[DEBUG] Found expected value '{each_expected_value}' matches result string for key '{key}'")
                            break
                    if flag == 0:
                        logger.info(f"[DEBUG] No expected values found in result for key '{key}', returning 0.0")
                        return 0.
                elif isinstance(expected_json.get(key), str):
                    expected_str = expected_json.get(key)
                    actual_str = result.get(key)
                    logger.info(f"[DEBUG] Checking string key '{key}': expected='{expected_str}', actual='{actual_str}'")
                    # NOTE(review): raises TypeError (caught below, scoring 0)
                    # when the key is absent from `result` (actual_str is None).
                    if expected_str not in actual_str:
                        logger.info(f"[DEBUG] Expected string '{expected_str}' not found in actual string '{actual_str}' for key '{key}', returning 0.0")
                        return 0.
                else:
                    logger.debug("check_direct_json_object: expected value type not supported")
                    return 0.
            logger.info("[DEBUG] All expect_in_result comparisons passed, returning 1.0")
            return 1.0
    except Exception as e:
        logger.debug(f"check_direct_json_object: result is not a valid json object, error: {e}")
        return 0.
|
|
481
|
+
|
|
482
|
+
|
|
483
|
+
def compare_time_in_speedtest_results(speedtest_result_path, time_diff):
    """Check that the timestamp recorded in a speedtest CSV is recent.

    Reads the second line of the CSV, takes its second comma-separated
    field, interprets that field's last five characters as an HH:MM time,
    and compares it to the current wall-clock time.

    Args:
        speedtest_result_path: path to the speedtest results CSV.
        time_diff: tolerance in minutes (int or int-parsable str).

    Returns:
        int: 1 when the recorded time is within `time_diff` minutes of now,
        0 otherwise (including any read/parse failure).
    """
    if not speedtest_result_path:
        return 0

    try:
        date = None
        with open(speedtest_result_path, 'r') as f:
            for i, line in enumerate(f):
                if i == 1:
                    # FIX: strip the field so a trailing newline (when the
                    # date is the last column) cannot corrupt the HH:MM slice.
                    date = line.split(',')[1].strip()
                    break
        now_date_time = datetime.datetime.now().strftime('%H:%M')
        date_time = date[-5:]
        # Compare recorded HH:MM against now; note this does not handle the
        # midnight wrap-around (a pre-existing limitation).
        minutes_apart = abs(
            (datetime.datetime.strptime(date_time, '%H:%M')
             - datetime.datetime.strptime(now_date_time, '%H:%M')).total_seconds()
        ) / 60
        if not minutes_apart < int(time_diff):
            return 0
        return 1
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; any read/parse failure still scores 0.
        logger.debug("compare_time_in_speedtest_results: file not found or not readable")
        return 0
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
def is_included_all_json_objects(gold_file_path, result_file_path):
    """Return 1 when every key/value pair of the gold JSON file is present
    (with an equal value) in the result JSON file, else 0."""
    if not gold_file_path or not result_file_path:
        return 0

    print("gold_file_path: ")
    print(gold_file_path)
    print("result_file_path: ")
    print(result_file_path)
    # two json file, check if all the key-value pair in gold_file_path is included in result_file_path
    with open(gold_file_path, 'r') as gold_fp:
        gold = json.load(gold_fp)
    with open(result_file_path, 'r') as result_fp:
        actual = json.load(result_fp)
    all_present = all(key in actual and actual[key] == gold[key] for key in gold)
    return 1 if all_present else 0
|
|
525
|
+
|
|
526
|
+
|
|
527
|
+
def is_gold_text_included_in_pdf(pdf_file_path, gold_text_path):
    """Return 1 when every value of the gold JSON file appears verbatim in
    the text extracted from the PDF, else 0 (missing keys are printed)."""
    if not gold_text_path or not pdf_file_path:
        return 0

    print("gold_text_path: ")
    print(gold_text_path)
    print("pdf_file_path: ")
    print(pdf_file_path)
    # gold file is a json file, we need to check all the value in json are included in pdf file.
    with open(gold_text_path, 'r') as gold_fp:
        gold_json = json.load(gold_fp)
    with pdfplumber.open(pdf_file_path) as pdf:
        extracted = ''
        for page in pdf.pages:
            extracted += page.extract_text()
    missing_keys = [key for key in gold_json if gold_json[key] not in extracted]
    if missing_keys:
        print("false_list: ")
        print(missing_keys)
        return 0
    return 1
|
|
552
|
+
|
|
553
|
+
|
|
554
|
+
def file_contains(file_path, config):
    """Return 1.0 iff the text file at ``file_path`` contains every string
    listed in ``config["expected"]``.

    Args:
        file_path: path to a text file (falsy scores 0).
        config: dict with key "expected" -> list of required substrings.

    Returns:
        float: 1.0 when all substrings are present, 0.0 on any miss or
        read failure.
    """
    if not file_path:
        return 0.
    try:
        with open(file_path, 'r') as f:
            file_text = f.read()
        for text in config["expected"]:
            if text not in file_text:
                logger.debug(f"file_contains: {text} not found in {file_path}")
                return 0.
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; read failures still score 0.
        logger.debug("file_contains: file not found or not readable")
        return 0.
    return 1.
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
def check_line_number(file_path, line_number):
    """Count lines containing an HH:MM:SS timestamp and compare the count
    to ``line_number["expected"]``.

    Args:
        file_path: path to a text file.
        line_number: dict with key "expected" -> int (or int-parsable str).

    Returns:
        1 when the number of timestamped lines equals the expectation,
        0 otherwise; 0. when the file is missing or unreadable.
    """
    # check if file_path exists
    if file_path is None or not os.path.isfile(file_path):
        return 0.
    # HH:MM:SS with 24h hours and leap-second-tolerant seconds (":60").
    # Hoisted compile so the pattern is not rebuilt for every line.
    time_pattern = re.compile(r"([01]\d|2[0-3]):[0-5]\d:([0-5]\d|60)")
    try:
        with open(file_path, 'r') as f:
            line_count = sum(1 for line in f if time_pattern.search(line))
        # if line_count equals to line_number["expected"], return 1, else return 0
        return 1 if line_count == int(line_number["expected"]) else 0
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; read/parse failures still score 0.
        logger.debug("check_line_number: file not found or not readable")
        return 0.
|
|
588
|
+
|
|
589
|
+
|
|
590
|
+
def compare_terminal_and_txt(txt_file_path, terminal_output):
    """Return 1 when ``terminal_output`` equals the txt file's full content
    exactly, else 0 (also when either argument is falsy)."""
    if not txt_file_path or not terminal_output:
        return 0

    # read txt file content and compare it byte-for-byte with the output
    with open(txt_file_path, 'r') as txt_file:
        expected_text = txt_file.read()
    return int(terminal_output == expected_text)
|
|
599
|
+
|
|
600
|
+
|
|
601
|
+
def compare_python_pure_text(py_file_path, gold_file_path):
    """Compare two Python source files for equality after light
    normalization (metadata headers skipped, whitespace normalized,
    blank lines dropped). Returns 1.0 on a match, 0.0 otherwise or on
    any read error."""
    if not py_file_path or not gold_file_path:
        return 0.0

    def _normalize(text):
        """
        Minimal normalization - only handle basic formatting:
        - Skip obvious file metadata (encoding, shebang) at the beginning
        - Normalize whitespace and indentation
        - Remove empty lines

        This preserves any content that shouldn't be there (like markdown)
        so it can be detected as an error.
        """
        lines = text.splitlines()
        result_lines = []
        i = 0

        # Only skip obvious metadata at the very beginning
        while i < len(lines) and i < 3:  # Check only first 3 lines
            stripped = lines[i].strip()

            if (stripped.startswith('#!') or
                stripped.startswith('# -*- coding:') or
                stripped.startswith('# coding:') or
                stripped.startswith('# coding=')):
                i += 1
                continue

            break

        # Process all remaining lines with minimal filtering
        while i < len(lines):
            line = lines[i]
            stripped = line.strip()

            if stripped:  # Keep all non-empty lines
                # Tabs become 4 spaces and trailing whitespace is dropped;
                # leading spaces are preserved so indentation still matters.
                normalized = line.expandtabs(4).rstrip()
                result_lines.append(normalized)

            i += 1

        return '\n'.join(result_lines)

    try:
        with open(py_file_path, 'r', encoding='utf-8') as file1:
            user_content = file1.read()
        with open(gold_file_path, 'r', encoding='utf-8') as file2:
            gold_content = file2.read()

        # Apply different normalization strategies
        user_normalized = _normalize(user_content)
        gold_normalized = _normalize(gold_content)

        if user_normalized == gold_normalized:
            return 1.0
        else:
            return 0.0

    except (FileNotFoundError, IOError, UnicodeDecodeError) as e:
        logger.debug(f"compare_python_pure_text: Error reading files - {e}")
        return 0.0
    except Exception as e:
        logger.debug(f"compare_python_pure_text: Unexpected error - {e}")
        return 0.0
|