lybic-guiagents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lybic-guiagents might be problematic.
- desktop_env/__init__.py +1 -0
- desktop_env/actions.py +203 -0
- desktop_env/controllers/__init__.py +0 -0
- desktop_env/controllers/python.py +471 -0
- desktop_env/controllers/setup.py +882 -0
- desktop_env/desktop_env.py +509 -0
- desktop_env/evaluators/__init__.py +5 -0
- desktop_env/evaluators/getters/__init__.py +41 -0
- desktop_env/evaluators/getters/calc.py +15 -0
- desktop_env/evaluators/getters/chrome.py +1774 -0
- desktop_env/evaluators/getters/file.py +154 -0
- desktop_env/evaluators/getters/general.py +42 -0
- desktop_env/evaluators/getters/gimp.py +38 -0
- desktop_env/evaluators/getters/impress.py +126 -0
- desktop_env/evaluators/getters/info.py +24 -0
- desktop_env/evaluators/getters/misc.py +406 -0
- desktop_env/evaluators/getters/replay.py +20 -0
- desktop_env/evaluators/getters/vlc.py +86 -0
- desktop_env/evaluators/getters/vscode.py +35 -0
- desktop_env/evaluators/metrics/__init__.py +160 -0
- desktop_env/evaluators/metrics/basic_os.py +68 -0
- desktop_env/evaluators/metrics/chrome.py +493 -0
- desktop_env/evaluators/metrics/docs.py +1011 -0
- desktop_env/evaluators/metrics/general.py +665 -0
- desktop_env/evaluators/metrics/gimp.py +637 -0
- desktop_env/evaluators/metrics/libreoffice.py +28 -0
- desktop_env/evaluators/metrics/others.py +92 -0
- desktop_env/evaluators/metrics/pdf.py +31 -0
- desktop_env/evaluators/metrics/slides.py +957 -0
- desktop_env/evaluators/metrics/table.py +585 -0
- desktop_env/evaluators/metrics/thunderbird.py +176 -0
- desktop_env/evaluators/metrics/utils.py +719 -0
- desktop_env/evaluators/metrics/vlc.py +524 -0
- desktop_env/evaluators/metrics/vscode.py +283 -0
- desktop_env/providers/__init__.py +35 -0
- desktop_env/providers/aws/__init__.py +0 -0
- desktop_env/providers/aws/manager.py +278 -0
- desktop_env/providers/aws/provider.py +186 -0
- desktop_env/providers/aws/provider_with_proxy.py +315 -0
- desktop_env/providers/aws/proxy_pool.py +193 -0
- desktop_env/providers/azure/__init__.py +0 -0
- desktop_env/providers/azure/manager.py +87 -0
- desktop_env/providers/azure/provider.py +207 -0
- desktop_env/providers/base.py +97 -0
- desktop_env/providers/gcp/__init__.py +0 -0
- desktop_env/providers/gcp/manager.py +0 -0
- desktop_env/providers/gcp/provider.py +0 -0
- desktop_env/providers/virtualbox/__init__.py +0 -0
- desktop_env/providers/virtualbox/manager.py +463 -0
- desktop_env/providers/virtualbox/provider.py +124 -0
- desktop_env/providers/vmware/__init__.py +0 -0
- desktop_env/providers/vmware/manager.py +455 -0
- desktop_env/providers/vmware/provider.py +105 -0
- gui_agents/__init__.py +0 -0
- gui_agents/agents/Action.py +209 -0
- gui_agents/agents/__init__.py +0 -0
- gui_agents/agents/agent_s.py +832 -0
- gui_agents/agents/global_state.py +610 -0
- gui_agents/agents/grounding.py +651 -0
- gui_agents/agents/hardware_interface.py +129 -0
- gui_agents/agents/manager.py +568 -0
- gui_agents/agents/translator.py +132 -0
- gui_agents/agents/worker.py +355 -0
- gui_agents/cli_app.py +560 -0
- gui_agents/core/__init__.py +0 -0
- gui_agents/core/engine.py +1496 -0
- gui_agents/core/knowledge.py +449 -0
- gui_agents/core/mllm.py +555 -0
- gui_agents/tools/__init__.py +0 -0
- gui_agents/tools/tools.py +727 -0
- gui_agents/unit_test/__init__.py +0 -0
- gui_agents/unit_test/run_tests.py +65 -0
- gui_agents/unit_test/test_manager.py +330 -0
- gui_agents/unit_test/test_worker.py +269 -0
- gui_agents/utils/__init__.py +0 -0
- gui_agents/utils/analyze_display.py +301 -0
- gui_agents/utils/common_utils.py +263 -0
- gui_agents/utils/display_viewer.py +281 -0
- gui_agents/utils/embedding_manager.py +53 -0
- gui_agents/utils/image_axis_utils.py +27 -0
- lybic_guiagents-0.1.0.dist-info/METADATA +416 -0
- lybic_guiagents-0.1.0.dist-info/RECORD +85 -0
- lybic_guiagents-0.1.0.dist-info/WHEEL +5 -0
- lybic_guiagents-0.1.0.dist-info/licenses/LICENSE +201 -0
- lybic_guiagents-0.1.0.dist-info/top_level.txt +2 -0
desktop_env/evaluators/metrics/table.py
@@ -0,0 +1,585 @@
import functools
import itertools
import logging
import os.path
# import operator
from numbers import Number
from typing import Any, Union, cast, Callable, Iterable
from typing import Dict, List, Tuple, Set

import openpyxl
import pandas as pd
from openpyxl import Workbook
from openpyxl.cell.cell import Cell
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.cell_range import MultiCellRange
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.worksheet.worksheet import Worksheet
from rapidfuzz import fuzz

from desktop_env.evaluators.metrics.utils import _match_value_to_rule, _read_cell_style, read_cell_value
from desktop_env.evaluators.metrics.utils import load_charts, load_sparklines, load_rows_or_cols, load_xlsx_styles \
    , load_filters, load_pivot_tables

# from openpyxl.utils import coordinate_to_tuple

logger = logging.getLogger("desktopenv.metric.table")

BOOK = Union[pd.ExcelFile, Workbook, str]


def _parse_sheet_idx(sheet_idx: Union[int, str]
                     , result: BOOK, expected: BOOK
                     , result_sheet_names: List[str]
                     , expected_sheet_names: List[str]
                     ) -> Tuple[BOOK, str]:
    # function _parse_sheet_idx {{{ #
    if isinstance(sheet_idx, int):
        try:
            if not result_sheet_names or sheet_idx >= len(result_sheet_names):
                logger.error(f"Sheet index {sheet_idx} out of range. Available sheets: {result_sheet_names}")
                index = ""
            else:
                index: str = result_sheet_names[sheet_idx]
                logger.debug(f"Sheet index {sheet_idx} resolved to sheet: {index}")
        except Exception as e:
            logger.error(f"Error resolving sheet index {sheet_idx}: {e}")
            index = ""
        book: BOOK = result
    elif sheet_idx.startswith("RI"):
        try:
            index: str = result_sheet_names[int(sheet_idx[2:])]
        except:
            index = ""
        book: BOOK = result
    elif sheet_idx.startswith("RN"):
        index: str = sheet_idx[2:]
        book: BOOK = result
    elif sheet_idx.startswith("EI"):
        try:
            index: str = expected_sheet_names[int(sheet_idx[2:])]
        except:
            index = ""
        book: BOOK = expected
    elif sheet_idx.startswith("EN"):
        index: str = sheet_idx[2:]
        book: BOOK = expected
    else:
        logger.error("Unrecognized sheet index")
        raise ValueError("Unrecognized sheet index")
    return book, index
    # }}} function _parse_sheet_idx #


SHEET = Union[pd.DataFrame, Worksheet, List[str]]


def _load_sheet(book: BOOK, index: str) -> SHEET:
    # function _load_sheet {{{ #
    try:
        if isinstance(book, str):
            book: str = cast(str, book)
            csv_name: str = "{:}-{:}.csv".format(os.path.splitext(book)[0], index)

            with open(csv_name) as f:
                csv_lines: List[str] = list(itertools.dropwhile(lambda l: len(l) == 0
                                                                , map(lambda l: l.strip()
                                                                      , reversed(f.read().splitlines())
                                                                      )
                                                                )
                                            )
            return csv_lines
        if isinstance(book, pd.ExcelFile):
            return pd.read_excel(book, index)
        if isinstance(book, Workbook):
            return book[index]
        logger.error("Not supported workbook format")
        raise NotImplementedError("Not supported workbook format")
    except NotImplementedError as e:
        raise e
    except:
        return None
    # }}} function _load_sheet #


def compare_table(result: str, expected: str = None, **options) -> float:
    # function compare_table {{{ #
    """
    Args:
        result (str): path to result xlsx
        expected (str): path to golden xlsx
        rules (List[Dict[str, Any]]): list of dict like
            {
                "type": str,
                <str as parameters>: anything
            }
            as sequential rules

    Returns:
        float: the score
    """

    if result is None:
        logger.error("Result file path is None")
        return 0.

    # Check if result file exists
    if not os.path.exists(result):
        logger.error(f"Result file not found: {result}")
        return 0.

    try:
        logger.info(f"Loading result file: {result}")
        xlworkbookr: Workbook = openpyxl.load_workbook(filename=result)
        pdworkbookr = pd.ExcelFile(result)
        logger.info(f"Successfully loaded result file with sheets: {pdworkbookr.sheet_names}")
    except Exception as e:
        logger.error(f"Failed to load result file {result}: {e}")
        return 0.
    worksheetr_names: List[str] = pdworkbookr.sheet_names

    if expected is not None:
        xlworkbooke: Workbook = openpyxl.load_workbook(filename=expected)
        pdworkbooke = pd.ExcelFile(expected)
        worksheete_names: List[str] = pdworkbooke.sheet_names
    else:
        xlworkbooke: Workbook = None
        pdworkbooke = None
        worksheete_names: List[str] = None

    parse_idx: Callable[[Union[str, int], BOOK, BOOK], Tuple[BOOK, str]] = \
        functools.partial(
            _parse_sheet_idx,
            result_sheet_names=worksheetr_names,
            expected_sheet_names=worksheete_names
        )

    passes = True
    for r in options["rules"]:
        if r["type"] == "sheet_name":
            # Compare Sheet Names {{{ #
            metric: bool = worksheetr_names == worksheete_names
            logger.debug("Assertion: %s.sheet_names == %s.sheet_names - %s", result, expected, metric)
            # }}} Compare Sheet Names #

        elif r["type"] == "sheet_data":
            # Compare Sheet Data by Internal Value {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # precision: int as number of decimal digits, default to 4

            error_limit: int = r.get("precision", 4)
            sheet1: pd.DataFrame = _load_sheet(*parse_idx(r["sheet_idx0"], pdworkbookr, pdworkbooke))
            if sheet1 is None:
                return 0.
            sheet2: pd.DataFrame = _load_sheet(*parse_idx(r["sheet_idx1"], pdworkbookr, pdworkbooke))

            sheet1 = sheet1.round(error_limit)
            sheet2 = sheet2.round(error_limit)
            metric: bool = sheet1.equals(sheet2)
            logger.debug("Sheet1: \n%s", str(sheet1))
            logger.debug("Sheet2: \n%s", str(sheet2))
            try:
                logger.debug("Sheet1 =v= Sheet2: \n%s", str(sheet1 == sheet2))
            except:
                logger.debug("Sheet1 =/v= Sheet2")
            logger.debug("Assertion: %s =v= %s - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Sheet Data by Internal Value #

        elif r["type"] == "sheet_print":
            # Compare Sheet Data by Printed Value {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # ignore_case: optional, defaults to False

            sheet1: List[str] = _load_sheet(*parse_idx(r["sheet_idx0"], result, expected))
            if sheet1 is None:
                return 0.
            sheet2: List[str] = _load_sheet(*parse_idx(r["sheet_idx1"], result, expected))
            if r.get("ignore_case", False):
                sheet1 = [l.lower() for l in sheet1]
                sheet2 = [l.lower() for l in sheet2]
            metric: bool = sheet1 == sheet2
            logger.debug("Assertion: %s =p= %s - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Sheet Data by Printed Value #

        elif r["type"] == "sheet_fuzzy":
            # Fuzzy Match for Ranges {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # rules: list of dict, each dict is like
            #   { "range": ["A1:B6", "C2:E5"],
            #     "type": "includes" | "included_by" | "fuzzy_match" | "exact_match", # 0 includes 1, 0 included_by 1
            #     "threshold": 85, # for fuzzy match
            #     "ignore_case": true | false,
            #     "ignore_chars": " ()", # filtered out
            #     "trim_leadings": "+ ", # filtered by lstrip
            #     "trim_trailings": "", # filtered by rstrip
            #     "normalization": [["Rd", "Road"]], # filtered by replace
            #   }

            sheet1: Tuple[BOOK, str] = parse_idx(r["sheet_idx0"], result, expected)
            sheet2: Tuple[BOOK, str] = parse_idx(r["sheet_idx1"], result, expected)
            total_metric = True
            for rl in r["rules"]:
                for rng in MultiCellRange(rl["range"]):
                    for cdn in rng.cells:
                        coordinate: str = "{:}{:d}".format(get_column_letter(cdn[1]), cdn[0])
                        value1: str = str(read_cell_value(*sheet1, coordinate))
                        value2: str = str(read_cell_value(*sheet2, coordinate))
                        logger.debug("%s: %s vs %s", cdn, value1, value2)

                        for rplc in rl.get("normalization", []):
                            value1 = value1.replace(rplc[0], rplc[1])
                            value2 = value2.replace(rplc[0], rplc[1])
                        if "trim_leadings" in rl:
                            value1 = value1.lstrip(rl["trim_leadings"])
                            value2 = value2.lstrip(rl["trim_leadings"])
                        if "trim_trailings" in rl:
                            value1 = value1.rstrip(rl["trim_trailings"])
                            value2 = value2.rstrip(rl["trim_trailings"])
                        if "ignore_chars" in rl:
                            ignore_chars: Set[str] = set(rl["ignore_chars"])
                            value1 = "".join(filter(lambda ch: ch not in ignore_chars, value1))
                            value2 = "".join(filter(lambda ch: ch not in ignore_chars, value2))
                        if rl.get("ignore_case", False):
                            value1 = value1.lower()
                            value2 = value2.lower()

                        if rl["type"] == "includes":
                            metric: bool = value2 in value1
                        elif rl["type"] == "included_by":
                            metric: bool = value1 in value2
                        elif rl["type"] == "fuzzy_match":
                            metric: bool = fuzz.ratio(value1, value2) >= rl.get("threshold", 85.)
                        elif rl["type"] == "exact_match":
                            metric: bool = value1 == value2
                        total_metric = total_metric and metric

            metric: bool = total_metric
            logger.debug("Assertion: %s =~= %s - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Fuzzy Match for Ranges #

        elif r["type"] == "sparkline":
            # Compare Sparklines {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0

            sparkline1: Dict[str, str] = load_sparklines(*parse_idx(r["sheet_idx0"], result, expected))
            sparkline2: Dict[str, str] = load_sparklines(*parse_idx(r["sheet_idx1"], result, expected))
            metric: bool = sparkline1 == sparkline2
            logger.debug("Assertion: %s.sp == %s.sp - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Sparklines #

        elif r["type"] == "chart":
            # Compare Charts {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # chart_props: list of str, see utils.load_charts

            charts1: Dict[str, Any] = load_charts(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke), **r)
            charts2: Dict[str, Any] = load_charts(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke), **r)
            metric: bool = charts1 == charts2
            logger.debug("Assertion: %s[chart] == %s[chart] - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Charts #

        elif r["type"] == "style":
            # Compare Style (Also Conditional Formatting) {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # props: list of str indicating concerned styles, see utils._read_cell_style

            sheet_idx1: Tuple[BOOK, str] = parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke)
            book_name1: str = parse_idx(r["sheet_idx0"], result, expected)[0]
            styles1: Dict[str, List[Any]] = load_xlsx_styles(*sheet_idx1, book_name1, **r)

            sheet_idx2: Tuple[BOOK, str] = parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke)
            book_name2: str = parse_idx(r["sheet_idx1"], result, expected)[0]
            styles2: Dict[str, List[Any]] = load_xlsx_styles(*sheet_idx2, book_name2, **r)
            # number_formats1: List[str] = [c.number_format.lower() for col in sheet1.iter_cols() for c in col if c.value is not None and c.data_type=="n"]
            # number_formats2: List[str] = [c.number_format.lower() for col in sheet2.iter_cols() for c in col if c.value is not None and c.data_type=="n"]
            metric: bool = styles1 == styles2
            logger.debug("Assertion: %s.style == %s.style - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Style (Also Conditional Formatting) #

        elif r["type"] == "freeze":
            # Compare Freezing {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0

            sheet1: Worksheet = _load_sheet(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke))
            if sheet1 is None:
                return 0.
            sheet2: Worksheet = _load_sheet(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke))
            metric: bool = sheet1.freeze_panes == sheet2.freeze_panes
            logger.debug("Assertion: %s.freeze(%s) == %s.freeze(%s) - %s"
                         , r["sheet_idx0"], sheet1.freeze_panes
                         , r["sheet_idx1"], sheet2.freeze_panes
                         , metric
                         )
            # }}} Compare Freezing #

        elif r["type"] == "zoom":
            # Check Zooming {{{ #
            # sheet_idx: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # method: str
            # ref: value

            sheet: Worksheet = _load_sheet(*parse_idx(r["sheet_idx"], xlworkbookr, xlworkbooke))
            if sheet is None:
                return 0.
            zoom_scale: Number = sheet.sheet_view.zoomScale or 100.
            metric: bool = _match_value_to_rule(zoom_scale, r)
            logger.debug("Assertion: %s.zoom(%.1f) %s %.1f - %s", r["sheet_idx"], zoom_scale, r["method"], r["ref"],
                         metric)
            # }}} Check Zooming #

        elif r["type"] == "data_validation":
            # Check Data Validation {{{ #
            # sheet_idx: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # dv_props: list of dict like {attribute: {"method": str, "ref": anything}}
            # available attributes:
            #   * ranges
            #   * type
            #   * formula1
            #   * formula2
            #   * operator
            #   * allowBlank
            #   * showDropDown
            #   * showInputMessage
            #   * showErrorMessage
            #   * error
            #   * errorTitle
            #   * errorStyle
            #   * prompt
            #   * promptTitle
            #   * imeMode

            sheet: Worksheet = _load_sheet(*parse_idx(r["sheet_idx"], xlworkbookr, xlworkbooke))
            if sheet is None:
                return 0.
            data_validators: List[DataValidation] = sheet.data_validations.dataValidation

            total_metric = len(data_validators) >= len(r["dv_props"])
            for dat_vldt in data_validators:
                metric = False
                for prpt in r["dv_props"]:
                    metric = metric or all(_match_value_to_rule(getattr(dat_vldt, attrbt)
                                                                , mr
                                                                ) \
                                           for attrbt, mr in prpt.items()
                                           )
                    if metric:
                        break
                total_metric = total_metric and metric
                if not total_metric:
                    break

            logger.debug("Assertion: %s.data_validation - %s", r["sheet_idx"], total_metric)
            metric: bool = total_metric
            # }}} Check Data Validation #

        elif r["type"] == "row_props":
            # Check Row Properties {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # props: list of str, see utils.load_rows_or_cols

            rows1: Dict[str, Any] = load_rows_or_cols(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke)
                                                      , obj="row"
                                                      , **r
                                                      )
            rows2: Dict[str, Any] = load_rows_or_cols(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke)
                                                      , obj="row"
                                                      , **r
                                                      )
            logger.debug("Rows1: %s", repr(rows1))
            logger.debug("Rows2: %s", repr(rows2))
            metric: bool = rows1 == rows2
            logger.debug("Assertion: %s[rows] == %s[rows] - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Check Row Properties #

        elif r["type"] == "col_props":
            # Check Column Properties {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # props: list of str, see utils.load_rows_or_cols

            cols1: Dict[str, Any] = load_rows_or_cols(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke)
                                                      , obj="column"
                                                      , **r
                                                      )
            cols2: Dict[str, Any] = load_rows_or_cols(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke)
                                                      , obj="column"
                                                      , **r
                                                      )
            metric: bool = cols1 == cols2
            logger.debug("Assertion: %s[cols] == %s[cols] - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Check Column Properties #

        elif r["type"] == "filter":
            # Compare Filters {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0

            filters1: Dict[str, Any] = load_filters(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke), **r)
            filters2: Dict[str, Any] = load_filters(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke), **r)
            metric: bool = filters1 == filters2
            logger.debug("Assertion: %s[filter] == %s[filter] - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Filters #

        elif r["type"] == "pivot_table":
            # Compare Pivot Tables {{{ #
            # sheet_idx0: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # sheet_idx1: as sheet_idx0
            # pivot_props: list of str, see utils.load_pivot_tables

            pivots1: Dict[str, Any] = load_pivot_tables(*parse_idx(r["sheet_idx0"], xlworkbookr, xlworkbooke), **r)
            pivots2: Dict[str, Any] = load_pivot_tables(*parse_idx(r["sheet_idx1"], xlworkbookr, xlworkbooke), **r)
            metric: bool = pivots1 == pivots2
            logger.debug("Assertion: %s[pivot]==%s[pivot] - %s", r["sheet_idx0"], r["sheet_idx1"], metric)
            # }}} Compare Pivot Tables #

        elif r["type"] == "check_cell":
            # Check Cell Properties {{{ #
            # sheet_idx: 0 == "RI0" == "RNSheet1" | "EI0" == "ENSheet1"
            # coordinate: str, "E3"
            # props: dict like {attribute: {"method": str, "ref": anything}}
            # supported attributes: value & those supported by utils._read_cell_style

            try:
                sheet: Worksheet = _load_sheet(*parse_idx(r["sheet_idx"], xlworkbookr, xlworkbooke))
                if sheet is None:
                    logger.error(f"Failed to load sheet for sheet_idx: {r['sheet_idx']}")
                    return 0.
                # data_frame: pd.DataFrame = _load_sheet(*parse_idx(r["sheet_idx"], pdworkbookr, pdworkbooke))
                cell: Cell = sheet[r["coordinate"]]
                metric: bool = True
                for prpt, rule in r["props"].items():
                    if prpt == "value":
                        try:
                            parsed_result = parse_idx(r["sheet_idx"], result, expected)
                            logger.debug(f"parse_idx result: {parsed_result}")
                            val = read_cell_value(*parsed_result, r["coordinate"])
                            logger.debug(f"Cell {r['coordinate']} value: {val}")
                        except Exception as e:
                            logger.error(f"Failed to read cell value at {r['coordinate']}: {e}")
                            val = None
                    else:
                        try:
                            val = _read_cell_style(prpt, cell)
                        except Exception as e:
                            logger.error(f"Failed to read cell style {prpt} at {r['coordinate']}: {e}")
                            val = None

                    metric = metric and _match_value_to_rule(val, rule)
            except Exception as e:
                logger.error(f"Error in check_cell processing: {e}")
                return 0.

            logger.debug("Assertion: %s[%s] :%s - %s"
                         , r["sheet_idx"], r["coordinate"]
                         , repr(r["props"]), metric
                         )
            # }}} Check Cell Properties #

        else:
            raise NotImplementedError("Unimplemented sheet check: {:}".format(r["type"]))

        passes = passes and metric
        if not passes:
            break

    return float(passes)
    # }}} function compare_table #
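
# Illustrative sketch of a compare_table call (the paths, sheet indices, and the
# precision value below are hypothetical examples, not values shipped with the
# package). Rules are applied sequentially and the function returns 1.0 only if
# every rule passes:
#
#     score = compare_table(
#         "result.xlsx", "golden.xlsx",
#         rules=[
#             {"type": "sheet_name"},
#             {"type": "sheet_data", "sheet_idx0": 0, "sheet_idx1": "EI0", "precision": 2},
#         ],
#     )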


def compare_csv(result: str, expected: Union[str, List[str]], **options) -> float:
    """
    Compare CSV files. If expected is a list, returns 1.0 if result matches any of the expected files.

    Args:
        result: Path to result CSV file
        expected: Path to expected CSV file or list of paths to expected CSV files
        options: Additional options (strict, ignore_case)

    Returns:
        1.0 if result matches expected (or any file in expected list), 0.0 otherwise
    """
    if result is None:
        return 0.

    try:
        with open(result) as f:
            result_lines: Iterable[str] = f.read().splitlines()
    except (FileNotFoundError, IOError):
        return 0.

    # Convert expected to list if it's a single string (for backward compatibility)
    if isinstance(expected, str):
        expected_files = [expected]
    else:
        expected_files = expected

    # Try to match against each expected file
    for expected_file in expected_files:
        try:
            with open(expected_file) as f:
                expected_lines: Iterable[str] = f.read().splitlines()

            # Process lines based on options
            current_result_lines = result_lines
            current_expected_lines = expected_lines

            if not options.get("strict", True):
                current_result_lines = map(str.strip, current_result_lines)
                current_expected_lines = map(str.strip, current_expected_lines)
            if options.get("ignore_case", False):
                current_result_lines = map(str.lower, current_result_lines)
                current_expected_lines = map(str.lower, current_expected_lines)

            # Check if this expected file matches
            if list(current_result_lines) == list(current_expected_lines):
                return 1.0

        except (FileNotFoundError, IOError):
            # If this expected file doesn't exist, continue to next one
            continue

    # No match found
    return 0.0
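
# Illustrative call with hypothetical paths: compare_csv("out.csv", ["golden_a.csv", "golden_b.csv"],
# strict=False, ignore_case=True) returns 1.0 as soon as the result matches any one of the golden files.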


def compare_conference_city_in_order(actual_city_list_path, expected_city):
    expected_city_list = expected_city["expected"]
    wb = openpyxl.load_workbook(actual_city_list_path)
    sheet = wb.active
    actual_city_list = []
    for row in sheet["C2:C22"]:
        for cell in row:
            actual_city_list.append(cell.value)
    # expected_city holds the cities to compare against the actual city list,
    # matched in order, index by index
    try:
        for i in range(len(actual_city_list)):
            if isinstance(expected_city_list[i], str):
                if expected_city_list[i] not in actual_city_list[i]:
                    logger.debug(f"Expected city {expected_city_list[i]}; Actual city {actual_city_list[i]}")
                    print(f"Expected city {expected_city_list[i]}; Actual city {actual_city_list[i]}")
                    return 0.

            elif isinstance(expected_city_list[i], list):
                if not any(possible_str in actual_city_list[i] for possible_str in expected_city_list[i]):
                    logger.debug(f"Expected city {expected_city_list[i]}; Actual city {actual_city_list[i]}")
                    print(f"Expected city {expected_city_list[i]}; Actual city {actual_city_list[i]}")
                    return 0.

            else:
                raise TypeError("Expected city should be a string or a list of strings")

    except:
        return 0.

    return 1.
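
A minimal usage sketch for the conference-city metric follows; the module path assumes the file listing above, and the workbook path and city values are hypothetical examples rather than data shipped with the package. The "expected" list is matched positionally against cells C2:C22 of the active sheet.

from desktop_env.evaluators.metrics.table import compare_conference_city_in_order

# Hypothetical workbook path and expected cities, for illustration only.
# 21 entries cover the 21 cells C2:C22; each entry is either a required
# substring or a list of acceptable substrings for that row.
expected_city = {"expected": ["Vancouver", ["Vienna", "Wien"], "Singapore"] + ["Online"] * 18}
score = compare_conference_city_in_order("outputs/conferences.xlsx", expected_city)
print(score)  # 1.0 when every row contains its expected city, otherwise 0.0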