evalscope 1.0.1__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of evalscope might be problematic.

Files changed (155)
  1. evalscope/api/benchmark/adapters/default_data_adapter.py +18 -4
  2. evalscope/api/benchmark/adapters/multi_choice_adapter.py +5 -2
  3. evalscope/api/benchmark/adapters/text2image_adapter.py +5 -4
  4. evalscope/api/benchmark/adapters/vision_language_adapter.py +3 -1
  5. evalscope/api/benchmark/benchmark.py +27 -2
  6. evalscope/api/benchmark/meta.py +3 -0
  7. evalscope/api/evaluator/evaluator.py +5 -0
  8. evalscope/api/evaluator/state.py +5 -0
  9. evalscope/api/messages/chat_message.py +6 -1
  10. evalscope/api/mixin/__init__.py +1 -0
  11. evalscope/api/mixin/llm_judge_mixin.py +2 -0
  12. evalscope/api/mixin/sandbox_mixin.py +204 -0
  13. evalscope/api/model/generate_config.py +0 -3
  14. evalscope/api/model/model.py +1 -1
  15. evalscope/api/tool/tool_info.py +1 -1
  16. evalscope/app/ui/multi_model.py +6 -1
  17. evalscope/app/ui/single_model.py +8 -2
  18. evalscope/app/utils/data_utils.py +3 -2
  19. evalscope/app/utils/visualization.py +2 -2
  20. evalscope/arguments.py +6 -0
  21. evalscope/benchmarks/ai2d/ai2d_adapter.py +54 -0
  22. evalscope/benchmarks/amc/__init__.py +0 -0
  23. evalscope/benchmarks/amc/amc_adapter.py +46 -0
  24. evalscope/benchmarks/bbh/bbh_adapter.py +43 -17
  25. evalscope/benchmarks/bfcl/bfcl_adapter.py +106 -2
  26. evalscope/benchmarks/bfcl/generation.py +7 -7
  27. evalscope/benchmarks/blink/__init__.py +0 -0
  28. evalscope/benchmarks/blink/blink_adapter.py +61 -0
  29. evalscope/benchmarks/chartqa/__init__.py +0 -0
  30. evalscope/benchmarks/chartqa/chartqa_adapter.py +80 -0
  31. evalscope/benchmarks/chartqa/utils.py +38 -0
  32. evalscope/benchmarks/docvqa/__init__.py +0 -0
  33. evalscope/benchmarks/docvqa/docvqa_adapter.py +67 -0
  34. evalscope/benchmarks/drop/drop_adapter.py +1 -1
  35. evalscope/benchmarks/general_arena/utils.py +2 -1
  36. evalscope/benchmarks/healthbench/__init__.py +0 -0
  37. evalscope/benchmarks/healthbench/healthbench_adapter.py +282 -0
  38. evalscope/benchmarks/healthbench/utils.py +102 -0
  39. evalscope/benchmarks/hle/hle_adapter.py +3 -2
  40. evalscope/benchmarks/humaneval/humaneval_adapter.py +19 -35
  41. evalscope/benchmarks/humaneval/utils.py +235 -0
  42. evalscope/benchmarks/infovqa/__init__.py +0 -0
  43. evalscope/benchmarks/infovqa/infovqa_adapter.py +66 -0
  44. evalscope/benchmarks/live_code_bench/evaluate_utils.py +13 -6
  45. evalscope/benchmarks/live_code_bench/live_code_bench_adapter.py +60 -37
  46. evalscope/benchmarks/live_code_bench/sandbox_evaluate_utils.py +220 -0
  47. evalscope/benchmarks/math_500/math_500_adapter.py +0 -1
  48. evalscope/benchmarks/minerva_math/__init__.py +0 -0
  49. evalscope/benchmarks/minerva_math/minerva_math_adapter.py +48 -0
  50. evalscope/benchmarks/mm_bench/__init__.py +0 -0
  51. evalscope/benchmarks/mm_bench/mm_bench_adapter.py +99 -0
  52. evalscope/benchmarks/mm_star/__init__.py +0 -0
  53. evalscope/benchmarks/mm_star/mm_star_adapter.py +73 -0
  54. evalscope/benchmarks/mmmu/mmmu_adapter.py +1 -1
  55. evalscope/benchmarks/mmmu_pro/mmmu_pro_adapter.py +4 -9
  56. evalscope/benchmarks/multi_if/__init__.py +0 -0
  57. evalscope/benchmarks/multi_if/ifeval.py +3354 -0
  58. evalscope/benchmarks/multi_if/metrics.py +120 -0
  59. evalscope/benchmarks/multi_if/multi_if_adapter.py +161 -0
  60. evalscope/benchmarks/needle_haystack/needle_haystack_adapter.py +1 -4
  61. evalscope/benchmarks/ocr_bench/__init__.py +0 -0
  62. evalscope/benchmarks/ocr_bench/ocr_bench_adapter.py +101 -0
  63. evalscope/benchmarks/ocr_bench_v2/IoUscore_metric.py +87 -0
  64. evalscope/benchmarks/ocr_bench_v2/TEDS_metric.py +963 -0
  65. evalscope/benchmarks/ocr_bench_v2/__init__.py +0 -0
  66. evalscope/benchmarks/ocr_bench_v2/ocr_bench_v2_adapter.py +161 -0
  67. evalscope/benchmarks/ocr_bench_v2/page_ocr_metric.py +50 -0
  68. evalscope/benchmarks/ocr_bench_v2/parallel.py +46 -0
  69. evalscope/benchmarks/ocr_bench_v2/spotting_eval/__init__.py +0 -0
  70. evalscope/benchmarks/ocr_bench_v2/spotting_eval/readme.txt +26 -0
  71. evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py +537 -0
  72. evalscope/benchmarks/ocr_bench_v2/spotting_eval/script.py +481 -0
  73. evalscope/benchmarks/ocr_bench_v2/spotting_metric.py +179 -0
  74. evalscope/benchmarks/ocr_bench_v2/utils.py +432 -0
  75. evalscope/benchmarks/ocr_bench_v2/vqa_metric.py +254 -0
  76. evalscope/benchmarks/olympiad_bench/__init__.py +0 -0
  77. evalscope/benchmarks/olympiad_bench/olympiad_bench_adapter.py +163 -0
  78. evalscope/benchmarks/olympiad_bench/utils.py +565 -0
  79. evalscope/benchmarks/omni_bench/__init__.py +0 -0
  80. evalscope/benchmarks/omni_bench/omni_bench_adapter.py +86 -0
  81. evalscope/benchmarks/real_world_qa/__init__.py +0 -0
  82. evalscope/benchmarks/real_world_qa/real_world_qa_adapter.py +64 -0
  83. evalscope/benchmarks/tau_bench/tau_bench_adapter.py +6 -1
  84. evalscope/config.py +24 -1
  85. evalscope/constants.py +3 -0
  86. evalscope/evaluator/evaluator.py +25 -7
  87. evalscope/metrics/metric.py +78 -2
  88. evalscope/metrics/metrics.py +16 -0
  89. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/config.py +0 -0
  90. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/dist_utils.py +0 -0
  91. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/gradcam.py +0 -0
  92. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/logger.py +0 -0
  93. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/optims.py +0 -0
  94. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/registry.py +0 -0
  95. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/utils.py +0 -0
  96. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/__init__.py +0 -0
  97. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa.py +0 -0
  98. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/common/vqa_tools/vqa_eval.py +0 -0
  99. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip2_models/Qformer.py +2 -6
  100. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/blip_models/nlvr_encoder.py +2 -6
  101. evalscope/metrics/t2v_metrics/models/vqascore_models/lavis/models/med.py +2 -6
  102. evalscope/models/model_apis.py +10 -8
  103. evalscope/models/utils/openai.py +1 -2
  104. evalscope/perf/arguments.py +2 -0
  105. evalscope/perf/plugin/api/base.py +2 -2
  106. evalscope/perf/plugin/api/default_api.py +7 -7
  107. evalscope/perf/plugin/api/openai_api.py +83 -19
  108. evalscope/perf/plugin/datasets/flickr8k.py +2 -2
  109. evalscope/perf/plugin/datasets/kontext_bench.py +2 -2
  110. evalscope/perf/plugin/datasets/random_vl_dataset.py +2 -2
  111. evalscope/perf/utils/benchmark_util.py +1 -2
  112. evalscope/report/__init__.py +9 -1
  113. evalscope/report/combinator.py +45 -20
  114. evalscope/report/report.py +8 -4
  115. evalscope/run.py +1 -1
  116. evalscope/utils/function_utils.py +41 -0
  117. evalscope/utils/import_utils.py +63 -13
  118. evalscope/utils/io_utils.py +19 -11
  119. evalscope/utils/json_schema.py +25 -2
  120. evalscope/utils/logger.py +19 -0
  121. evalscope/utils/model_utils.py +1 -1
  122. evalscope/utils/multi_choices.py +16 -1
  123. evalscope/version.py +2 -2
  124. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/METADATA +10 -40
  125. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/RECORD +120 -95
  126. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/top_level.txt +0 -1
  127. tests/__init__.py +0 -1
  128. tests/benchmark/__init__.py +0 -1
  129. tests/benchmark/test_eval.py +0 -385
  130. tests/benchmark/test_image_edit.py +0 -65
  131. tests/benchmark/test_t2i.py +0 -142
  132. tests/benchmark/test_vlm.py +0 -80
  133. tests/cli/__init__.py +0 -1
  134. tests/cli/test_all.py +0 -269
  135. tests/cli/test_collection.py +0 -99
  136. tests/cli/test_custom.py +0 -268
  137. tests/cli/test_reasoning.py +0 -81
  138. tests/common.py +0 -73
  139. tests/perf/__init__.py +0 -1
  140. tests/perf/test_perf.py +0 -178
  141. tests/rag/test_clip_benchmark.py +0 -87
  142. tests/rag/test_mteb.py +0 -213
  143. tests/rag/test_ragas.py +0 -128
  144. tests/swift/__init__.py +0 -1
  145. tests/swift/test_run_swift_eval.py +0 -146
  146. tests/swift/test_run_swift_vlm_eval.py +0 -128
  147. tests/swift/test_run_swift_vlm_jugde_eval.py +0 -157
  148. tests/test_run_all.py +0 -12
  149. tests/utils.py +0 -13
  150. tests/vlm/__init__.py +0 -1
  151. tests/vlm/test_vlmeval.py +0 -102
  152. {tests/rag → evalscope/benchmarks/ai2d}/__init__.py +0 -0
  153. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/LICENSE +0 -0
  154. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/WHEEL +0 -0
  155. {evalscope-1.0.1.dist-info → evalscope-1.1.0.dist-info}/entry_points.txt +0 -0
evalscope/benchmarks/ocr_bench_v2/spotting_eval/rrc_evaluation_funcs_1_1.py (new file)
@@ -0,0 +1,537 @@
+# flake8: noqa
+#!/usr/bin/env python3
+# encoding: UTF-8
+
+# File: rrc_evaluation_funcs_1_1.py
+# Version: 1.1
+# Version info: changes for Python 3
+# Date: 2019-12-29
+# Description: File with useful functions to use by the evaluation scripts in the RRC website.
+
+import json
+import sys
+
+sys.path.append('./')
+import importlib
+import os
+import re
+import zipfile
+
+
+def print_help():
+    sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' % sys.argv[0])
+    sys.exit(2)
+
+
+def load_zip_file_keys(file, fileNameRegExp=''):
+    """
+    Returns an array with the entries of the ZIP file that match with the regular expression.
+    The key's are the names or the file or the capturing group definied in the fileNameRegExp
+    """
+    try:
+        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
+    except:
+        raise Exception('Error loading the ZIP archive.')
+
+    pairs = []
+
+    for name in archive.namelist():
+        addFile = True
+        keyName = name
+        if fileNameRegExp != '':
+            m = re.match(fileNameRegExp, name)
+            if m == None:
+                addFile = False
+            else:
+                if len(m.groups()) > 0:
+                    keyName = m.group(1)
+
+        if addFile:
+            pairs.append(keyName)
+
+    return pairs
+
+
+def load_zip_file(file, fileNameRegExp='', allEntries=False):
+    """
+    Returns an array with the contents (filtered by fileNameRegExp) of a ZIP file.
+    The key's are the names or the file or the capturing group definied in the fileNameRegExp
+    allEntries validates that all entries in the ZIP file pass the fileNameRegExp
+    """
+    try:
+        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
+    except:
+        raise Exception('Error loading the ZIP archive')
+
+    pairs = []
+    for name in archive.namelist():
+        addFile = True
+        keyName = name
+        if fileNameRegExp != '':
+            m = re.match(fileNameRegExp, name)
+            if m == None:
+                addFile = False
+            else:
+                if len(m.groups()) > 0:
+                    keyName = m.group(1)
+
+        if addFile:
+            pairs.append([keyName, archive.read(name)])
+        else:
+            if allEntries:
+                raise Exception('ZIP entry not valid: %s' % name)
+
+    return dict(pairs)
+
+
+def decode_utf8(raw):
+    """
+    Returns a Unicode object on success, or None on failure
+    """
+    try:
+        return raw.decode('utf-8-sig', errors='replace')
+    except:
+        return None
+
+
+def validate_lines_in_file(
+    fileName,
+    file_contents,
+    CRLF=True,
+    LTRB=True,
+    withTranscription=False,
+    withConfidence=False,
+    imWidth=0,
+    imHeight=0
+):
+    """
+    This function validates that all lines of the file calling the Line validation function for each line
+    """
+    utf8File = decode_utf8(file_contents)
+    if utf8File is None:
+        raise Exception('The file %s is not UTF-8' % fileName)
+
+    lines = utf8File.split('\r\n' if CRLF else '\n')
+    for line in lines:
+        line = line.replace('\r', '').replace('\n', '')
+        if line != '':
+            try:
+                validate_tl_line(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
+            except Exception as e:
+                raise Exception(('Line in sample not valid. Sample: %s Line: %s Error: %s' %
+                                 (fileName, line, str(e))).encode('utf-8', 'replace'))
+
+
+def validate_tl_line(line, LTRB=True, withTranscription=True, withConfidence=True, imWidth=0, imHeight=0):
+    """
+    Validate the format of the line. If the line is not valid an exception will be raised.
+    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
+    Posible values are:
+    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
+    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
+    """
+    get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
+
+
+def get_tl_line_values(line, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
+    """
+    Validate the format of the line. If the line is not valid an exception will be raised.
+    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
+    Posible values are:
+    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
+    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
+    Returns values from a textline. Points , [Confidences], [Transcriptions]
+    """
+    confidence = 0.0
+    transcription = ''
+    points = []
+
+    numPoints = 4
+
+    if LTRB:
+        numPoints = 4
+
+        if withTranscription and withConfidence:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$', line
+            )
+            if m == None:
+                m = re.match(
+                    r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',
+                    line
+                )
+                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription')
+        elif withConfidence:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$', line
+            )
+            if m == None:
+                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence')
+        elif withTranscription:
+            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$', line)
+            if m == None:
+                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription')
+        else:
+            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$', line)
+            if m == None:
+                raise Exception('Format incorrect. Should be: xmin,ymin,xmax,ymax')
+
+        xmin = int(m.group(1))
+        ymin = int(m.group(2))
+        xmax = int(m.group(3))
+        ymax = int(m.group(4))
+        if xmax < xmin:
+            raise Exception('Xmax value (%s) not valid (Xmax < Xmin).' % (xmax))
+        if ymax < ymin:
+            raise Exception('Ymax value (%s) not valid (Ymax < Ymin).' % (ymax))
+
+        points = [float(m.group(i)) for i in range(1, (numPoints + 1))]
+
+        if imWidth > 0 and imHeight > 0:
+            validate_point_inside_bounds(xmin, ymin, imWidth, imHeight)
+            validate_point_inside_bounds(xmax, ymax, imWidth, imHeight)
+
+    else:
+        numPoints = 8
+
+        if withTranscription and withConfidence:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',
+                line
+            )
+            if m == None:
+                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription')
+        elif withConfidence:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',
+                line
+            )
+            if m == None:
+                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence')
+        elif withTranscription:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',
+                line
+            )
+            if m == None:
+                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription')
+        else:
+            m = re.match(
+                r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',
+                line
+            )
+            if m == None:
+                raise Exception('Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4')
+
+        points = [float(m.group(i)) for i in range(1, (numPoints + 1))]
+
+        validate_clockwise_points(points)
+
+        if imWidth > 0 and imHeight > 0:
+            validate_point_inside_bounds(points[0], points[1], imWidth, imHeight)
+            validate_point_inside_bounds(points[2], points[3], imWidth, imHeight)
+            validate_point_inside_bounds(points[4], points[5], imWidth, imHeight)
+            validate_point_inside_bounds(points[6], points[7], imWidth, imHeight)
+
+    if withConfidence:
+        try:
+            confidence = float(m.group(numPoints + 1))
+        except ValueError:
+            raise Exception('Confidence value must be a float')
+
+    if withTranscription:
+        posTranscription = numPoints + (2 if withConfidence else 1)
+        transcription = m.group(posTranscription)
+        m2 = re.match(r"^\s*\"(.*)\"\s*$", transcription)
+        if m2 != None:  # Transcription with double quotes, we extract the value and replace escaped characters
+            transcription = m2.group(1).replace('\\\\', '\\').replace('\\"', '"')
+
+    return points, confidence, transcription
+
+
+def get_tl_dict_values(
+    detection,
+    withTranscription=False,
+    withConfidence=False,
+    imWidth=0,
+    imHeight=0,
+    validNumPoints=[],
+    validate_cw=True
+):
+    """
+    Validate the format of the dictionary. If the dictionary is not valid an exception will be raised.
+    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
+    Posible values:
+    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]]}
+    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4,"illegibility":false}
+    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4,"dontCare":false}
+    Returns values from the dictionary. Points , [Confidences], [Transcriptions]
+    """
+    confidence = 0.0
+    transcription = ''
+    points = []
+
+    if isinstance(detection, dict) == False:
+        raise Exception('Incorrect format. Object has to be a dictionary')
+
+    if not 'points' in detection:
+        raise Exception('Incorrect format. Object has no points key)')
+
+    if isinstance(detection['points'], list) == False:
+        raise Exception('Incorrect format. Object points key have to be an array)')
+
+    num_points = len(detection['points'])
+
+    if num_points < 3:
+        raise Exception(
+            'Incorrect format. Incorrect number of points. At least 3 points are necessary. Found: ' + str(num_points)
+        )
+
+    if len(validNumPoints) > 0 and num_points in validNumPoints == False:
+        raise Exception('Incorrect format. Incorrect number of points. Only allowed 4,8 or 12 points)')
+
+    for i in range(num_points):
+        if isinstance(detection['points'][i], list) == False:
+            raise Exception('Incorrect format. Point #' + str(i + 1) + ' has to be an array)')
+
+        if len(detection['points'][i]) != 2:
+            raise Exception('Incorrect format. Point #' + str(i + 1) + ' has to be an array with 2 objects(x,y) )')
+
+        if isinstance(detection['points'][i][0],
+                      (int, float)) == False or isinstance(detection['points'][i][1], (int, float)) == False:
+            raise Exception('Incorrect format. Point #' + str(i + 1) + ' childs have to be Integers)')
+
+        if imWidth > 0 and imHeight > 0:
+            validate_point_inside_bounds(detection['points'][i][0], detection['points'][i][1], imWidth, imHeight)
+
+        points.append(float(detection['points'][i][0]))
+        points.append(float(detection['points'][i][1]))
+
+    if validate_cw:
+        validate_clockwise_points(points)
+
+    if withConfidence:
+        if not 'confidence' in detection:
+            raise Exception('Incorrect format. No confidence key)')
+
+        if isinstance(detection['confidence'], (int, float)) == False:
+            raise Exception('Incorrect format. Confidence key has to be a float)')
+
+        if detection['confidence'] < 0 or detection['confidence'] > 1:
+            raise Exception('Incorrect format. Confidence key has to be a float between 0.0 and 1.0')
+
+        confidence = detection['confidence']
+
+    if withTranscription:
+        if not 'transcription' in detection:
+            raise Exception('Incorrect format. No transcription key)')
+
+        if isinstance(detection['transcription'], str) == False:
+            raise Exception(
+                'Incorrect format. Transcription has to be a string. Detected: '
+                + type(detection['transcription']).__name__
+            )
+
+        transcription = detection['transcription']
+
+        if 'illegibility' in detection:  # Ensures that if illegibility atribute is present and is True the transcription is set to ### (don't care)
+            if detection['illegibility'] == True:
+                transcription = '###'
+
+        if 'dontCare' in detection:  # Ensures that if dontCare atribute is present and is True the transcription is set to ### (don't care)
+            if detection['dontCare'] == True:
+                transcription = '###'
+
+    return points, confidence, transcription
+
+
+def validate_point_inside_bounds(x, y, imWidth, imHeight):
+    if x < 0 or x > imWidth:
+        raise Exception('X value (%s) not valid. Image dimensions: (%s,%s)' % (x, imWidth, imHeight))
+    if y < 0 or y > imHeight:
+        raise Exception(
+            'Y value (%s) not valid. Image dimensions: (%s,%s) Sample: %s Line:%s' % (y, imWidth, imHeight)
+        )
+
+
+def validate_clockwise_points(points):
+    """
+    Validates that the points are in clockwise order.
+    """
+    edge = []
+    for i in range(len(points) // 2):
+        edge.append((int(points[(i + 1) * 2 % len(points)]) - int(points[i * 2])) *
+                    (int(points[((i + 1) * 2 + 1) % len(points)]) + int(points[i * 2 + 1])))
+    if sum(edge) > 0:
+        raise Exception(
+            "Points are not clockwise. The coordinates of bounding points have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards."
+        )
+
+
+def get_tl_line_values_from_file_contents(
+    content,
+    CRLF=True,
+    LTRB=True,
+    withTranscription=False,
+    withConfidence=False,
+    imWidth=0,
+    imHeight=0,
+    sort_by_confidences=True
+):
+    """
+    Returns all points, confindences and transcriptions of a file in lists. Valid line formats:
+    xmin,ymin,xmax,ymax,[confidence],[transcription]
+    x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
+    """
+    pointsList = []
+    transcriptionsList = []
+    confidencesList = []
+
+    lines = content.split('\r\n' if CRLF else '\n')
+    for line in lines:
+        line = line.replace('\r', '').replace('\n', '')
+        if line != '':
+            points, confidence, transcription = get_tl_line_values(
+                line, LTRB, withTranscription, withConfidence, imWidth, imHeight
+            )
+            pointsList.append(points)
+            transcriptionsList.append(transcription)
+            confidencesList.append(confidence)
+
+    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
+        import numpy as np
+
+        sorted_ind = np.argsort(-np.array(confidencesList))
+        confidencesList = [confidencesList[i] for i in sorted_ind]
+        pointsList = [pointsList[i] for i in sorted_ind]
+        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]
+
+    return pointsList, confidencesList, transcriptionsList
+
+
+def get_tl_dict_values_from_array(
+    array,
+    withTranscription=False,
+    withConfidence=False,
+    imWidth=0,
+    imHeight=0,
+    sort_by_confidences=True,
+    validNumPoints=[],
+    validate_cw=True
+):
+    """
+    Returns all points, confindences and transcriptions of a file in lists. Valid dict formats:
+    {"points":[[x1,y1],[x2,y2],[x3,x3],..,[xn,yn]],"transcription":"###","confidence":0.4}
+    """
+    pointsList = []
+    transcriptionsList = []
+    confidencesList = []
+
+    for n in range(len(array)):
+        objectDict = array[n]
+        points, confidence, transcription = get_tl_dict_values(
+            objectDict, withTranscription, withConfidence, imWidth, imHeight, validNumPoints, validate_cw
+        )
+        pointsList.append(points)
+        transcriptionsList.append(transcription)
+        confidencesList.append(confidence)
+
+    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
+        import numpy as np
+
+        sorted_ind = np.argsort(-np.array(confidencesList))
+        confidencesList = [confidencesList[i] for i in sorted_ind]
+        pointsList = [pointsList[i] for i in sorted_ind]
+        transcriptionsList = [transcriptionsList[i] for i in sorted_ind]
+
+    return pointsList, confidencesList, transcriptionsList
+
+
+def main_evaluation(
+    p, default_evaluation_params_fn, validate_data_fn, evaluate_method_fn, show_result=True, per_sample=True
+):
+    """
+    This process validates a method, evaluates it and if it succed generates a ZIP file with a JSON entry for each sample.
+    Params:
+    p: Dictionary of parmeters with the GT/submission locations. If None is passed, the parameters send by the system are used.
+    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
+    validate_data_fn: points to a method that validates the corrct format of the submission
+    evaluate_method_fn: points to a function that evaluated the submission and return a Dictionary with the results
+    """
+
+    if p == None:
+        p = dict([s[1:].split('=') for s in sys.argv[1:]])
+        if len(sys.argv) < 3:
+            print_help()
+
+    evalParams = default_evaluation_params_fn()
+    if 'p' in p.keys():
+        evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))
+
+    resDict = {'calculated': True, 'Message': '', 'method': '{}', 'per_sample': '{}'}
+    try:
+        validate_data_fn(p['g'], p['s'], evalParams)
+        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
+        resDict.update(evalData)
+
+    except Exception as e:
+        resDict['Message'] = str(e)
+        resDict['calculated'] = False
+
+    if 'o' in p:
+        if not os.path.exists(p['o']):
+            os.makedirs(p['o'], exist_ok=True)
+
+        resultsOutputname = os.path.join(p['o'], 'eval', 'ocrbench_v2', 'results.zip')
+        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)
+
+        del resDict['per_sample']
+        if 'output_items' in resDict.keys():
+            del resDict['output_items']
+
+        outZip.writestr('method.json', json.dumps(resDict))
+
+    if not resDict['calculated']:
+        if show_result:
+            sys.stderr.write('Error!\n' + resDict['Message'] + '\n\n')
+        if 'o' in p:
+            outZip.close()
+        return resDict
+
+    if 'o' in p:
+        if per_sample == True:
+            for k, v in evalData['per_sample'].items():
+                outZip.writestr(k + '.json', json.dumps(v))

+            if 'output_items' in evalData.keys():
+                for k, v in evalData['output_items'].items():
+                    outZip.writestr(k, v)
+
+        outZip.close()
+
+    if show_result:
+        sys.stdout.write('Calculated!')
+        sys.stdout.write(json.dumps(resDict['method']))
+
+    return resDict
+
+
+def main_validation(default_evaluation_params_fn, validate_data_fn):
+    """
+    This process validates a method
+    Params:
+    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
+    validate_data_fn: points to a method that validates the corrct format of the submission
+    """
+    try:
+        p = dict([s[1:].split('=') for s in sys.argv[1:]])
+        evalParams = default_evaluation_params_fn()
+        if 'p' in p.keys():
+            evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p']))
+
+        validate_data_fn(p['g'], p['s'], evalParams)
+        print('SUCCESS')
+        sys.exit(0)
+    except Exception as e:
+        print(str(e))
+        sys.exit(101)
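
For reference, below is a minimal sketch of how the validation helpers added in this file can be exercised. It assumes the module is importable from the package path shown in the file list above; the coordinates, confidence, and transcription values are illustrative only.

# Minimal usage sketch (not part of the release); import path assumed from the file list above.
from evalscope.benchmarks.ocr_bench_v2.spotting_eval.rrc_evaluation_funcs_1_1 import (
    get_tl_dict_values,
    get_tl_line_values,
    validate_clockwise_points,
)

# LTRB text line: xmin,ymin,xmax,ymax,confidence,"transcription"
points, confidence, transcription = get_tl_line_values(
    '10,20,110,220,0.9,"hello"', LTRB=True, withTranscription=True, withConfidence=True
)
print(points, confidence, transcription)  # [10.0, 20.0, 110.0, 220.0] 0.9 hello

# Dictionary form used by the spotting metric; points are given clockwise in image coordinates
detection = {'points': [[0, 0], [100, 0], [100, 50], [0, 50]], 'transcription': 'text', 'confidence': 0.8}
print(get_tl_dict_values(detection, withTranscription=True, withConfidence=True))

# Counter-clockwise polygons are rejected by validate_clockwise_points
try:
    validate_clockwise_points([0, 0, 0, 50, 100, 50, 100, 0])
except Exception as e:
    print('rejected:', e)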