langfun 0.0.2.dev20240428__py3-none-any.whl → 0.0.2.dev20240502__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/eval/__init__.py +14 -1
- langfun/core/eval/base.py +490 -105
- langfun/core/eval/base_test.py +185 -53
- langfun/core/eval/matching.py +22 -21
- langfun/core/eval/matching_test.py +23 -2
- langfun/core/eval/patching.py +130 -0
- langfun/core/eval/patching_test.py +170 -0
- langfun/core/eval/scoring.py +4 -4
- langfun/core/eval/scoring_test.py +19 -2
- langfun/core/language_model.py +6 -0
- langfun/core/llms/openai.py +1 -1
- langfun/core/llms/openai_test.py +2 -1
- langfun/core/structured/scoring.py +4 -1
- langfun/core/structured/scoring_test.py +6 -0
- {langfun-0.0.2.dev20240428.dist-info → langfun-0.0.2.dev20240502.dist-info}/METADATA +1 -2
- {langfun-0.0.2.dev20240428.dist-info → langfun-0.0.2.dev20240502.dist-info}/RECORD +19 -17
- {langfun-0.0.2.dev20240428.dist-info → langfun-0.0.2.dev20240502.dist-info}/LICENSE +0 -0
- {langfun-0.0.2.dev20240428.dist-info → langfun-0.0.2.dev20240502.dist-info}/WHEEL +0 -0
- {langfun-0.0.2.dev20240428.dist-info → langfun-0.0.2.dev20240502.dist-info}/top_level.txt +0 -0
langfun/core/eval/base.py
CHANGED
@@ -18,16 +18,16 @@ import collections
 import dataclasses
 import functools
 import hashlib
+import html
 import inspect
 import io
 import os
 import re
 import threading
 import time
+import types
 from typing import Annotated, Any, Callable, Iterator, Literal, Optional, Sequence, Type, Union

-from absl import app
-from absl import flags
 import langfun.core as lf
 import langfun.core.coding as lf_coding
 from langfun.core.llms.cache import in_memory
@@ -40,7 +40,8 @@ class Evaluable(lf.Component):

   EXPERIMENT_JSON = 'experiment.json'
   RESULT_JSON = 'result.json'
-  FAILURES_JSON = 'failures.json'
+  OOP_FAILURES_JSON = 'oop_failures.json'
+  NON_OOP_FAILURES_JSON = 'non_oop_failures.json'
   INDEX_HTML = 'index.html'
   SUMMARY_HTML = 'summary.html'

@@ -358,7 +359,7 @@ class Evaluable(lf.Component):
           color='yellow')

     for node in self.nonleaf_nodes:
-      node._result = {c.id: c.result for c in node.…
+      node._result = {c.id: c.result for c in node.leaf_nodes}  # pylint: disable=protected-access
       if should_save:
         node.save(result=False, report=False)

@@ -540,13 +541,13 @@ class Evaluable(lf.Component):
           f'<div style="color: {text_color}; white-space: pre-wrap;'
           'padding: 10px; border: 1px solid; margin-top: 10px">'
       )
-      s.write(m.get('formatted_text', m.text))
+      s.write(html.escape(m.get('formatted_text', m.text)))
       if m.result is not None:
         s.write(
             '<div style="color: magenta; white-space: pre-wrap;'
             'padding: 10px; border: 1px solid; margin: 10px">'
         )
-        s.write(pg.format(m.result))
+        s.write(html.escape(pg.format(m.result)))
       s.write('</div>')
       if 'usage' in m.metadata:
         s.write(
@@ -598,7 +599,6 @@ class _LeafNode:

 @pg.use_init_args(['children'])
 class Suite(Evaluable):
   """Evaluation suite."""
-
   children: Annotated[list[Evaluable], 'Child evaluation sets or suites.']

   # Use empty ID as suite is just a container of child evaluations.
@@ -753,10 +753,12 @@ class Evaluation(Evaluable):

   # Constants.
   CACHE_JSON = 'cache.json'
-  FAILURES_HTML = 'failures.html'
+  OOP_FAILURES_HTML = 'oop_failures.html'
+  NON_OOP_FAILURES_HTML = 'non_oop_failures.html'

   @functools.cached_property
   def hash(self) -> str:
+    """Returns the semantic-based hash of the evaluation."""
     if self.is_deterministic:
       identity = pg.format(self._identifiers(), compact=True)
     else:
@@ -805,6 +807,10 @@ class Evaluation(Evaluable):
     """Returns the complete rate."""
     return self.num_completed / self.num_examples

+  #
+  # Properties on failures.
+  #
+
   @property
   def failures(self) -> list[tuple[Any, Exception]]:
     """Returns the failed examples and their errors."""
@@ -815,6 +821,15 @@ class Evaluation(Evaluable):
     """Returns the number of failed examples."""
     return len(self.failures)

+  @functools.cached_property
+  def failure_breakdown(self) -> dict[str, int]:
+    """Returns the breakdown of failures."""
+    breakdown = collections.defaultdict(int)
+    for _, error in self.failures:
+      breakdown[_error_key(error)] += 1
+    sorted_items = sorted(breakdown.items(), key=lambda x: x[1], reverse=True)
+    return pg.Dict({x[0]: x[1] for x in sorted_items})
+
   @property
   def failure_rate(self) -> float:
     """Returns the failure rate in range [0, 1]."""
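Note: `failure_breakdown` (added above) buckets failures by an error key and reports them most-frequent-first. A standalone sketch of the same counting pattern — plain Python, with a simplified stand-in for `_error_key` (the real helper, added near the end of this diff, walks the error's `cause` chain) and a plain dict in place of `pg.Dict`:

    import collections

    def error_key(error: Exception) -> str:
        # Simplified stand-in for base.py's _error_key.
        return error.__class__.__name__

    failures = [
        ('ex1', ValueError('bad value')),
        ('ex2', ValueError('bad value again')),
        ('ex3', KeyError('missing')),
    ]

    breakdown = collections.defaultdict(int)
    for _, error in failures:
        breakdown[error_key(error)] += 1

    # Most frequent error type first, as in failure_breakdown.
    sorted_items = sorted(breakdown.items(), key=lambda x: x[1], reverse=True)
    print(dict(sorted_items))  # {'ValueError': 2, 'KeyError': 1}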
@@ -822,6 +837,46 @@ class Evaluation(Evaluable):
       return 0.0
     return self.num_failures / self.num_completed

+  @functools.cached_property
+  def oop_failures(self) -> list[tuple[Any, lf_structured.MappingError]]:
+    """Returns the OOP failures."""
+    return [item for item in self.failures
+            if isinstance(item[1], lf_structured.MappingError)]
+
+  @property
+  def num_oop_failures(self) -> int:
+    """Returns the number of OOP failures."""
+    return len(self.oop_failures)
+
+  @property
+  def oop_failure_rate(self) -> float:
+    """Returns the OOP failure rate in range [0, 1]."""
+    if self.num_completed == 0:
+      return 0.0
+    return self.num_oop_failures / self.num_completed
+
+  @functools.cached_property
+  def non_oop_failures(self) -> list[tuple[Any, Exception]]:
+    """Returns the non-OOP failures."""
+    return [item for item in self.failures
+            if not isinstance(item[1], lf_structured.MappingError)]
+
+  @property
+  def num_non_oop_failures(self) -> int:
+    """Returns the number of non-OOP failures."""
+    return len(self.non_oop_failures)
+
+  @property
+  def non_oop_failure_rate(self) -> float:
+    """Returns the non-OOP failure rate in range [0, 1]."""
+    if self.num_completed == 0:
+      return 0.0
+    return self.num_non_oop_failures / self.num_completed
+
+  #
+  # Properties on usage.
+  #
+
   @property
   def has_usage(self) -> bool:
     """Returns True if token usage is enabled."""
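Note: "OOP" here refers to object-oriented prompting failures — cases where the LM response could not be mapped into the requested schema, which surface as `lf_structured.MappingError` — while everything else (timeouts, user-code bugs, etc.) counts as non-OOP. The split is a plain `isinstance` filter; a minimal sketch with a hypothetical stand-in class:

    class MappingError(Exception):
        # Stand-in for lf_structured.MappingError in this sketch.
        pass

    failures = [
        ('ex1', MappingError('response does not match schema')),
        ('ex2', TimeoutError('LM request timed out')),
    ]

    oop = [f for f in failures if isinstance(f[1], MappingError)]
    non_oop = [f for f in failures if not isinstance(f[1], MappingError)]
    assert [f[0] for f in oop] == ['ex1']
    assert [f[0] for f in non_oop] == ['ex2']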
@@ -976,13 +1031,22 @@ class Evaluation(Evaluable):
     self._total_prompt_tokens = 0
     self._total_completion_tokens = 0
     self._num_usages = 0
+    self.__dict__.pop('oop_failures', None)
+    self.__dict__.pop('non_oop_failures', None)
+
+  @property
+  def oop_failures_link(self) -> str | None:
+    """Returns the link to the OOP failures page."""
+    if self.dir is None:
+      return None
+    return self.link(os.path.join(self.dir, Evaluation.OOP_FAILURES_HTML))

   @property
-  def failures_link(self) -> str | None:
-    """Returns the link to the failures page."""
+  def non_oop_failures_link(self) -> str | None:
+    """Returns the link to the non-OOP failures page."""
     if self.dir is None:
       return None
-    return self.link(os.path.join(self.dir, Evaluation.FAILURES_HTML))
+    return self.link(os.path.join(self.dir, Evaluation.NON_OOP_FAILURES_HTML))

   def _dryrun(
       self,
@@ -1011,23 +1075,34 @@ class Evaluation(Evaluable):
         color='green',
     )

-    with lf.use_settings(debug=debug):
-      output_message = copy.process(example, **(self.additional_args or {}))
-      if self.schema is None:
-        output = output_message.text
-      else:
-        output = output_message.result
+    error, output_message = None, None

-
+    try:
+      with lf.use_settings(debug=debug):
+        output_message = copy.process(example, **(self.additional_args or {}))
+        if self.schema is None:
+          output = output_message.text
+        else:
+          output = output_message.result
+
+      if verbose:
+        lf.console.write('')
+        lf.console.write(
+            str(output),
+            title='OUTPUT',
+            color='blue',
+        )
+    except lf_structured.MappingError as e:
       lf.console.write('')
       lf.console.write(
-          str(output),
-          title='OUTPUT',
-          color='blue',
+          str(e),
+          title='ERROR',
+          color='red',
       )
+      error = e

-    copy.audit(example, output_message, …
-    result = copy.summarize()
+    copy.audit(example, output_message, error, dryrun=True)
+    result = copy.finalize()

     if verbose:
       lf.console.write('')
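Note: the rewritten `_dryrun` no longer aborts on a schema-mapping failure; it catches `lf_structured.MappingError`, reports it, and still routes the example through `audit` so the failure statistics above are exercised during a dry run. The capture-and-audit shape, as a self-contained sketch (names are illustrative, not langfun API):

    def failing_process(example):
        raise ValueError('mapping failed')  # stands in for MappingError

    def audit(example, output, error):
        print(example, output, None if error is None else type(error).__name__)

    def dryrun_one(example, process):
        error, output = None, None
        try:
            output = process(example)
        except ValueError as e:   # _dryrun catches MappingError here
            error = e
        audit(example, output, error)  # audited either way

    dryrun_one('example-1', failing_process)  # -> example-1 None ValueError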
@@ -1087,7 +1162,7 @@ class Evaluation(Evaluable):
       self.cache.save()

     # Summarize result.
-    self._result = self.summarize()
+    self._result = self.finalize()
     if verbose:
       lf.console.write(
           str(self.result),
@@ -1143,13 +1218,13 @@ class Evaluation(Evaluable):
   def _status(self, progress: lf.concurrent.Progress) -> dict[str, Any]:
     return {
         'Model': self.lm.model_id,
-        'Succeeded': f'%.{self.report_precision}f%% (%d/%d)' % (
-            progress.success_rate * 100,
+        'Succeeded': '%s (%d/%d)' % (
+            self._format_rate(progress.success_rate),
             progress.succeeded,
             progress.completed,
         ),
-        'Failed': f'%.{self.report_precision}f%% (%d/%d)' % (
-            progress.failure_rate * 100,
+        'Failed': '%s (%d/%d)' % (
+            self._format_rate(progress.failure_rate),
             progress.failed,
             progress.completed,
         ),
@@ -1159,21 +1234,20 @@ class Evaluation(Evaluable):
     assert self.result is not None
     m = self.result.metrics
     return (
-        f'COMPLETED(%s): Successes=%.{self.report_precision}f%% (%d/%d)'
-        f' Failures=%.{self.report_precision}f%% (%d/%d)'
+        'COMPLETED(%s): Successes=%s(%d/%d) Failures=%s (%d/%d)'
         % (
             run_status,
-            (1 - m.failure_rate) * 100,
+            self._format_rate(1 - m.failure_rate),
             m.total - m.failures,
             m.total,
-            m.failure_rate * 100,
+            self._format_rate(m.failure_rate),
             m.failures,
             m.total,
         )
     )

-  def summarize(self) -> pg.Dict:
-    """Summarizes the evaluation result."""
+  def finalize(self) -> pg.Dict:
+    """Finalizes the evaluation result."""
     if self.cache:
       cache_stats = dict(
           use_cache=True,
@@ -1210,12 +1284,18 @@ class Evaluation(Evaluable):
             total=self.num_completed,
             failures=self.num_failures,
             failure_rate=self.failure_rate,
+            oop_failures=self.num_oop_failures,
+            oop_failure_rate=self.oop_failure_rate,
+            non_oop_failures=self.num_non_oop_failures,
+            non_oop_failure_rate=self.non_oop_failure_rate,
+            failure_breakdown=self.failure_breakdown,
         ),
         usage=usage,
     )
     return result

-  def _summary_card(self) -> str:
+  def summary_card(self) -> str:
+    """Returns summary card in HTML."""
     s = io.StringIO()
     definition = _html_repr(self, compact=False, escape=True)
     s.write('<div><table><tr><td>')
@@ -1230,18 +1310,19 @@ class Evaluation(Evaluable):
     s.write(
         f'<a target="_blank" title="{definition}" '
         f'href="{self.index_link}">{self.hash}</a>'
+        f' [<a href="{self.link(self.dir)}">dir</a>]'
         '</td></tr><tr><td>'
     )
-    self._render_metric(s)
+    self._render_summary_metrics(s)

     # Summarize average usage.
     if self.result.usage is not None:
-      self._render_usage(s)
+      self._render_summary_usage(s)

     s.write('</td></tr></table></div>')
     return s.getvalue()

-  def _render_usage(self, s: io.StringIO) -> None:
+  def _render_summary_usage(self, s: io.StringIO) -> None:
     """Renders usage in HTML."""
     usage = self.result.usage
     total = usage.total_prompt_tokens + usage.total_completion_tokens
@@ -1255,19 +1336,65 @@ class Evaluation(Evaluable):
         f'" style="color:gray">({total} tokens)</a>'
     )

-  def _render_metric(self, s: io.StringIO) -> None:
+  def _render_summary_metrics(self, s: io.StringIO) -> None:
     """Renders metrics in HTML."""
     assert self.result is not None
     m = self.result.metrics
+
+    # OOP failures.
+    oop_failure_title = f'OOP failures ({m.oop_failures}/{m.total})'
+    if m.oop_failures:
+      oop_failure_title += '&#013;'
+      for name, count in m.failure_breakdown.items():
+        if name.startswith('MappingError'):
+          oop_failure_title += '&#013;%s: %s (%d/%d)' % (
+              name.removeprefix('MappingError.'),
+              self._format_rate(count / m.total),
+              count,
+              m.total,
+          )
+
+    extra_style = ''
+    if m.oop_failure_rate > 0.1 and m.oop_failures > 3:
+      extra_style = ';font-weight:bold'
     s.write(
-        '<a title="…
+        '<a title="%s" href="%s" style="color:magenta%s">%s</a>'
         % (
-            …
-            …
-            …
-            …
+            oop_failure_title,
+            self.oop_failures_link,
+            extra_style,
+            self._format_rate(m.oop_failure_rate),
         )
     )
+    s.write(' | ')
+
+    # Non-OOP failures.
+    non_oop_failure_title = f'Non-OOP failures ({m.non_oop_failures}/{m.total})'
+    if m.non_oop_failures:
+      non_oop_failure_title += '&#013;'
+      for name, count in m.failure_breakdown.items():
+        if not name.startswith('MappingError'):
+          non_oop_failure_title += '&#013;%s: %s (%d/%d)' % (
+              name,
+              self._format_rate(count / m.total),
+              count,
+              m.total,
+          )
+
+    extra_style = ';font-weight:bold' if m.non_oop_failures > 0 else ''
+    s.write(
+        '<a title="%s" href="%s" style="color:red%s">%s</a>'
+        % (
+            non_oop_failure_title,
+            self.non_oop_failures_link,
+            extra_style,
+            self._format_rate(m.non_oop_failure_rate),
+        )
+    )
+
+  def _format_rate(self, rate: float) -> str:
+    """Formats a rate."""
+    return f'%.{self.report_precision}f%% ' % (rate * 100)

   def audit(
       self,
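Note: `_format_rate` centralizes the percentage formatting that each call site previously built by hand, keyed off the evaluation's `report_precision`. It builds a printf-style format string with an f-string; for example (plain Python, hypothetical precision value):

    report_precision = 1  # on an Evaluation this is the report_precision field

    def format_rate(rate: float) -> str:
        # Same two-stage formatting as _format_rate above.
        return f'%.{report_precision}f%% ' % (rate * 100)

    assert format_rate(0.1234) == '12.3% '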
@@ -1287,7 +1414,13 @@ class Evaluation(Evaluable):
       dryrun: Whether or not audition takes place during dryrun.
     """
     if error is not None:
-      self._failures.append((example, …
+      self._failures.append((example, error))
+
+      # Invalidate cached properties derived from failures.
+      self.__dict__.pop('oop_failures', None)
+      self.__dict__.pop('non_oop_failures', None)
+      self.__dict__.pop('failure_breakdown', None)
+
     if isinstance(error, lf_structured.MappingError):
       message = error.lm_response
     else:
@@ -1333,16 +1466,26 @@ class Evaluation(Evaluable):
     # Save failures.
     pg.save(
         [
-            pg.Dict(…
-            …
-            …
+            pg.Dict(input=input, error=_format_error(error))
+            for input, error in self.oop_failures
+        ],
+        os.path.join(self.dir, Evaluation.OOP_FAILURES_JSON),
+    )
+    pg.save(
+        self._html([self._render_result, self._render_oop_failures]),
+        os.path.join(self.dir, Evaluation.OOP_FAILURES_HTML),
+        file_format='txt',
+    )
+    pg.save(
+        [
+            pg.Dict(input=input, error=_format_error(error))
+            for input, error in self.non_oop_failures
         ],
-        os.path.join(self.dir, Evaluation.FAILURES_JSON),
+        os.path.join(self.dir, Evaluation.NON_OOP_FAILURES_JSON),
     )
     pg.save(
-        self._html([self._render_result, self._render_failures]),
-        os.path.join(self.dir, Evaluation.FAILURES_HTML),
+        self._html([self._render_result, self._render_non_oop_failures]),
+        os.path.join(self.dir, Evaluation.NON_OOP_FAILURES_HTML),
         file_format='txt',
     )

@@ -1357,7 +1500,8 @@ class Evaluation(Evaluable):
     )
     if self.result.usage is not None:
       s.write('<td>Usage</td>')
-    s.write('<td>Failures</td>')
+    s.write('<td>OOP Failures</td>')
+    s.write('<td>Non-OOP Failures</td>')

   def _render_result_row(self, s: io.StringIO) -> None:
     s.write(
@@ -1385,16 +1529,29 @@ class Evaluation(Evaluable):
     # Usage.
     if self.result.usage is not None:
       s.write('<td>')
-      self._render_usage(s)
+      self._render_summary_usage(s)
       s.write('</td>')

-    # …
+    # OOP failures.
+    s.write(
+        '<td><span style="color:magenta">%s</span>%s</td>'
+        % (
+            self._format_rate(self.oop_failure_rate),
+            '<a href="%s">(%d/%d)</a>'
+            % (self.oop_failures_link,
+               self.num_oop_failures,
+               self.num_completed),
+        )
+    )
+    # Non-OOP failures.
     s.write(
-        '<td><span style="color:…
+        '<td><span style="color:red">%s</span>%s</td>'
         % (
-            …
+            self._format_rate(self.non_oop_failure_rate),
             '<a href="%s">(%d/%d)</a>'
-            % (self.…
+            % (self.non_oop_failures_link,
+               self.num_non_oop_failures,
+               self.num_completed),
         )
     )

@@ -1408,24 +1565,77 @@ class Evaluation(Evaluable):
     else:
       return 'cyan'

-  def _render_failures(self, s: io.StringIO) -> None:
+  def _render_oop_failures(self, s: io.StringIO) -> None:
+    self._render_failures(s, '^MappingError.*', error_color='magenta')
+
+  def _render_non_oop_failures(self, s: io.StringIO) -> None:
+    self._render_failures(s, '^(?!MappingError).*', error_color='red')
+
+  def _render_failures(
+      self, s: io.StringIO, error_regex: str, error_color: str) -> None:
     """Formats the failed cases into html."""
+    # Failure summary.
     s.write(
-        '<h2>…
+        '<h2> Error Summary </h2>'
         '<div style="white-space:pre">\n'
         '<table style="border:1px solid">'
-        '<tr class="header"><td>…
+        '<tr class="header"><td>Error type</td><td>Stats</td></tr>'
     )
+    error_regex = re.compile(error_regex)
+    if self.result.metrics.failure_breakdown:
+      for name, count in self.result.metrics.failure_breakdown.items():
+        if not error_regex.match(name):
+          continue
+
+        link = f'<a href="#{name}">{name}</a>'
+        error_rate = self._format_rate(count / self.result.metrics.total)
+        stats = (f'<span style="color:{error_color}">{error_rate} '
+                 f'({count}/{self.result.metrics.total})</span>')
+        s.write(f'<tr><td>{link}</td><td>{stats}</td></tr>')
+    s.write(
+        '</table></div>'
+        '<h2> Failed Cases </h2>'
+        '<div style="white-space:pre">'
+    )
+    # Failure details by error type.
+    failures_by_error = collections.defaultdict(list)
+    for example, error in self.failures:
+      error_name = _error_key(error)
+      if error_regex.match(error_name):
+        failures_by_error[error_name].append((example, error))
+
+    for error_key, failures in failures_by_error.items():
+      s.write(
+          f'<h3 id="{error_key}"><a href="#{error_key}">{error_key}</a> '
+          f'(count={len(failures)})</h3>'
+          '<table style="border:1px solid">'
+          '<tr class="header"><td>No.</td><td>Input</td>'
+          '<td>LM invocation</td><td>Error</td></tr>'
+      )
+      for i, (example, error) in enumerate(failures):
+        lm_response = None
+        if isinstance(error, lf.structured.MappingError):
+          lm_response = error.lm_response
+          error = error.cause
+
+        bgcolor = 'white' if i % 2 == 0 else '#DDDDDD'
+        s.write(f'<tr style="background-color: {bgcolor}"><td>{i + 1}</td>')
+        s.write('<td style="color:green;white-space:pre-wrap">')
+        s.write(pg.format(example, verbose=False))
+        s.write('</td><td>')
+        if lm_response is not None:
+          self._render_message(lm_response, s)
+        s.write(f'</td><td style="color:{error_color};white-space:pre">')
+        s.write(_format_error(error))
+        s.write('</td></tr>')
+      s.write('</table>')
+    s.write('</div>')

-    …
-    …
-    …
-    …
-    …
-      error_str = lf.text_formatting.decolored(str(error))
-      s.write(f'<td style="color:red;white-space:pre">{error_str}</td>')
-      s.write('</tr>')
-    s.write('</table></div>')
+  @classmethod
+  def visualize(cls, evaluations: list['Evaluation']) -> str | None:
+    """Visualizes a list of evaluations of this task in HTML."""
+    del evaluations
+    return None


 @pg.functor()
@@ -1578,7 +1788,7 @@ class Summary(pg.Object):
         if e is None:
           s.write('<span style="color: gray">N/A<span>')
         else:
-          s.write(e._summary_card())
+          s.write(e.summary_card())
         s.write('</td>')
       s.write('</tr>')
     s.write('</table>')
@@ -1653,13 +1863,22 @@ class Summary(pg.Object):
     s.write('<html><body>')
     for task in sorted(self.tasks(), key=lambda cls: cls.__name__):
       table_id = task.__name__.lower()
+      evaluations = self.select(task=task).evaluations
+      table = Summary.Table.from_evaluations(evaluations, pivot_field)
       s.write('<div>')
-      s.write(…
-          …
-          …
-      table = Summary.Table.from_evaluations(
-          self.select(task=task).evaluations, pivot_field
+      s.write(
+          f'<a id="{table_id}" href="#{table_id}">'
+          f'<h2>{task.__name__}</h2></a>'
       )
+
+      # Allow users to plug in visualization code (e.g. matplotlib) in the
+      # summary page.
+      visual_part = task.visualize(evaluations)
+      if visual_part:
+        s.write(visual_part)
+
+      s.write(f'<h4 style="color:gray">{len(evaluations)} experiments</h4>')
+      s.write('<hr/>')
       s.write(table.html())
       s.write('</div>')
     s.write('</body></html>')
@@ -1685,6 +1904,7 @@ class Summary(pg.Object):
                 experiment=entry,
                 dir=entry.dir,
                 metrics=entry.result.metrics if entry.result else None,
+                usage=entry.result.usage if entry.result else None,
             )
         )
     task_results[task.__name__] = results
@@ -1833,6 +2053,21 @@ class Summary(pg.Object):
   return result.join()


+def _format_error(error: Exception):
+  """Formats an error into a string."""
+  return (f'({error.__class__.__name__}) '
+          + lf.text_formatting.decolored(str(error)))
+
+
+def _error_key(error: Exception) -> str:
+  """Returns the key for an error."""
+  error_names = []
+  while error is not None:
+    error_names.append(error.__class__.__name__)
+    error = getattr(error, 'cause', None)
+  return '.'.join(error_names)
+
+
 def _html_repr(value: Any, compact: bool = True, escape: bool = False) -> str:
   """Formats prompt in HTML."""
   if type(value) is lf.Template:  # pylint: disable=unidiomatic-typecheck
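Note: `_error_key` produces the dotted keys used by `failure_breakdown` and by the `^MappingError.*` / `^(?!MappingError).*` regexes in the failure renderers: it walks the langfun-specific `cause` attribute, so a `MappingError` wrapping a `ValueError` is bucketed as `MappingError.ValueError`. A standalone sketch with a hypothetical stand-in error class:

    def error_key(error) -> str:
        # Mirrors _error_key above.
        names = []
        while error is not None:
            names.append(error.__class__.__name__)
            error = getattr(error, 'cause', None)
        return '.'.join(names)

    class MappingError(Exception):
        # Stand-in: langfun's MappingError exposes a `cause` attribute.
        def __init__(self, message, cause=None):
            super().__init__(message)
            self.cause = cause

    key = error_key(MappingError('bad output', cause=ValueError('not an int')))
    assert key == 'MappingError.ValueError'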
@@ -1909,41 +2144,191 @@ def monitor_async(
   )


-def app_run(target: Evaluable):
-  """…
+#
+# Named evaluations and experiments support.
+#

-  Args:
-    target: An Langfun evaluable object.
-  """
-  flags.DEFINE_string(
-      'root_dir', None, 'Root directory for running the evaluation.'
-  )

-  …
-  …
-  )
+class _NamedEvaluationRegistry:
+  """Named evaluation registry."""

-  …
-  …
-  …
+  def __init__(self):
+    self._registry = {}
+
+  def names(self) -> list[str]:
+    """Returns all registered names."""
+    return sorted(self._registry.keys())
+
+  def get(self, name: str) -> Type[Evaluable]:
+    """Gets an evaluation by name."""
+    if name not in self._registry:
+      raise ValueError(
+          f'Evaluation {name!r} not found. '
+          'Did you forget to import the module that registers it?'
+      )
+    return self._registry[name]
+
+  def register(
+      self,
+      name: str,
+      experiment_cls: Type[Evaluable],
+  ):
+    """Register an experiment class."""
+    self._registry[name] = experiment_cls
+
+
+_eval_registry = _NamedEvaluationRegistry()
+
+
+def registered_names() -> list[str]:
+  """Returns all registered names."""
+  return _eval_registry.names()

-  flags.DEFINE_bool(
-      'rerun',
-      False,
-      'If True, rerun the experiment even a cached result is found.',
-  )

-  …
+def get_evaluation(evaluation: str | Evaluable) -> Evaluable:
+  """Gets an evaluation experiment by name."""
+  if isinstance(evaluation, str):
+    return _eval_registry.get(evaluation)()
+  return evaluation

-  def _main(argv):
-    if len(argv) > 1:
-      raise app.UsageError('Too many command-line arguments.')

-    …
-    …
-    …
-    …
+def register(name: str):
+  """Decorator to create a named evaluation class."""
+
+  def _register(func_or_cls: Type[Evaluation] | types.FunctionType):
+    if inspect.isfunction(func_or_cls):
+      e = func_or_cls()
+      if not isinstance(e, Evaluable):
+        raise TypeError(
+            f'The return value of `{func_or_cls}` should be an instance of '
+            '`lf.eval.Evaluable` subclass.'
+        )
+
+      class GeneratedSuite(Suite):
+        # NOTE(daiyip): Delay serialization key registration for generated
+        # class.
+        auto_register = False
+        children = e.children if isinstance(e, Suite) else [e]
+
+      cls = GeneratedSuite
+      cls.__name__ = func_or_cls.__name__
+      cls.__doc__ = func_or_cls.__doc__
+      cls.__qualname__ = func_or_cls.__qualname__
+      cls.__module__ = getattr(func_or_cls, '__module__', 'wrapper')
+      cls.register_for_deserialization(cls.__type_name__)
+
+    elif issubclass(func_or_cls, Evaluable):
+      cls = func_or_cls
     else:
-      …
+      raise ValueError(f'Unsupported type: {type(func_or_cls)}')
+
+    _eval_registry.register(name, cls)
+    return cls
+
+  return _register
+
+
+def get(
+    root_dir: str,
+    evaluations: list[str | Evaluable],
+    filter: Union[  # pylint: disable=redefined-builtin
+        str,  # Regex to filter evaluation based on ID.
+        Callable[[Evaluable], bool],  # Custom filter function.
+        None  # No filtering (Default).
+    ] = None,  # pylint: disable=bad-whitespace
+    patches: list[Union[
+        str,  # String-based PyGlove patcher.
+        pg.patching.Patcher,  # PyGlove patcher object.
+        Callable[[pg.KeyPath, Any, Any], Any],  # PyGlove rebind function.
+    ]] | None = None,  # pylint: disable=bad-whitespace
+) -> Suite:
+  """Gets a suite from a list of patched evaluations.
+
+  Args:
+    root_dir: The root directory of the experiment.
+    evaluations: A list of evaluations to be included in the suite.
+    filter: A regular expression (str) for selecting sub-experiments of matched
+      IDs, or a filter function to filter the evaluations.
+    patches: A list of patches to be applied to the suite. Each element can be
+      a string (for string-based patcher), a `pg.patching.Patcher` object, or
+      a rebind function (e.g. `pg.rebind`). See `lf.eval.patch_*` for more
+      details.
+
+  Returns:
+    A suite of selected `lf.eval.Evaluation` objects.
+  """
+  evaluations = [get_evaluation(e) for e in evaluations]
+  suite = Suite(evaluations, root_dir=root_dir)
+  if patches:
+    suite = pg.patch(suite, patches)
+
+  if isinstance(filter, str):
+    regex = re.compile(filter)
+    filter = lambda x: bool(regex.match(x.id))
+
+  if filter:
+    suite = Suite(
+        [leaf for leaf in suite.leaf_nodes if filter(leaf)], root_dir=root_dir)
+  return suite
+
+
+def run(
+    root_dir: str,
+    evaluations: list[str | Evaluable],
+    filter: Union[  # pylint: disable=redefined-builtin
+        str,  # Regex to filter evaluation based on ID.
+        Callable[[Evaluable], bool],  # Custom filter function.
+        None  # No filtering (Default).
+    ] = None,  # pylint: disable=bad-whitespace
+    patches: list[Union[
+        str,  # String-based PyGlove patcher.
+        pg.patching.Patcher,  # PyGlove patcher object.
+        Callable[[pg.KeyPath, Any, Any], Any],  # PyGlove rebind function.
+    ]] | None = None,  # pylint: disable=bad-whitespace
+    mode: Literal['run', 'rerun', 'dryrun', 'noop'] = 'run',
+    debug: bool = False,
+    print_definition: bool = False,
+    **kwargs,
+) -> Suite:
+  """Run selected evaluations with patching.
+
+  Args:
+    root_dir: The root directory of the experiment.
+    evaluations: A list of evaluations to be included in the suite.
+    filter: A regular expression (str) for selecting sub-experiments of matched
+      IDs, or a filter function to filter the evaluations.
+    patches: A list of patches to be applied to the suite. Each element can be
+      a string (for string-based patcher), a `pg.patching.Patcher` object, or
+      a rebind function (e.g. `pg.rebind`). See `lf.eval.patch_*` for more
+      details.
+    mode: The mode to run the suite. "run" to run the suite, with reusing
+      existing results if available; "rerun" to rerun all evaluations even if
+      there are existing results; "dryrun" to dryrun the suite; and "noop"
+      to do nothing.
+    debug: Whether to run in debug mode.
+    print_definition: Whether to print the experiment definition.
+    **kwargs: Additional arguments to be passed to dryrun/run the suite.
+
+  Returns:
+    A suite of selected `lf.eval.Evaluation` objects.
+  """
+  suite = get(root_dir, evaluations, patches=patches, filter=filter)
+  if print_definition:
+    lf.console.write(
+        pg.format(
+            suite,
+            compact=False,
+            verbose=False,
+            hide_default_values=True,
+            python_format=True,
+        ),
+        title='[EXPERIMENT DEFINITION]',
+        color='blue',
+    )

-  …
+  if mode == 'run':
+    rerun = mode == 'rerun'
+    suite.run(debug=debug, rerun=rerun, **kwargs)
+  elif mode == 'dryrun':
+    suite.dryrun(debug=debug, **kwargs)
+  return suite
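Note: the registry plus `register`/`get_evaluation`/`get`/`run` replace the deleted absl-flags entry point: evaluations are registered under a name, then looked up, patched, filtered, and run programmatically (the `langfun/core/eval/__init__.py` change in this release exports the new entry points). A stripped-down sketch of just the registry mechanics — standalone Python, not langfun code; the real decorator additionally wraps factory functions into generated `Suite` subclasses:

    _registry = {}

    def register(name):
        """Decorator that records a class under a lookup name."""
        def _register(cls):
            _registry[name] = cls
            return cls
        return _register

    @register('my_eval')
    class MyEval:
        def run(self):
            return 'ran my_eval'

    def get_evaluation(name_or_obj):
        # Instantiate on lookup when given a name, as base.py does.
        if isinstance(name_or_obj, str):
            return _registry[name_or_obj]()
        return name_or_obj

    print(get_evaluation('my_eval').run())  # -> ran my_eval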