langfun 0.1.2.dev202512040805__py3-none-any.whl → 0.1.2.dev202601030804__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/agentic/action.py +2 -3
- langfun/core/eval/v2/__init__.py +1 -0
- langfun/core/eval/v2/checkpointing.py +73 -41
- langfun/core/eval/v2/evaluation.py +10 -2
- langfun/core/eval/v2/experiment.py +27 -9
- langfun/core/eval/v2/progress.py +16 -11
- langfun/core/eval/v2/reporting.py +3 -4
- langfun/core/eval/v2/runners/base.py +2 -0
- langfun/core/eval/v2/runners/ckpt_monitor.py +61 -5
- langfun/core/eval/v2/runners/ckpt_monitor_test.py +51 -0
- langfun/core/llms/__init__.py +6 -0
- langfun/core/llms/anthropic.py +59 -0
- langfun/core/llms/gemini.py +53 -1
- langfun/core/llms/google_genai.py +24 -0
- langfun/core/llms/vertexai.py +33 -0
- langfun/core/modalities/mime.py +14 -1
- langfun/core/modalities/mime_test.py +48 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/METADATA +1 -1
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/RECORD +22 -22
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/WHEEL +0 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/licenses/LICENSE +0 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/top_level.txt +0 -0

langfun/core/agentic/action.py
CHANGED

@@ -288,6 +288,8 @@ class Action(pg.Object):
 
     with session.track_action(self, max_execution_time=max_execution_time):
      try:
+        # Early terminate the action if the execution time is exceeded.
+        session.check_execution_time()
        result = self.call(session=session, **kwargs)
        self._invocation.end(result)
      except BaseException as e:

@@ -2005,9 +2007,6 @@ class Session(pg.Object, pg.views.html.HtmlTreeView.Extension):
        'signal the start and end of the session.'
    )
 
-    # Early terminate the action if the execution time is exceeded.
-    self.check_execution_time()
-
    invocation = ActionInvocation(
        pg.maybe_ref(action),
        max_execution_time=self._child_max_execution_time(max_execution_time)

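The net effect of this change is that the execution-time check now runs inside the action's own try block rather than in Session, so an exceeded deadline is reported through the same error path as any other failure of the action. A minimal, generic sketch of that control-flow pattern (plain Python, not langfun's actual Action/Session API):

import time


class DeadlineExceeded(Exception):
  """Raised when work runs past its allotted time."""


def run_with_deadline(fn, started_at: float, max_execution_time: float | None):
  """Checks the deadline inside the same try block that runs the work."""
  try:
    # Early-terminate if the execution time is exceeded; because the check
    # raises inside `try`, a timeout is recorded like any other error of `fn`.
    if (max_execution_time is not None
        and time.time() - started_at > max_execution_time):
      raise DeadlineExceeded(
          f'Exceeded max execution time of {max_execution_time}s.')
    return fn()
  except BaseException:
    # A real implementation would attach the error to the invocation here.
    raise
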
langfun/core/eval/v2/__init__.py
CHANGED

@@ -41,6 +41,7 @@ from langfun.core.eval.v2.checkpointing import PerExampleCheckpointer
 from langfun.core.eval.v2.reporting import HtmlReporter
 from langfun.core.eval.v2.reporting import ExampleHtmlGenerator
 
+# Google-internal imports.
 
 # pylint: enable=g-bad-import-order
 # pylint: enable=g-importing-member

langfun/core/eval/v2/checkpointing.py
CHANGED

@@ -38,7 +38,7 @@ class Checkpointer(experiment_lib.Plugin):
   later. When an experiment starts, the checkpointer loads any previously saved
   examples from an earlier run (or a warm-start run) into `experiment.state`,
   so the runner can skip processing them again.
-  Subclasses should implement `
+  Subclasses should implement `_list_checkpoint_files` to identify
   checkpoint files to load, and `_save_example` to save a newly processed
   example.
   """

@@ -122,7 +122,7 @@ class Checkpointer(experiment_lib.Plugin):
      example: Example,
  ) -> None:
    """Saves the example to the checkpoint file."""
-    if example.newly_processed:
+    if example.newly_processed or runner.current_run.force_recompute_metrics:
      self._save_example(runner, experiment, example)
 
  def _load_experiment(

@@ -131,7 +131,7 @@ class Checkpointer(experiment_lib.Plugin):
      experiment: Experiment,
  ) -> None:
    """Creates the checkpoint file."""
-    ckpt_files = self.
+    ckpt_files = self._list_checkpoint_files(runner, experiment)
    experiment.info(f'Found {len(ckpt_files)} checkpoint files to load.')
 
    # Load the checkpoint files in parallel.

@@ -141,18 +141,18 @@ class Checkpointer(experiment_lib.Plugin):
        experiment
    )
    context = dict(counter=0, counter_lock=threading.Lock())
-    copy_ckpt = current_run.input_root != current_run.output_root
 
    def _load_state(ckpt_file):
      error = None
      with pg.timeit() as t:
        try:
-          experiment.load_state(
-
+          loaded_examples = experiment.load_state(
+              ckpt_file,
              filter=lambda x: x.id in examples_to_load,
              load_example_metadata=lambda x: x.id in examples_to_load_metadata,
          )
        except BaseException as e:  # pylint: disable=broad-except
+          loaded_examples = []
          error = e
        finally:
          with context['counter_lock']:

@@ -170,22 +170,18 @@ class Checkpointer(experiment_lib.Plugin):
            f'Skipping the file. ({progress_str})'
        )
 
-
-
-
-
-
-
-
-
-
-
-
-
-      except BaseException as e:  # pylint: disable=broad-except
-        experiment.warning(
-            f'Failed to copy checkpoint {ckpt_file!r}: {e}.'
-        )
+      output_ckpt_file = current_run.output_path_for(
+          experiment, os.path.basename(ckpt_file)
+      )
+      if (not runner.current_run.force_recompute_metrics
+          and ckpt_file != output_ckpt_file
+          and any(e for e in loaded_examples if not e.has_error)):
+        # Write the error-free warm-start examples to the output checkpoint
+        # file.
+        with SequenceWriter(output_ckpt_file) as writer:
+          for example in loaded_examples:
+            if not example.has_error:
+              writer.add(example)
 
    _ = list(
        lf.concurrent_map(

@@ -197,10 +193,10 @@ class Checkpointer(experiment_lib.Plugin):
    )
 
  @abc.abstractmethod
-  def
+  def _list_checkpoint_files(
      self, runner: Runner, experiment: Experiment
  ) -> list[str]:
-    """Lists the checkpoint
+    """Lists the checkpoint file paths to restore."""
 
  @abc.abstractmethod
  def _save_example(

@@ -226,22 +222,41 @@ class PerExampleCheckpointer(Checkpointer):
    self._checkpoint_file_prefix = prefix
    self._checkpoint_file_ext = ext
 
-  def
+  def _list_checkpoint_files(
      self, runner: Runner, experiment: Experiment
  ) -> list[str]:
-
-
+
+    def _list_checkpoints_from(ckpt_dir: str, examples_to_load: set[int]):
+      ckpt_files = []
+      if pg.io.path_exists(ckpt_dir):
+        regex = re.compile(
+            f'{self._checkpoint_file_prefix}_(\\d+){self._checkpoint_file_ext}'
+            .replace('.', '\\.')
+        )
+        for filename in pg.io.listdir(ckpt_dir):
+          match = regex.match(filename)
+          if match and int(match.group(1)) in examples_to_load:
+            examples_to_load.remove(int(match.group(1)))
+            ckpt_files.append(os.path.join(ckpt_dir, filename))
+      return ckpt_files
+
    examples_to_load = runner.current_run.examples_to_load(experiment)
-
-
-
-
+
+    # Take output directory as the first priority to checkpoints processed in
+    # this run.
+    ckpt_files = _list_checkpoints_from(
+        runner.current_run.output_dir(experiment), examples_to_load
+    )
+    # If the input and output directories are different, also load from the
+    # input directory.
+    if (examples_to_load
+        and runner.current_run.input_root != runner.current_run.output_root):
+      ckpt_files.extend(
+          _list_checkpoints_from(
+              runner.current_run.input_dir(experiment), examples_to_load
+          )
      )
-
-      match = regex.match(filename)
-      if match and int(match.group(1)) in examples_to_load:
-        filenames.append(filename)
-      return filenames
+    return ckpt_files
 
  def _save_example(
      self,

@@ -341,13 +356,23 @@ class BulkCheckpointer(Checkpointer):
    if self._sequence_writer is not None:
      self._sequence_writer[experiment.id] = sequence_writer
 
-  def
+  def _list_checkpoint_files(
      self, runner: Runner, experiment: Experiment
  ) -> list[str]:
-    if
-
-
-
+    # Always honor the output directory if it's present, as it contains both
+    # the warm-started examples and newly processed examples.
+    output_ckpt_file = runner.current_run.output_path_for(
+        experiment, self.checkpoint_filename
+    )
+    if pg.io.path_exists(output_ckpt_file):
+      return [output_ckpt_file]
+
+    if runner.current_run.input_root != runner.current_run.output_root:
+      input_ckpt_file = runner.current_run.input_path_for(
+          experiment, self.checkpoint_filename
+      )
+      if pg.io.path_exists(input_ckpt_file):
+        return [input_ckpt_file]
    return []
 
  def on_experiment_complete(

@@ -441,5 +466,12 @@ class SequenceWriter:
      self._sequence_writer = None
      pg.io.rename(self._tmp_path, self._path)
 
+  def __enter__(self):
+    return self
+
+  def __exit__(self, *args, **kwargs):
+    del args, kwargs
+    self.close()
+
  def __del__(self):
    self.close()

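For reference, the `__enter__`/`__exit__` methods added above make `SequenceWriter` usable as a context manager, which is how the warm-start logic in `_load_state` now writes error-free examples to the output checkpoint. A minimal usage sketch; the path is hypothetical and `examples` stands in for `Example` objects loaded elsewhere (e.g. via `experiment.load_state(...)`):

from langfun.core.eval.v2 import checkpointing

examples = []  # Assumed: Example objects previously loaded from a checkpoint.

# close() renames the temporary file into its final location; __exit__ now
# guarantees that close() runs even if the loop raises.
with checkpointing.SequenceWriter('/tmp/warm_start/checkpoint.jsonl') as writer:
  for example in examples:
    if not example.has_error:  # mirrors the error-free filter above
      writer.add(example)
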
langfun/core/eval/v2/evaluation.py
CHANGED

@@ -114,6 +114,13 @@ class Evaluation(experiment_lib.Experiment):
    self._log_entries = []
    self._log_lock = threading.Lock()
 
+  def _identity(self) -> str:
+    """Returns the definition of the evaluation."""
+    return self.format(
+        compact=True, hide_default_values=True, use_inferred=True,
+        exclude_keys=('plugins', 'progress', 'usage_summary')
+    )
+
  #
  # Handling evaluation hierarchy (materialized vs. hyper evaluations).
  #

@@ -379,10 +386,10 @@ class Evaluation(experiment_lib.Experiment):
      load_example_metadata: bool = True,
      filter: Callable[[example_lib.Example], bool] | None = None,  # pylint: disable=redefined-builtin
      raise_if_not_exist: bool = False
-  ) ->
+  ) -> list[example_lib.Example]:
    """Loads saved state from a sequence IO file."""
    if pg.io.path_exists(state_file):
-      self._state.load(
+      return self._state.load(
          state_file,
          example_input_by_id=self.example_input_by_id,
          load_example_metadata=load_example_metadata,

@@ -390,6 +397,7 @@ class Evaluation(experiment_lib.Experiment):
      )
    elif raise_if_not_exist:
      raise ValueError(f'State file {state_file} does not exist.')
+    return []
 
  def _reset(self) -> None:
    """Resets the state of the evaluation."""

langfun/core/eval/v2/experiment.py
CHANGED

@@ -268,11 +268,11 @@ class Experiment(lf.Component, pg.views.HtmlTreeView.Extension):
  @functools.cached_property
  def hash(self) -> str:
    """A 8-byte MD5 hash computed from experiment identity."""
-
-
-
-
-
+    return hashlib.md5(self._identity().encode()).hexdigest()[:8]
+
+  @abc.abstractmethod
+  def _identity(self) -> str:
+    """Returns the identity of the experiment."""
 
  @classmethod
  def link(cls, path: str) -> str:

@@ -376,23 +376,24 @@ class Experiment(lf.Component, pg.views.HtmlTreeView.Extension):
  def run(
      self,
      root_dir: str,
-      id: str | None = None,
+      id: str | None = None,  # pylint: disable=redefined-builtin
      *,
      runner: str = 'parallel',
      warm_start_from: str | None = None,
-      filter: Callable[['Experiment'], bool] | None = None,
+      filter: Callable[['Experiment'], bool] | None = None,  # pylint: disable=redefined-builtin
      example_ids: list[int] | None = None,
      shuffle_inputs: bool = False,
      raise_if_has_error: bool = False,
      reevaluate_upon_previous_errors: bool = True,
      reprocess: bool | list[int] = False,
+      force_recompute_metrics: bool = False,
      generate_example_html: Literal['new', 'all', 'no'] | list[int] = 'new',
      process_timeout: int | None = None,
      use_cache: Literal['global', 'per_dataset', 'no'] = 'per_dataset',
      note: str | None = None,
      tags: list[str] | None = None,
      plugins: list['Plugin'] | None = None,
-      **kwargs
+      **kwargs,
  ) -> 'Run':
    """Runs the experiment.
 

@@ -445,6 +446,8 @@ class Experiment(lf.Component, pg.views.HtmlTreeView.Extension):
        meaning that existing checkpoints will be ignored. If a list of
        example IDs, it indicates that only the specified examples will be
        reprocessed.
+      force_recompute_metrics: If True, it will recompute the metrics for all
+        examples, even if the previous checkpoints have metric metadata.
      generate_example_html: Among 'new', 'all', 'no' or a list of example IDs.
        If 'new', generate HTML files for all newly processed examples, and
        keep/copy existing HTML files for unchanged examples.

@@ -481,6 +484,7 @@ class Experiment(lf.Component, pg.views.HtmlTreeView.Extension):
        raise_if_has_error=raise_if_has_error,
        reevaluate_upon_previous_errors=reevaluate_upon_previous_errors,
        reprocess=reprocess,
+        force_recompute_metrics=force_recompute_metrics,
        generate_example_html=generate_example_html,
        use_cache=use_cache,
        process_timeout=process_timeout,

@@ -691,6 +695,12 @@ class Suite(Experiment):
    """Returns whether the task is a leaf."""
    return False
 
+  def _identity(self) -> str:
+    """Returns the definition of the evaluation."""
+    return '[' + ', '.join(
+        [child._identity() for child in self.children]  # pylint: disable=protected-access
+    ) + ']'
+
 
 class RunId(pg.Object):
  """Structured repreesentation a experiment run ID."""

@@ -878,6 +888,14 @@ class Run(pg.Object, pg.views.html.HtmlTreeView.Extension):
      )
  ] = True
 
+  force_recompute_metrics: Annotated[
+      bool,
+      (
+          'If True, force recompute the metrics even if metric metadata is '
+          'already present from previous checkpoint.'
+      )
+  ] = False
+
  note: Annotated[
      str | None,
      'The user note for the current run.'

@@ -997,7 +1015,7 @@ class Run(pg.Object, pg.views.html.HtmlTreeView.Extension):
    load_metadata_ids = set()
    if isinstance(self.generate_example_html, list):
      load_metadata_ids = set(self.generate_example_html)
-    elif self.generate_example_html == 'all':
+    elif self.generate_example_html == 'all' or self.force_recompute_metrics:
      load_metadata_ids = self.examples_to_evaluate(experiment)
    load_metadata_ids -= self.examples_to_reprocess(experiment)
    return load_metadata_ids

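A minimal sketch of how the new flag is passed from user code; the test-helper experiment and temporary directory are used purely for illustration:

import os
import tempfile

from langfun.core.eval.v2 import eval_test_helper

exp = eval_test_helper.test_experiment()
root_dir = os.path.join(tempfile.mkdtemp(), 'force_recompute_demo')

# `force_recompute_metrics=True` is forwarded into the Run object; per the
# checkpointing and reporting hunks, it also causes warm-started examples to
# be re-saved and their HTML regenerated.
run = exp.run(
    root_dir,
    runner='sequential',
    force_recompute_metrics=True,
)
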
langfun/core/eval/v2/progress.py
CHANGED

@@ -227,7 +227,11 @@ class Progress(pg.Object, pg.views.HtmlTreeView.Extension):
 
  def merge_from(self, other: 'Progress') -> None:
    """Merges the progress from another progress."""
-    with
+    with (
+        self._lock,
+        pg.notify_on_change(False),
+        pg.allow_writable_accessors(True)
+    ):
      if other.start_time is not None and (
          self.start_time is None or self.start_time > other.start_time):
        self.start_time = other.start_time

@@ -268,16 +272,17 @@ class Progress(pg.Object, pg.views.HtmlTreeView.Extension):
        stop_time=self.stop_time_str,
    )
    if self.execution_summary:
-
-
-
-
-
-
-
-
-
-
+      with self._lock:
+        time_info['execution'] = pg.Dict(
+            {
+                k: pg.Dict(
+                    num_started=v.num_started,
+                    num_ended=v.num_ended,
+                    num_failed=v.num_failed,
+                    avg_duration=round(v.avg_duration, 2),
+                ) for k, v in self.execution_summary.breakdown.items()
+            }
+        )
    return pg.format(time_info, verbose=False)
 
  def _html_tree_view(

langfun/core/eval/v2/reporting.py
CHANGED

@@ -86,10 +86,8 @@ class ExampleHtmlGenerator(experiment_lib.Plugin):
      return
 
    try:
-      with pg.timeit() as t
-
-        with pg.io.open(dest_file, 'w') as dest:
-          dest.write(content)
+      with pg.timeit() as t:
+        pg.io.copy(src_file, dest_file)
      experiment.info(
          f'\'{example.id}.html\' copied in {t.elapse:.2f} seconds.'
      )

@@ -101,6 +99,7 @@ class ExampleHtmlGenerator(experiment_lib.Plugin):
 
    generate_example_html = current_run.generate_example_html
    if (generate_example_html == 'all'
+        or runner.current_run.force_recompute_metrics
        or (generate_example_html == 'new' and example.newly_processed)
        or (isinstance(generate_example_html, list)
            and example.id in generate_example_html)):

langfun/core/eval/v2/runners/base.py
CHANGED

@@ -139,6 +139,7 @@ class RunnerBase(Runner):
          self.current_run.examples_to_evaluate(experiment)
      )
      experiment.progress.start(total=num_examples_to_evaluate)
+      pg.io.mkdirs(self.current_run.output_dir(experiment))
    else:
      experiment.progress.start(total=len(experiment.leaf_nodes))
 

@@ -395,6 +396,7 @@ class RunnerBase(Runner):
          item,
          raise_if_has_error=self.current_run.raise_if_has_error,
          reevaluate_upon_previous_errors=self.current_run.reevaluate_upon_previous_errors,
+          force_recompute_metrics=self.current_run.force_recompute_metrics,
      )
      self.on_example_complete(evaluation, item)
      return item

langfun/core/eval/v2/runners/ckpt_monitor.py
CHANGED

@@ -60,6 +60,19 @@ class CheckpointMonitor(base.RunnerBase):
      'The maximum number of threads to aggregate checkpoints.'
  ] = 128
 
+  bypass_old_ckpt_files_with_non_oop_errors: Annotated[
+      bool,
+      'If True, ignore old checkpoint files with non-oop errors.'
+  ] = True
+
+  ckpt_start_time: Annotated[
+      float | None,
+      (
+          'The timestamp to treat checkpoint files modified before this '
+          'time as old.'
+      )
+  ] = None
+
  @dataclasses.dataclass
  class _AggregationEntry:
    evaluation: evaluation_lib.Evaluation

@@ -78,6 +91,9 @@ class CheckpointMonitor(base.RunnerBase):
    self._aggregation_entries = []
    self._aggregator_pool = None
    self._error = None
+    if self.ckpt_start_time is None:
+      self.rebind(ckpt_start_time=time.time(), skip_notification=True)
+    self._ckpt_bypass_timestamp: dict[str, int] = {}
 
  def start(self):
    # Reset the experiment state before getting started.

@@ -165,6 +181,14 @@ class CheckpointMonitor(base.RunnerBase):
            os.path.basename(filepath).split('.')[0].split('_')[-1]
        )
        if example_id in entry.example_ids_to_be_aggregated:
+          last_modified_time = pg.io.getmtime(filepath)
+          bypass_timestamp = self._ckpt_bypass_timestamp.get(filepath)
+          if (
+              bypass_timestamp is not None
+              and last_modified_time <= bypass_timestamp
+          ):
+            continue
+
          # Remove example ID from the set to avoid duplicate processing.
          entry.example_ids_to_be_aggregated.remove(example_id)
          entry.example_ids_being_aggregated.add(example_id)

@@ -177,7 +201,7 @@ class CheckpointMonitor(base.RunnerBase):
          entry.example_ids_inprogress.add(example_id)
 
          self._aggregator_pool.submit(
-              self._aggregate, entry, filepath, example_id
+              self._aggregate, entry, filepath, example_id, last_modified_time
          )
          pg.logging.info(
              '[%s] Aggregating example %d from %s...',

@@ -196,7 +220,8 @@ class CheckpointMonitor(base.RunnerBase):
      self,
      entry: _AggregationEntry,
      ckpt_filepath: str,
-      example_id: int
+      example_id: int,
+      last_modified_time: float,
  ):
    """Aggregate an example from a checkpoint file."""
    try:

@@ -212,6 +237,25 @@ class CheckpointMonitor(base.RunnerBase):
      # example processed multiple times. We only need to aggregate the last
      # example.
      example = loaded_examples[-1]
+      if (
+          self.bypass_old_ckpt_files_with_non_oop_errors
+          and last_modified_time < self.ckpt_start_time
+          and example.error is not None
+          and not example.error.tag.startswith('MappingError')
+      ):
+        entry.example_ids_being_aggregated.remove(example_id)
+        entry.example_ids_to_be_aggregated.add(example_id)
+        self._ckpt_bypass_timestamp[ckpt_filepath] = last_modified_time
+        pg.logging.info(
+            '[%s] Bypassing old checkpoint file with non-oop errors (%s) '
+            'for example %d, last_modified_time: %s, ckpt_start_time: %s',
+            entry.evaluation.id,
+            ckpt_filepath,
+            example_id,
+            last_modified_time,
+            self.ckpt_start_time,
+        )
+        return
    except BaseException as e:  # pylint: disable=broad-except
      error_info = pg.ErrorInfo.from_exception(e)
      pg.logging.error(

@@ -229,9 +273,21 @@ class CheckpointMonitor(base.RunnerBase):
    # This will skip processing but still allow metrics to be collected.
    # `process` will never be called for evaluation, thus we do not
    # need to setup/teardown evaluation.
-
-
-
+    try:
+      example = entry.evaluation.evaluate(
+          example, reevaluate_upon_previous_errors=False
+      )
+    except BaseException as e:  # pylint: disable=broad-except
+      pg.logging.error(
+          '[%s] Unexpected error found during evaluating example %d from %s.',
+          entry.evaluation.id,
+          example_id,
+          ckpt_filepath,
+      )
+      self._error = e
+      entry.example_ids_being_aggregated.remove(example_id)
+      return
+
    example.newly_processed = True
    pg.logging.info(
        '[%s] Successfully aggregated example %d from %s.',

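A minimal sketch of wiring the new fields together, mirroring the tests that follow; `run` is assumed to be the Run object returned by `Experiment.run(...)`:

import time

from langfun.core.eval.v2.runners import ckpt_monitor

# With bypass_old_ckpt_files_with_non_oop_errors=True (the default), checkpoint
# files modified before ckpt_start_time whose last example carries a non-oop
# (non-MappingError) error are bypassed until they are rewritten.
monitor = ckpt_monitor.CheckpointMonitor(
    run,  # assumed: a Run returned by Experiment.run(...)
    plugins=[],
    checkpoint_pattern='checkpoint_*.jsonl',
    monitor_inprogress_files=True,
    ckpt_start_time=time.time(),
)
monitor.run()
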
langfun/core/eval/v2/runners/ckpt_monitor_test.py
CHANGED

@@ -13,8 +13,10 @@
 # limitations under the License.
 import os
 import tempfile
+import time
 import unittest
 
+import langfun.core as lf
 from langfun.core.eval.v2 import checkpointing
 from langfun.core.eval.v2 import eval_test_helper
 from langfun.core.eval.v2 import example as example_lib

@@ -33,6 +35,7 @@ class CheckpointMonitorTest(unittest.TestCase):
  def test_aggregate(self):
    exp = eval_test_helper.test_experiment()
    root_dir = os.path.join(self.test_dir, 'test_aggregate')
+    ckpt_start_time = time.time()
    run = exp.run(
        root_dir,
        runner='sequential',

@@ -55,6 +58,7 @@ class CheckpointMonitorTest(unittest.TestCase):
        plugins=[plugin],
        checkpoint_pattern='checkpoint_*.jsonl',
        monitor_inprogress_files=True,
+        ckpt_start_time=ckpt_start_time,
    )
    monitor.run()
 

@@ -70,7 +74,50 @@ class CheckpointMonitorTest(unittest.TestCase):
    for e in exp.leaf_nodes:
      self.assertEqual(e.progress.num_completed, 10)
 
+  def test_ignore_old_ckpt_files_with_non_oop_errors(self):
+    exp = eval_test_helper.test_evaluation()
+    root_dir = os.path.join(self.test_dir, 'test_ignore_old_ckpt_files')
+    run = exp.run(
+        root_dir,
+        runner='sequential',
+        progress_tracker=None,
+        plugins=[
+            checkpointing.PerExampleCheckpointer(
+                checkpoint_filename='checkpoint.jsonl'
+            )
+        ],
+        use_cache='no',
+    )
+    monitor = ckpt_monitor.CheckpointMonitor(
+        run,
+        plugins=[],
+        checkpoint_pattern='checkpoint_*.jsonl',
+        monitor_inprogress_files=True
+    )
+    monitor.start()
+    time.sleep(2)
+    # Example 6 is a non-oop error, we simulate a re-evaluation.
+    ex = example_lib.Example(
+        id=6, output=1, metric_metadata={'match': {'is_correct': True}},
+        start_time=time.time() - 2, end_time=time.time(),
+        usage_summary=lf.UsageSummary(),
+        execution_status={
+            'evaluate': pg.utils.TimeIt.Status(name='evaluate', elapse=1)
+        }
+    )
+    with pg.io.open_sequence(
+        run.output_path_for(exp, 'checkpoint_6.jsonl'),
+        mode='w'
+    ) as f:
+      f.add(pg.to_json_str(ex))
+    print(time.time(), pg.io.listdir(run.output_dir(exp)))
+    monitor.join()
+    self.assertEqual(exp.progress.num_processed, 10)
+    self.assertEqual(exp.progress.num_completed, 10)
+    self.assertEqual(exp.progress.num_failed, 0)
+
  def test_aggregate_with_filter(self):
+    ckpt_start_time = time.time()
    exp = eval_test_helper.test_experiment()
    root_dir = os.path.join(self.test_dir, 'test_aggregate_with_filter')
 

@@ -93,6 +140,7 @@ class CheckpointMonitorTest(unittest.TestCase):
        run,
        plugins=[plugin],
        checkpoint_pattern='checkpoint_*.jsonl',
+        ckpt_start_time=ckpt_start_time,
    )
    monitor.run()
 

@@ -127,6 +175,7 @@ class CheckpointMonitorTest(unittest.TestCase):
        if self.simulate_raise_on_experiment_complete:
          raise ValueError('experiment complete error')
 
+    ckpt_start_time = time.time()
    exp = eval_test_helper.test_evaluation()
    root_dir = os.path.join(self.test_dir, 'test_plugin_raise')
 

@@ -148,6 +197,7 @@ class CheckpointMonitorTest(unittest.TestCase):
        run,
        plugins=[TestPlugin(simulate_raise_on_example_complete=True)],
        checkpoint_pattern='checkpoint_*.jsonl',
+        ckpt_start_time=ckpt_start_time,
    ).run()
 
    with self.assertRaisesRegex(ValueError, 'experiment complete error'):

@@ -155,6 +205,7 @@ class CheckpointMonitorTest(unittest.TestCase):
          run,
          plugins=[TestPlugin(simulate_raise_on_experiment_complete=True)],
          checkpoint_pattern='checkpoint_*.jsonl',
+          ckpt_start_time=ckpt_start_time,
      ).run()
 
 

langfun/core/llms/__init__.py
CHANGED

@@ -43,6 +43,7 @@ from langfun.core.llms.azure_openai import AzureOpenAI
 # Gemini models.
 from langfun.core.llms.google_genai import GenAI
 from langfun.core.llms.google_genai import Gemini3ProPreview
+from langfun.core.llms.google_genai import Gemini3FlashPreview
 from langfun.core.llms.google_genai import Gemini25Pro
 from langfun.core.llms.google_genai import Gemini25Flash
 from langfun.core.llms.google_genai import Gemini25ProPreview_20250605

@@ -65,6 +66,7 @@ from langfun.core.llms.google_genai import Gemini2ProExp_20250205
 from langfun.core.llms.google_genai import Gemini2FlashThinkingExp_20250121
 from langfun.core.llms.google_genai import GeminiExp_20241206
 from langfun.core.llms.google_genai import Gemini25FlashImagePreview
+from langfun.core.llms.google_genai import Gemini3ProImagePreview
 
 from langfun.core.llms.vertexai import VertexAIGemini
 from langfun.core.llms.vertexai import VertexAIGemini2Flash

@@ -92,6 +94,8 @@ from langfun.core.llms.vertexai import VertexAIGemini25Pro
 from langfun.core.llms.vertexai import VertexAIGemini25Flash
 from langfun.core.llms.vertexai import VertexAIGemini25FlashImagePreview
 from langfun.core.llms.vertexai import VertexAIGemini3ProPreview
+from langfun.core.llms.vertexai import VertexAIGemini3ProImagePreview
+from langfun.core.llms.vertexai import VertexAIGemini3FlashPreview
 
 # For backward compatibility.
 GeminiPro1_5 = Gemini15Pro

@@ -158,6 +162,7 @@ from langfun.core.llms.openai import Gpt35
 from langfun.core.llms.anthropic import Claude45
 from langfun.core.llms.anthropic import Claude45Haiku_20251001
 from langfun.core.llms.anthropic import Claude45Sonnet_20250929
+from langfun.core.llms.anthropic import Claude45Opus_20251101
 from langfun.core.llms.anthropic import Claude4
 from langfun.core.llms.anthropic import Claude4Sonnet_20250514
 from langfun.core.llms.anthropic import Claude4Opus_20250514

@@ -177,6 +182,7 @@ from langfun.core.llms.anthropic import Claude3Haiku_20240307
 from langfun.core.llms.vertexai import VertexAIAnthropic
 from langfun.core.llms.vertexai import VertexAIClaude45Haiku_20251001
 from langfun.core.llms.vertexai import VertexAIClaude45Sonnet_20250929
+from langfun.core.llms.vertexai import VertexAIClaude45Opus_20251101
 from langfun.core.llms.vertexai import VertexAIClaude4Opus_20250514
 from langfun.core.llms.vertexai import VertexAIClaude4Sonnet_20250514
 from langfun.core.llms.vertexai import VertexAIClaude37Sonnet_20250219

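The newly exported names are used like any other langfun model class. A minimal sketch, assuming credentials for the respective providers are configured in the environment:

import langfun as lf

gemini_flash = lf.llms.Gemini3FlashPreview()           # Google GenAI
claude_opus = lf.llms.Claude45Opus_20251101()          # Anthropic
vertex_opus = lf.llms.VertexAIClaude45Opus_20251101()  # Claude on VertexAI

# e.g. lf.query('1 + 1 = ?', int, lm=gemini_flash)
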
langfun/core/llms/anthropic.py
CHANGED

@@ -113,6 +113,32 @@ SUPPORTED_MODELS = [
            max_output_tokens_per_minute=400_000,
        ),
    ),
+    AnthropicModelInfo(
+        model_id='claude-opus-4-5-20251101',
+        provider='Anthropic',
+        in_service=True,
+        description='Claude 4.5 Opus model (11/01/2025).',
+        release_date=datetime.datetime(2025, 11, 1),
+        input_modalities=(
+            AnthropicModelInfo.INPUT_IMAGE_TYPES
+            + AnthropicModelInfo.INPUT_DOC_TYPES
+        ),
+        context_length=lf.ModelInfo.ContextLength(
+            max_input_tokens=200_000,
+            max_output_tokens=64_000,
+        ),
+        pricing=lf.ModelInfo.Pricing(
+            cost_per_1m_cached_input_tokens=0.5,
+            cost_per_1m_input_tokens=5,
+            cost_per_1m_output_tokens=25,
+        ),
+        rate_limits=AnthropicModelInfo.RateLimits(
+            # Tier 4 rate limits
+            max_requests_per_minute=2000,
+            max_input_tokens_per_minute=1_000_000,
+            max_output_tokens_per_minute=400_000,
+        ),
+    ),
    AnthropicModelInfo(
        model_id='claude-4-opus-20250514',
        provider='Anthropic',

@@ -300,6 +326,32 @@ SUPPORTED_MODELS = [
            max_output_tokens_per_minute=0,
        ),
    ),
+    AnthropicModelInfo(
+        model_id='claude-opus-4-5@20251101',
+        alias_for='claude-opus-4-5-20251101',
+        provider='VertexAI',
+        in_service=True,
+        description='Claude 4.5 Opus model served on VertexAI (11/01/2025).',
+        release_date=datetime.datetime(2025, 11, 1),
+        input_modalities=(
+            AnthropicModelInfo.INPUT_IMAGE_TYPES
+            + AnthropicModelInfo.INPUT_DOC_TYPES
+        ),
+        context_length=lf.ModelInfo.ContextLength(
+            max_input_tokens=200_000,
+            max_output_tokens=64_000,
+        ),
+        pricing=lf.ModelInfo.Pricing(
+            cost_per_1m_cached_input_tokens=0.5,
+            cost_per_1m_input_tokens=5,
+            cost_per_1m_output_tokens=25,
+        ),
+        rate_limits=AnthropicModelInfo.RateLimits(
+            max_requests_per_minute=100,
+            max_input_tokens_per_minute=1_000_000,
+            max_output_tokens_per_minute=80_000,
+        ),
+    ),
    AnthropicModelInfo(
        model_id='claude-opus-4@20250514',
        alias_for='claude-opus-4-20250514',

@@ -834,6 +886,13 @@ class Claude45Sonnet_20250929(Claude45):
  model = 'claude-sonnet-4-5-20250929'
 
 
+# pylint: disable=invalid-name
+class Claude45Opus_20251101(Claude45):
+  """Claude 4.5 Opus model 20251101."""
+
+  model = 'claude-opus-4-5-20251101'
+
+
 class Claude4(Anthropic):
  """Base class for Claude 4 models."""
 

langfun/core/llms/gemini.py
CHANGED

@@ -177,6 +177,55 @@ SUPPORTED_MODELS = [
        max_tokens_per_minute=4_000_000,
    ),
    ),
+    # Gemini 3 Pro Image Preview
+    GeminiModelInfo(
+        model_id='gemini-3-pro-image-preview',
+        in_service=True,
+        experimental=True,
+        provider=pg.oneof(['Google GenAI', 'VertexAI']),
+        model_type='instruction-tuned',
+        description=(
+            'Gemini 3 Pro Image Preview for high-fidelity image generation,'
+            ' editing, and visual reasoning.'
+        ),
+        release_date=datetime.datetime(2025, 12, 9),
+        input_modalities=GeminiModelInfo.INPUT_IMAGE_TYPES
+        + GeminiModelInfo.INPUT_DOC_TYPES,
+        context_length=lf.ModelInfo.ContextLength(
+            max_input_tokens=65_536,
+            max_output_tokens=32_768,
+        ),
+        rate_limits=lf.ModelInfo.RateLimits(
+            max_requests_per_minute=200,
+            max_tokens_per_minute=1_000_000,
+        ),
+    ),
+    # Gemini 3 Flash Preview
+    GeminiModelInfo(
+        model_id='gemini-3-flash-preview',
+        in_service=True,
+        provider=pg.oneof(['Google GenAI', 'VertexAI']),
+        model_type='instruction-tuned',
+        description=(
+            'Gemini 3 Flash Preview: High-efficiency, low-latency multimodal'
+            ' model optimized for agentic workflows.'
+        ),
+        release_date=datetime.datetime(2025, 12, 17),
+        input_modalities=GeminiModelInfo.ALL_SUPPORTED_INPUT_TYPES,
+        context_length=lf.ModelInfo.ContextLength(
+            max_input_tokens=1_048_576,
+            max_output_tokens=65_536,
+        ),
+        pricing=GeminiModelInfo.Pricing(
+            cost_per_1m_cached_input_tokens=0.05,
+            cost_per_1m_input_tokens=0.50,
+            cost_per_1m_output_tokens=3.00,
+        ),
+        rate_limits=lf.ModelInfo.RateLimits(
+            max_requests_per_minute=2_000,
+            max_tokens_per_minute=4_000_000,
+        ),
+    ),
    # Gemini 2.5 Flash
    GeminiModelInfo(
        model_id='gemini-2.5-flash',

@@ -834,7 +883,10 @@ class Gemini(rest.REST):
      config['thinkingConfig'] = thinking_config_data
 
    # This is the new feature since Gemini 3.
-
+    # Skip for image generation models as they don't support mediaResolution.
+    if self.model_id.startswith('gemini-3') and not (
+        self.response_modalities and 'IMAGE' in self.response_modalities
+    ):
      config['mediaResolution'] = 'MEDIA_RESOLUTION_HIGH'
 
    if self.response_modalities:

langfun/core/llms/google_genai.py
CHANGED

@@ -125,6 +125,30 @@ class Gemini3ProPreview(GenAI):
  model = 'gemini-3-pro-preview'
 
 
+class Gemini3ProImagePreview(GenAI):
+  """Gemini 3 Pro Image Preview model for high-fidelity image generation.
+
+  This model supports:
+  - Text-to-image generation
+  - Image editing (multimodal input)
+  - Visual reasoning
+
+  Key Requirements:
+  - responseModalities must include 'IMAGE'
+  - Supported aspect ratios: 1:1, 16:9, 9:16, 4:3, 3:4
+  - Image sizes: 1K (default), 2K, 4K
+  """
+
+  model = 'gemini-3-pro-image-preview'
+  response_modalities = ['TEXT', 'IMAGE']
+
+
+class Gemini3FlashPreview(GenAI):
+  """Gemini 3 Flash Preview model."""
+
+  model = 'gemini-3-flash-preview'
+
+
 class Gemini25FlashImagePreview(GenAI):
  """Gemini 2.5 Flash Image Preview model."""
  model = 'gemini-2.5-flash-image-preview'

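A minimal sketch of using the image-capable class; it presets `response_modalities` to include 'IMAGE', which (per the gemini.py hunk above) also disables the `mediaResolution` setting that image generation does not support. A Google GenAI API key is assumed to be configured:

from langfun.core.llms import google_genai

lm = google_genai.Gemini3ProImagePreview()
flash = google_genai.Gemini3FlashPreview()  # text-only preview, no modality preset

# e.g. lm('Draw a red circle on a white background')
# would return a message whose chunks may include generated image content.
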
langfun/core/llms/vertexai.py
CHANGED

@@ -220,6 +220,33 @@ class VertexAIGemini3ProPreview(VertexAIGemini):  # pylint: disable=invalid-name
  location = 'global'
 
 
+class VertexAIGemini3ProImagePreview(VertexAIGemini):  # pylint: disable=invalid-name
+  """Gemini 3 Pro Image Preview model for high-fidelity image generation.
+
+  This model supports:
+  - Text-to-image generation
+  - Image editing (multimodal input)
+  - Visual reasoning
+
+  Key Requirements:
+  - Uses v1beta1 API endpoint
+  - responseModalities must include 'IMAGE'
+  - Supported aspect ratios: 1:1, 16:9, 9:16, 4:3, 3:4
+  - Image sizes: 1K (default), 2K, 4K
+  """
+
+  model = 'gemini-3-pro-image-preview'
+  location = 'global'
+  response_modalities = ['TEXT', 'IMAGE']
+
+
+class VertexAIGemini3FlashPreview(VertexAIGemini):  # pylint: disable=invalid-name
+  """Gemini 3 Flash Preview model launched on 12/17/2025."""
+
+  model = 'gemini-3-flash-preview'
+  location = 'global'
+
+
 class VertexAIGemini25Pro(VertexAIGemini):  # pylint: disable=invalid-name
  """Gemini 2.5 Pro GA model launched on 06/17/2025."""
 

@@ -419,6 +446,12 @@ class VertexAIClaude45Sonnet_20250929(VertexAIAnthropic):
  model = 'claude-sonnet-4-5@20250929'
 
 
+class VertexAIClaude45Opus_20251101(VertexAIAnthropic):
+  """Anthropic's Claude 4.5 Opus model on VertexAI."""
+
+  model = 'claude-opus-4-5@20251101'
+
+
 class VertexAIClaude4Opus_20250514(VertexAIAnthropic):
  """Anthropic's Claude 4 Opus model on VertexAI."""
  model = 'claude-opus-4@20250514'

langfun/core/modalities/mime.py
CHANGED

@@ -135,7 +135,20 @@ class Mime(lf.Modality):
      raise lf.ModalityError(
          f'MIME type {self.mime_type!r} cannot be converted to text.'
      )
-
+    content = self.to_bytes()
+    # Try UTF-8 first (most common encoding).
+    try:
+      return content.decode('utf-8')
+    except UnicodeDecodeError:
+      pass
+    # Check for UTF-16 BOM (0xff 0xfe or 0xfe 0xff).
+    if content[:2] in (b'\xff\xfe', b'\xfe\xff'):
+      try:
+        return content.decode('utf-16')
+      except UnicodeDecodeError:
+        pass
+    # Fallback: decode with error replacement to avoid crashing.
+    return content.decode('utf-8', errors='replace')
 
  def is_compatible(
      self, mime_types: str | Iterable[str]

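A quick sketch of the resulting behavior, mirroring the new tests that follow:

from langfun.core.modalities import mime

# UTF-8 decodes directly.
assert mime.Custom('text/plain', 'こんにちは'.encode('utf-8')).to_text() == 'こんにちは'

# A UTF-16 BOM (0xff 0xfe or 0xfe 0xff) is detected from the leading bytes.
utf16 = b'\xff\xfe' + 'Hello'.encode('utf-16-le')
assert mime.Custom('text/plain', utf16).to_text() == 'Hello'

# Anything else falls back to UTF-8 with U+FFFD replacement characters
# instead of raising UnicodeDecodeError.
assert '\ufffd' in mime.Custom('text/plain', b'\xff\xfdHello').to_text()
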
langfun/core/modalities/mime_test.py
CHANGED

@@ -163,5 +163,53 @@ class CustomMimeTest(unittest.TestCase):
    )
 
 
+class ToTextEncodingTest(unittest.TestCase):
+  """Tests for to_text() encoding handling."""
+
+  def test_utf8_decoding(self):
+    """Test that valid UTF-8 content is decoded correctly."""
+    content = mime.Custom('text/plain', b'Hello, World!')
+    self.assertEqual(content.to_text(), 'Hello, World!')
+
+    # UTF-8 with multi-byte characters.
+    utf8_content = 'こんにちは'.encode('utf-8')
+    content = mime.Custom('text/plain', utf8_content)
+    self.assertEqual(content.to_text(), 'こんにちは')
+
+  def test_utf16_le_bom_decoding(self):
+    """Test that UTF-16 Little Endian with BOM is decoded correctly."""
+    # UTF-16 LE BOM: 0xff 0xfe
+    utf16_le_content = 'Hello'.encode('utf-16-le')
+    content_with_bom = b'\xff\xfe' + utf16_le_content
+    content = mime.Custom('text/plain', content_with_bom)
+    self.assertEqual(content.to_text(), 'Hello')
+
+  def test_utf16_be_bom_decoding(self):
+    """Test that UTF-16 Big Endian with BOM is decoded correctly."""
+    # UTF-16 BE BOM: 0xfe 0xff
+    utf16_be_content = 'Hello'.encode('utf-16-be')
+    content_with_bom = b'\xfe\xff' + utf16_be_content
+    content = mime.Custom('text/plain', content_with_bom)
+    self.assertEqual(content.to_text(), 'Hello')
+
+  def test_invalid_bytes_fallback_with_replacement(self):
+    """Test that invalid bytes are replaced with replacement character."""
+    # 0xff alone is invalid in UTF-8 and doesn't have UTF-16 BOM pattern.
+    invalid_content = b'\xff\xfdHello'
+    content = mime.Custom('text/plain', invalid_content)
+    result = content.to_text()
+    # Invalid bytes should be replaced with U+FFFD (replacement character).
+    self.assertIn('\ufffd', result)
+    self.assertIn('Hello', result)
+
+  def test_binary_mime_type_raises_error(self):
+    """Test that binary MIME types raise ModalityError."""
+    content = mime.Custom('application/octet-stream', b'\x00\x01\x02')
+    with self.assertRaisesRegex(
+        lf.ModalityError, 'cannot be converted to text'
+    ):
+      content.to_text()
+
+
 if __name__ == '__main__':
  unittest.main()

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/RECORD
RENAMED

@@ -35,7 +35,7 @@ langfun/core/subscription_test.py,sha256=Y4ZdbZEwm83YNZBxHff0QR4QUa4rdaNXA3_jfIc
 langfun/core/template.py,sha256=KIRLEhijPf5UP5auJKf9x6HKKW2E1Ki83Dcs9W8eKs8,29571
 langfun/core/template_test.py,sha256=K7gx1CsDlau2CCvrE4BeKV26n0GyovhbsadWf8pDMek,20400
 langfun/core/agentic/__init__.py,sha256=ajI1SGcQWXfBp2MFH13Fr9OkSN4slSKDlJSHPDp4P_c,1573
-langfun/core/agentic/action.py,sha256=
+langfun/core/agentic/action.py,sha256=R3KQNzyuv2ME7ciK7PP_mB_5HaKTyFGMLeaCIZRRn5A,72385
 langfun/core/agentic/action_eval.py,sha256=Mjk5QBFInjIK3VlDR2RT__pugmtAW-iv-SMtH5GMcNo,5258
 langfun/core/agentic/action_eval_test.py,sha256=7AkOwNbUX-ZgR1R0a7bvUZ5abNTUV7blf_8Mnrwb-II,2811
 langfun/core/agentic/action_test.py,sha256=sKaU8IZ9whgBeV7oMB-0s8TdQ6PqDkI_eT9Yw3EZSYU,24123

@@ -68,42 +68,42 @@ langfun/core/eval/patching.py,sha256=wJqqML_z_hXQQ65f9oJpdtiNEkUvwWWdNgGiIcV1Jq4
 langfun/core/eval/patching_test.py,sha256=8kCd54Egjju22FMgtJuxEsrXkW8ifs-UUBHtrCG1L6w,4775
 langfun/core/eval/scoring.py,sha256=1C7e7gR8Wai7M9oBXRZifntxy5HEik5qjVo9gY8B7KI,6423
 langfun/core/eval/scoring_test.py,sha256=UcBH0R6vAovZ0A4yM22s5cBHL1qVKASubrbu1t8dYBw,4529
-langfun/core/eval/v2/__init__.py,sha256=
-langfun/core/eval/v2/checkpointing.py,sha256=
+langfun/core/eval/v2/__init__.py,sha256=XbkBqoyJBH_khtAS01gP6_V4KnWLY3bFJ7D0rtHa1BU,1878
+langfun/core/eval/v2/checkpointing.py,sha256=_WJydQn43LDHTpjyt7qIh5XQl1--Jvq03lrwktcgy6U,15526
 langfun/core/eval/v2/checkpointing_test.py,sha256=s_E94dOPNO1zYzXyQI37wvCF3suez-r4Nls9popN58w,9787
 langfun/core/eval/v2/config_saver.py,sha256=nsuG0pqTikIlsL-Mij6swteUBif-zxJUdGxTHZsOVeQ,1205
 langfun/core/eval/v2/config_saver_test.py,sha256=OD0zl26YHjNibFD67YxwrZ7-zT9V7p-3zLDItWBAgic,1261
 langfun/core/eval/v2/eval_test_helper.py,sha256=baew3-cqomy1p7mF1_Xw7AvEWUwCimi3J7-8Ay3eEPo,6539
-langfun/core/eval/v2/evaluation.py,sha256=
+langfun/core/eval/v2/evaluation.py,sha256=1T0lxTu9gy329Mq4ii16ktARbtvbBGY9IUtsUIkNXeY,30740
 langfun/core/eval/v2/evaluation_test.py,sha256=gurFzSfPECZ_FMQOnf3bzKOHmQ7C4IUxEfbyZy50bjM,7966
 langfun/core/eval/v2/example.py,sha256=VZeBqMWnfEtn1mmdPW2w2u2XbAWVll1q1-50qL8DjS8,11606
 langfun/core/eval/v2/example_test.py,sha256=RwtBcUumPBWynA8BLMoZetSHdgvFywlHXuyvInf1y_s,3576
-langfun/core/eval/v2/experiment.py,sha256=
+langfun/core/eval/v2/experiment.py,sha256=_tmahrRoFxSf8oPzsS-VcxqbCT2KvYafUoQuBP4yQ4s,37256
 langfun/core/eval/v2/experiment_test.py,sha256=7prE4ASKlbwQIXiLzEqjgaF4yQDL7KjxX-dBUPT84VA,14145
 langfun/core/eval/v2/metric_values.py,sha256=WAL1BdHaU_oq7d_k1KyjhiQDK32dNLSyn1L2yEkz0o4,6040
 langfun/core/eval/v2/metric_values_test.py,sha256=5ffwnqrbLIBh1hdUl3L9mpJlUvsmd2VQ8UWPOJcQj4s,3630
 langfun/core/eval/v2/metrics.py,sha256=cdFqrhRlxqpBk_04Mmhk21NcOD0kor5H0iFX54_rO4s,14486
 langfun/core/eval/v2/metrics_test.py,sha256=gf8hT5V5OeM-Ah-Wa4aLtgrYZmlMStKPjEhCTS0VMHQ,6812
-langfun/core/eval/v2/progress.py,sha256
+langfun/core/eval/v2/progress.py,sha256=-kYzdiVbw56eb8mc0yCS_2d5aIUdZ1oVi1QWBuhFW74,11764
 langfun/core/eval/v2/progress_test.py,sha256=MzJ7wa65XYZ0chArA-lSg1eRSvQ_TzZJIHMk85Kwz7o,3208
 langfun/core/eval/v2/progress_tracking.py,sha256=yMYlOMJF8M4FUhyjGRkM6O6TXiMwKPsEn3wbpftxcss,6376
 langfun/core/eval/v2/progress_tracking_test.py,sha256=37v42y4kh2GfDXBrkugEupW6IRAzA774wwPJaOyefUs,2597
-langfun/core/eval/v2/reporting.py,sha256
+langfun/core/eval/v2/reporting.py,sha256=U6ToanG_y-zFZ1W1CRaju6T_zLlbmknLR148nIvl8mw,8949
 langfun/core/eval/v2/reporting_test.py,sha256=q3LBfPk7jvEWXB3sdk2CycbMKqNRyXhs5z6BokfwDIE,6096
 langfun/core/eval/v2/runners/__init__.py,sha256=2TcCLW32OsmXQINcVKa2ZJY8Ca7j3NnT0yy9hXYUDn8,1115
-langfun/core/eval/v2/runners/base.py,sha256=
+langfun/core/eval/v2/runners/base.py,sha256=8KZlkSuuzlSxeoQfsG32OM6E2WUbtgNuz9p5MyfVFQY,14146
 langfun/core/eval/v2/runners/beam.py,sha256=LQK9bZCFJR9j9DJ-mAudhphumItGwXc5bbGwadl9kxY,11782
 langfun/core/eval/v2/runners/beam_test.py,sha256=cI5WaQQObnRrPnGjED3OFT3JXYOE3thQ640H08TG_dw,5306
-langfun/core/eval/v2/runners/ckpt_monitor.py,sha256=
-langfun/core/eval/v2/runners/ckpt_monitor_test.py,sha256=
+langfun/core/eval/v2/runners/ckpt_monitor.py,sha256=KaaDYvHNOewUrJqJ4FHjdMeS7okpX7FYdjCx558joPU,12071
+langfun/core/eval/v2/runners/ckpt_monitor_test.py,sha256=Xqd30PF0XIOrqBSZ53_7ozxYR3Wc3SiIaKuwwj1AXQ8,7176
 langfun/core/eval/v2/runners/debug.py,sha256=ExsBcAvmhFsaaS3VLjxE70HImHe2YVs0IpoefM01onY,1442
 langfun/core/eval/v2/runners/debug_test.py,sha256=kDWs4Fu7itzBxbRwFc-UKEP2hAV0iVFp2wWkEuZNEcg,2577
 langfun/core/eval/v2/runners/parallel.py,sha256=PSdOY3i2ot94TWVCZY0iJSWFAT0CCxa1wxk7KpI_GfI,7794
 langfun/core/eval/v2/runners/parallel_test.py,sha256=8M8OTpsDd-wQYZRRSPCYGkwjt7gUvkgze8NMCTKydUw,6146
 langfun/core/eval/v2/runners/sequential.py,sha256=hebMZd6EVraY9zAwariT9WfsWQyX5AYuRsFdRo-knKU,1631
 langfun/core/eval/v2/runners/sequential_test.py,sha256=apbNC0-Pi6r17_OQlHqqOZM0OVo1mZlaPk2B4vUteRg,6064
-langfun/core/llms/__init__.py,sha256=
-langfun/core/llms/anthropic.py,sha256=
+langfun/core/llms/__init__.py,sha256=BPPV18zMOvLuAhStVVbef13Pe59CsEWGo9YvRAyeFAo,10750
+langfun/core/llms/anthropic.py,sha256=HwgzvMkVdQwmwfDtI_X6lxW0gBxsbYAdMKes8u8Jv_Y,33224
 langfun/core/llms/anthropic_test.py,sha256=qA9vByp_cwwXNlXzcwHpPWFnO9lfFo8NKfDi5nBNqgI,9052
 langfun/core/llms/azure_openai.py,sha256=LEc7-ay2fOOCwwL3SfxDr3KCdH8-2i1EtD-PBvr4kfk,2777
 langfun/core/llms/azure_openai_test.py,sha256=lkMZkQdJBV97fTM4C4z8qNfvr6spgiN5G4hvVUIVr0M,1735

@@ -113,9 +113,9 @@ langfun/core/llms/deepseek.py,sha256=jQsotTUk4161EJIcoQOV7iOWBZfQ3Ukh9GOh31A0HYU
 langfun/core/llms/deepseek_test.py,sha256=DvROWPlDuow5E1lfoSkhyGt_ELA19JoQoDsTnRgDtTg,1847
 langfun/core/llms/fake.py,sha256=NH8Zlezmx3eacao4D7wihrZjRuyBJuHR5rdyp94PrAw,4409
 langfun/core/llms/fake_test.py,sha256=lC-C2TpEsnf2kmZpa3OiH2H944I4hMWTAaHEXzRj1DU,7855
-langfun/core/llms/gemini.py,sha256=
+langfun/core/llms/gemini.py,sha256=6ys2nm9WnhyO27YY3c6AI7reOs-rMk7yPBBh9EC07-s,34225
 langfun/core/llms/gemini_test.py,sha256=bv-Ulv3vjGhxd8nJD_UDhWDMK3K3TM7b5powBcYrv1c,10844
-langfun/core/llms/google_genai.py,sha256=
+langfun/core/llms/google_genai.py,sha256=YObrVUhHJcGCiUlL4LuoGyjEiyf_WM2y4iBxQIV0GHY,7096
 langfun/core/llms/google_genai_test.py,sha256=NKNtpebArQ9ZR7Qsnhd2prFIpMjleojy6o6VMXkJ1zY,1502
 langfun/core/llms/groq.py,sha256=O-kv2_R_IkC8wGIT086xin8jYi7QnsakPCGVLR58lMw,12517
 langfun/core/llms/groq_test.py,sha256=P4EgexCqsh4K2x11w0UL_vz-YYNaPdQU0WsDAdnTRQ8,2045

@@ -127,7 +127,7 @@ langfun/core/llms/openai_compatible_test.py,sha256=8yr_jGmHCDyMwp-VcJwThFgh7B_56
 langfun/core/llms/openai_test.py,sha256=1o5rxiHZj-UEgugWN8JmfJtznhUmDywy6dU3Euax-Ts,2639
 langfun/core/llms/rest.py,sha256=eR-M1st5ZnzuitICyYfxSRcmQWmy_eeOoe2bHLalzN0,5351
 langfun/core/llms/rest_test.py,sha256=_zM7nV8DEVyoXNiQOnuwJ917mWjki0614H88rNmDboE,5020
-langfun/core/llms/vertexai.py,sha256=
+langfun/core/llms/vertexai.py,sha256=zQO-zi1HVLwLJU9r5_hmCvyfxdox7FKT_43XjILRwvA,22489
 langfun/core/llms/vertexai_test.py,sha256=_e-acnNBAf9C3WO6i1b2J_mhRzdDdYQTorD9hIVZKOg,5034
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=qLGlEMi5cfsDxRTsOWrmwbxjvvwUaq4Y8MxlXr69wpw,5060

@@ -150,8 +150,8 @@ langfun/core/modalities/audio.py,sha256=cb95FzDE-IIQf7kXy7D4AAXtziQF0FYkZUe4pw5E
 langfun/core/modalities/audio_test.py,sha256=tW1vEy-Cumhf-HgDgCxlSNZqgJb2HTgqOixGWLiwOmw,2065
 langfun/core/modalities/image.py,sha256=SS_FSrYSjb1uL0IAVyUu2TZX2-FcI_x9ZTB8im5Amzk,4030
 langfun/core/modalities/image_test.py,sha256=91LpEOvr_v6SGdtzSuCw3ks62L8vxJVIZwgAKxk7UmY,8476
-langfun/core/modalities/mime.py,sha256=
-langfun/core/modalities/mime_test.py,sha256=
+langfun/core/modalities/mime.py,sha256=9YK-uRGYN6YG3ux7zSYl5XGZEDLBiXyTax1cLMhissY,11255
+langfun/core/modalities/mime_test.py,sha256=e6p-XW47yNXbvPS2R4-0afZd84bOqCc6DnIzqZCkPZk,8391
 langfun/core/modalities/pdf.py,sha256=rc-uIKRVkTTa0j7jC6WRwKM9WqiS5NxF-H6PPunVeXM,1231
 langfun/core/modalities/pdf_test.py,sha256=ulZ0FbnlsU0wkrdckJ4ONZPTYRyMPO9Aob1UO6FXygk,1950
 langfun/core/modalities/video.py,sha256=ZopyDf-8bi0V-QZDAg-_8S3HkMNiEQL9aWmGuI6Fkrs,1506

@@ -210,8 +210,8 @@ langfun/env/event_handlers/event_logger.py,sha256=ga8RN8qjwtAOCnV_MnhNPTktN8EJ-x
 langfun/env/event_handlers/event_logger_test.py,sha256=qSAcirtRz00H-1RL9ShELBiZKiPxsk_v6cVA6XdAk4k,9274
 langfun/env/event_handlers/metric_writer.py,sha256=7ZrUp0rYvs7TfNpQ16Xbxg8vp-6ZbjuJ-qrhVSbhv2I,21085
 langfun/env/event_handlers/metric_writer_test.py,sha256=bjdYXoXMPWpWz_-HUPM6vFP1ez5G386u0fmPfe-SR_M,5952
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
+langfun-0.1.2.dev202601030804.dist-info/licenses/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.1.2.dev202601030804.dist-info/METADATA,sha256=FRXTPzGp4Bi8NFAGVHUUWOaEuO-5nUIy-X2y1CH_bds,7522
+langfun-0.1.2.dev202601030804.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+langfun-0.1.2.dev202601030804.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.1.2.dev202601030804.dist-info/RECORD,,

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/WHEEL
RENAMED
File without changes

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/licenses/LICENSE
RENAMED
File without changes

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202601030804.dist-info}/top_level.txt
RENAMED
File without changes