langfun 0.1.2.dev202512040805__py3-none-any.whl → 0.1.2.dev202512150805__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/eval/v2/__init__.py +1 -0
- langfun/core/eval/v2/checkpointing.py +73 -40
- langfun/core/eval/v2/evaluation.py +10 -2
- langfun/core/eval/v2/experiment.py +11 -5
- langfun/core/eval/v2/reporting.py +2 -4
- langfun/core/eval/v2/runners/base.py +1 -0
- langfun/core/eval/v2/runners/ckpt_monitor.py +61 -5
- langfun/core/eval/v2/runners/ckpt_monitor_test.py +51 -0
- langfun/core/llms/__init__.py +2 -0
- langfun/core/llms/gemini.py +27 -1
- langfun/core/llms/google_genai.py +18 -0
- langfun/core/llms/vertexai.py +20 -0
- langfun/core/modalities/mime.py +14 -1
- langfun/core/modalities/mime_test.py +48 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/METADATA +1 -1
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/RECORD +19 -19
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/WHEEL +0 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/licenses/LICENSE +0 -0
- {langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/top_level.txt +0 -0
langfun/core/eval/v2/__init__.py
CHANGED
@@ -41,6 +41,7 @@ from langfun.core.eval.v2.checkpointing import PerExampleCheckpointer
 from langfun.core.eval.v2.reporting import HtmlReporter
 from langfun.core.eval.v2.reporting import ExampleHtmlGenerator
 
+# Google-internal imports.
 
 # pylint: enable=g-bad-import-order
 # pylint: enable=g-importing-member
langfun/core/eval/v2/checkpointing.py
CHANGED

@@ -38,7 +38,7 @@ class Checkpointer(experiment_lib.Plugin):
   later. When an experiment starts, the checkpointer loads any previously saved
   examples from an earlier run (or a warm-start run) into `experiment.state`,
   so the runner can skip processing them again.
-  Subclasses should implement `
+  Subclasses should implement `_list_checkpoint_files` to identify
   checkpoint files to load, and `_save_example` to save a newly processed
   example.
   """
@@ -131,7 +131,7 @@ class Checkpointer(experiment_lib.Plugin):
       experiment: Experiment,
   ) -> None:
     """Creates the checkpoint file."""
-    ckpt_files = self.
+    ckpt_files = self._list_checkpoint_files(runner, experiment)
     experiment.info(f'Found {len(ckpt_files)} checkpoint files to load.')
 
     # Load the checkpoint files in parallel.
@@ -141,18 +141,18 @@ class Checkpointer(experiment_lib.Plugin):
         experiment
     )
     context = dict(counter=0, counter_lock=threading.Lock())
-    copy_ckpt = current_run.input_root != current_run.output_root
 
     def _load_state(ckpt_file):
       error = None
       with pg.timeit() as t:
         try:
-          experiment.load_state(
-
+          loaded_examples = experiment.load_state(
+              ckpt_file,
               filter=lambda x: x.id in examples_to_load,
               load_example_metadata=lambda x: x.id in examples_to_load_metadata,
           )
         except BaseException as e:  # pylint: disable=broad-except
+          loaded_examples = []
           error = e
         finally:
           with context['counter_lock']:
@@ -170,22 +170,18 @@ class Checkpointer(experiment_lib.Plugin):
             f'Skipping the file. ({progress_str})'
         )
 
-
-
-
-
-
-
-
-
-
-
-
-
-        except BaseException as e:  # pylint: disable=broad-except
-          experiment.warning(
-              f'Failed to copy checkpoint {ckpt_file!r}: {e}.'
-          )
+      output_ckpt_file = current_run.output_path_for(
+          experiment, os.path.basename(ckpt_file)
+      )
+      if ckpt_file != output_ckpt_file and any(
+          e for e in loaded_examples if not e.has_error
+      ):
+        # Write the error-free warm-start examples to the output checkpoint
+        # file.
+        with SequenceWriter(output_ckpt_file) as writer:
+          for example in loaded_examples:
+            if not example.has_error:
+              writer.add(example)
 
     _ = list(
         lf.concurrent_map(
@@ -197,10 +193,10 @@ class Checkpointer(experiment_lib.Plugin):
     )
 
   @abc.abstractmethod
-  def
+  def _list_checkpoint_files(
       self, runner: Runner, experiment: Experiment
   ) -> list[str]:
-    """Lists the checkpoint
+    """Lists the checkpoint file paths to restore."""
 
   @abc.abstractmethod
   def _save_example(
@@ -226,22 +222,41 @@ class PerExampleCheckpointer(Checkpointer):
     self._checkpoint_file_prefix = prefix
     self._checkpoint_file_ext = ext
 
-  def
+  def _list_checkpoint_files(
       self, runner: Runner, experiment: Experiment
   ) -> list[str]:
-
-
+
+    def _list_checkpoints_from(ckpt_dir: str, examples_to_load: set[int]):
+      ckpt_files = []
+      if pg.io.path_exists(ckpt_dir):
+        regex = re.compile(
+            f'{self._checkpoint_file_prefix}_(\\d+){self._checkpoint_file_ext}'
+            .replace('.', '\\.')
+        )
+        for filename in pg.io.listdir(ckpt_dir):
+          match = regex.match(filename)
+          if match and int(match.group(1)) in examples_to_load:
+            examples_to_load.remove(int(match.group(1)))
+            ckpt_files.append(os.path.join(ckpt_dir, filename))
+      return ckpt_files
+
     examples_to_load = runner.current_run.examples_to_load(experiment)
-
-
-
-
+
+    # Take output directory as the first priority to checkpoints processed in
+    # this run.
+    ckpt_files = _list_checkpoints_from(
+        runner.current_run.output_dir(experiment), examples_to_load
+    )
+    # If the input and output directories are different, also load from the
+    # input directory.
+    if (examples_to_load
+        and runner.current_run.input_root != runner.current_run.output_root):
+      ckpt_files.extend(
+          _list_checkpoints_from(
+              runner.current_run.input_dir(experiment), examples_to_load
+          )
       )
-
-    match = regex.match(filename)
-    if match and int(match.group(1)) in examples_to_load:
-      filenames.append(filename)
-    return filenames
+    return ckpt_files
 
   def _save_example(
       self,
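As a standalone illustration of the filename matching performed by `_list_checkpoints_from` above (the directory listing below is made up; 'checkpoint' and '.jsonl' are assumed to be the default prefix and extension):

import re

# Build the same pattern as above: the '.' in the extension is escaped so it
# only matches a literal dot.
prefix, ext = 'checkpoint', '.jsonl'
regex = re.compile(f'{prefix}_(\\d+){ext}'.replace('.', '\\.'))  # checkpoint_(\d+)\.jsonl

examples_to_load = {1, 3}
for filename in ['checkpoint_1.jsonl', 'checkpoint_2.jsonl', 'summary.html']:
  match = regex.match(filename)
  if match and int(match.group(1)) in examples_to_load:
    print('would load', filename)  # only checkpoint_1.jsonl qualifies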
@@ -341,13 +356,24 @@ class BulkCheckpointer(Checkpointer):
     if self._sequence_writer is not None:
       self._sequence_writer[experiment.id] = sequence_writer
 
-  def
+  def _list_checkpoint_files(
       self, runner: Runner, experiment: Experiment
   ) -> list[str]:
-    if
-
-
-
+    # Always honor the output directory if it's present, as it contains both
+    # the warm-started examples and newly processed examples.
+    output_ckpt_file = runner.current_run.output_path_for(
+        experiment, self.checkpoint_filename
+    )
+    if pg.io.path_exists(output_ckpt_file):
+      return [output_ckpt_file]
+
+    if runner.current_run.input_root != runner.current_run.output_root:
+      input_ckpt_file = runner.current_run.input_path_for(
+          experiment, self.checkpoint_filename
+      )
+      if pg.io.path_exists(input_ckpt_file):
+        return [input_ckpt_file]
+    print('CCC', experiment.hash, [])
     return []
 
   def on_experiment_complete(
@@ -441,5 +467,12 @@ class SequenceWriter:
       self._sequence_writer = None
       pg.io.rename(self._tmp_path, self._path)
 
+  def __enter__(self):
+    return self
+
+  def __exit__(self, *args, **kwargs):
+    del args, kwargs
+    self.close()
+
   def __del__(self):
     self.close()
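With `__enter__` and `__exit__` added, `SequenceWriter` can now be used as a context manager, which is what the warm-start copy path earlier in this file relies on. A minimal usage sketch (the path and `loaded_examples` are placeholders):

# Minimal sketch of the context-manager usage enabled above; the path and
# `loaded_examples` are placeholders for illustration.
with SequenceWriter('/tmp/output/checkpoint.jsonl') as writer:
  for example in loaded_examples:
    if not example.has_error:
      writer.add(example)
# __exit__ calls close(), which renames the temporary file to its final path.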
langfun/core/eval/v2/evaluation.py
CHANGED

@@ -114,6 +114,13 @@ class Evaluation(experiment_lib.Experiment):
     self._log_entries = []
     self._log_lock = threading.Lock()
 
+  def _identity(self) -> str:
+    """Returns the definition of the evaluation."""
+    return self.format(
+        compact=True, hide_default_values=True, use_inferred=True,
+        exclude_keys=('plugins', 'progress', 'usage_summary')
+    )
+
   #
   # Handling evaluation hierarchy (materialized vs. hyper evaluations).
   #
@@ -379,10 +386,10 @@ class Evaluation(experiment_lib.Experiment):
       load_example_metadata: bool = True,
       filter: Callable[[example_lib.Example], bool] | None = None,  # pylint: disable=redefined-builtin
       raise_if_not_exist: bool = False
-  ) ->
+  ) -> list[example_lib.Example]:
     """Loads saved state from a sequence IO file."""
     if pg.io.path_exists(state_file):
-      self._state.load(
+      return self._state.load(
           state_file,
           example_input_by_id=self.example_input_by_id,
           load_example_metadata=load_example_metadata,
@@ -390,6 +397,7 @@ class Evaluation(experiment_lib.Experiment):
       )
     elif raise_if_not_exist:
       raise ValueError(f'State file {state_file} does not exist.')
+    return []
 
   def _reset(self) -> None:
     """Resets the state of the evaluation."""
langfun/core/eval/v2/experiment.py
CHANGED

@@ -268,11 +268,11 @@ class Experiment(lf.Component, pg.views.HtmlTreeView.Extension):
   @functools.cached_property
   def hash(self) -> str:
     """A 8-byte MD5 hash computed from experiment identity."""
-
-
-
-
-
+    return hashlib.md5(self._identity().encode()).hexdigest()[:8]
+
+  @abc.abstractmethod
+  def _identity(self) -> str:
+    """Returns the identity of the experiment."""
 
   @classmethod
   def link(cls, path: str) -> str:
@@ -691,6 +691,12 @@ class Suite(Experiment):
     """Returns whether the task is a leaf."""
     return False
 
+  def _identity(self) -> str:
+    """Returns the definition of the evaluation."""
+    return '[' + ', '.join(
+        [child._identity() for child in self.children]  # pylint: disable=protected-access
+    ) + ']'
+
 
 class RunId(pg.Object):
   """Structured repreesentation a experiment run ID."""
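With these changes, `Experiment.hash` is derived from the new abstract `_identity()` string instead of an ad-hoc computation, so two experiments with the same definition share the same hash. A standalone sketch of the derivation (the identity string below is made up for illustration):

import hashlib

# Sketch of the derivation used by Experiment.hash above; the identity string
# is a made-up example of what _identity() might return.
identity = "MyEval(lm=SomeModel(), inputs=my_inputs())"
print(hashlib.md5(identity.encode()).hexdigest()[:8])  # first 8 hex chars of the MD5 digest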
langfun/core/eval/v2/reporting.py
CHANGED

@@ -86,10 +86,8 @@ class ExampleHtmlGenerator(experiment_lib.Plugin):
       return
 
     try:
-      with pg.timeit() as t
-
-        with pg.io.open(dest_file, 'w') as dest:
-          dest.write(content)
+      with pg.timeit() as t:
+        pg.io.copy(src_file, dest_file)
       experiment.info(
           f'\'{example.id}.html\' copied in {t.elapse:.2f} seconds.'
       )
langfun/core/eval/v2/runners/base.py
CHANGED

@@ -139,6 +139,7 @@ class RunnerBase(Runner):
           self.current_run.examples_to_evaluate(experiment)
       )
       experiment.progress.start(total=num_examples_to_evaluate)
+      pg.io.mkdirs(self.current_run.output_dir(experiment))
     else:
       experiment.progress.start(total=len(experiment.leaf_nodes))
 
langfun/core/eval/v2/runners/ckpt_monitor.py
CHANGED

@@ -60,6 +60,19 @@ class CheckpointMonitor(base.RunnerBase):
       'The maximum number of threads to aggregate checkpoints.'
   ] = 128
 
+  bypass_old_ckpt_files_with_non_oop_errors: Annotated[
+      bool,
+      'If True, ignore old checkpoint files with non-oop errors.'
+  ] = True
+
+  ckpt_start_time: Annotated[
+      float | None,
+      (
+          'The timestamp to treat checkpoint files modified before this '
+          'time as old.'
+      )
+  ] = None
+
   @dataclasses.dataclass
   class _AggregationEntry:
     evaluation: evaluation_lib.Evaluation
@@ -78,6 +91,9 @@ class CheckpointMonitor(base.RunnerBase):
     self._aggregation_entries = []
     self._aggregator_pool = None
     self._error = None
+    if self.ckpt_start_time is None:
+      self.rebind(ckpt_start_time=time.time(), skip_notification=True)
+    self._ckpt_bypass_timestamp: dict[str, int] = {}
 
   def start(self):
     # Reset the experiment state before getting started.
@@ -165,6 +181,14 @@ class CheckpointMonitor(base.RunnerBase):
             os.path.basename(filepath).split('.')[0].split('_')[-1]
         )
         if example_id in entry.example_ids_to_be_aggregated:
+          last_modified_time = pg.io.getmtime(filepath)
+          bypass_timestamp = self._ckpt_bypass_timestamp.get(filepath)
+          if (
+              bypass_timestamp is not None
+              and last_modified_time <= bypass_timestamp
+          ):
+            continue
+
           # Remove example ID from the set to avoid duplicate processing.
           entry.example_ids_to_be_aggregated.remove(example_id)
           entry.example_ids_being_aggregated.add(example_id)
@@ -177,7 +201,7 @@ class CheckpointMonitor(base.RunnerBase):
           entry.example_ids_inprogress.add(example_id)
 
           self._aggregator_pool.submit(
-              self._aggregate, entry, filepath, example_id
+              self._aggregate, entry, filepath, example_id, last_modified_time
           )
           pg.logging.info(
               '[%s] Aggregating example %d from %s...',
@@ -196,7 +220,8 @@ class CheckpointMonitor(base.RunnerBase):
       self,
       entry: _AggregationEntry,
       ckpt_filepath: str,
-      example_id: int
+      example_id: int,
+      last_modified_time: float,
   ):
     """Aggregate an example from a checkpoint file."""
     try:
@@ -212,6 +237,25 @@ class CheckpointMonitor(base.RunnerBase):
       # example processed multiple times. We only need to aggregate the last
       # example.
       example = loaded_examples[-1]
+      if (
+          self.bypass_old_ckpt_files_with_non_oop_errors
+          and last_modified_time < self.ckpt_start_time
+          and example.error is not None
+          and not example.error.tag.startswith('MappingError')
+      ):
+        entry.example_ids_being_aggregated.remove(example_id)
+        entry.example_ids_to_be_aggregated.add(example_id)
+        self._ckpt_bypass_timestamp[ckpt_filepath] = last_modified_time
+        pg.logging.info(
+            '[%s] Bypassing old checkpoint file with non-oop errors (%s) '
+            'for example %d, last_modified_time: %s, ckpt_start_time: %s',
+            entry.evaluation.id,
+            ckpt_filepath,
+            example_id,
+            last_modified_time,
+            self.ckpt_start_time,
+        )
+        return
     except BaseException as e:  # pylint: disable=broad-except
       error_info = pg.ErrorInfo.from_exception(e)
       pg.logging.error(
@@ -229,9 +273,21 @@ class CheckpointMonitor(base.RunnerBase):
       # This will skip processing but still allow metrics to be collected.
       # `process` will never be called for evaluation, thus we do not
       # need to setup/teardown evaluation.
-
-
-
+      try:
+        example = entry.evaluation.evaluate(
+            example, reevaluate_upon_previous_errors=False
+        )
+      except BaseException as e:  # pylint: disable=broad-except
+        pg.logging.error(
+            '[%s] Unexpected error found during evaluating example %d from %s.',
+            entry.evaluation.id,
+            example_id,
+            ckpt_filepath,
+        )
+        self._error = e
+        entry.example_ids_being_aggregated.remove(example_id)
+        return
+
       example.newly_processed = True
       pg.logging.info(
           '[%s] Successfully aggregated example %d from %s.',
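Taken together, the new `ckpt_start_time` and `bypass_old_ckpt_files_with_non_oop_errors` settings make the monitor defer aggregation of stale failed examples so they can be re-checked later once the checkpoint file is rewritten. The decision reduces to the predicate below (a hypothetical standalone helper restating the inline condition above, not part of the module):

# Hypothetical helper restating the bypass condition used in _aggregate above.
def should_bypass(example, last_modified_time, ckpt_start_time,
                  bypass_old_ckpt_files_with_non_oop_errors=True):
  return (
      bypass_old_ckpt_files_with_non_oop_errors
      # The checkpoint entry predates this monitor run...
      and last_modified_time < ckpt_start_time
      # ...and it failed with an error whose tag is not a MappingError
      # (i.e. a non-oop error), so aggregation is deferred.
      and example.error is not None
      and not example.error.tag.startswith('MappingError')
  )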
langfun/core/eval/v2/runners/ckpt_monitor_test.py
CHANGED

@@ -13,8 +13,10 @@
 # limitations under the License.
 import os
 import tempfile
+import time
 import unittest
 
+import langfun.core as lf
 from langfun.core.eval.v2 import checkpointing
 from langfun.core.eval.v2 import eval_test_helper
 from langfun.core.eval.v2 import example as example_lib
@@ -33,6 +35,7 @@ class CheckpointMonitorTest(unittest.TestCase):
   def test_aggregate(self):
     exp = eval_test_helper.test_experiment()
     root_dir = os.path.join(self.test_dir, 'test_aggregate')
+    ckpt_start_time = time.time()
     run = exp.run(
         root_dir,
         runner='sequential',
@@ -55,6 +58,7 @@ class CheckpointMonitorTest(unittest.TestCase):
         plugins=[plugin],
         checkpoint_pattern='checkpoint_*.jsonl',
         monitor_inprogress_files=True,
+        ckpt_start_time=ckpt_start_time,
     )
     monitor.run()
 
@@ -70,7 +74,50 @@ class CheckpointMonitorTest(unittest.TestCase):
     for e in exp.leaf_nodes:
       self.assertEqual(e.progress.num_completed, 10)
 
+  def test_ignore_old_ckpt_files_with_non_oop_errors(self):
+    exp = eval_test_helper.test_evaluation()
+    root_dir = os.path.join(self.test_dir, 'test_ignore_old_ckpt_files')
+    run = exp.run(
+        root_dir,
+        runner='sequential',
+        progress_tracker=None,
+        plugins=[
+            checkpointing.PerExampleCheckpointer(
+                checkpoint_filename='checkpoint.jsonl'
+            )
+        ],
+        use_cache='no',
+    )
+    monitor = ckpt_monitor.CheckpointMonitor(
+        run,
+        plugins=[],
+        checkpoint_pattern='checkpoint_*.jsonl',
+        monitor_inprogress_files=True
+    )
+    monitor.start()
+    time.sleep(2)
+    # Example 6 is a non-oop error, we simulate a re-evaluation.
+    ex = example_lib.Example(
+        id=6, output=1, metric_metadata={'match': {'is_correct': True}},
+        start_time=time.time() - 2, end_time=time.time(),
+        usage_summary=lf.UsageSummary(),
+        execution_status={
+            'evaluate': pg.utils.TimeIt.Status(name='evaluate', elapse=1)
+        }
+    )
+    with pg.io.open_sequence(
+        run.output_path_for(exp, 'checkpoint_6.jsonl'),
+        mode='w'
+    ) as f:
+      f.add(pg.to_json_str(ex))
+    print(time.time(), pg.io.listdir(run.output_dir(exp)))
+    monitor.join()
+    self.assertEqual(exp.progress.num_processed, 10)
+    self.assertEqual(exp.progress.num_completed, 10)
+    self.assertEqual(exp.progress.num_failed, 0)
+
   def test_aggregate_with_filter(self):
+    ckpt_start_time = time.time()
     exp = eval_test_helper.test_experiment()
     root_dir = os.path.join(self.test_dir, 'test_aggregate_with_filter')
 
@@ -93,6 +140,7 @@ class CheckpointMonitorTest(unittest.TestCase):
         run,
         plugins=[plugin],
         checkpoint_pattern='checkpoint_*.jsonl',
+        ckpt_start_time=ckpt_start_time,
     )
     monitor.run()
 
@@ -127,6 +175,7 @@ class CheckpointMonitorTest(unittest.TestCase):
         if self.simulate_raise_on_experiment_complete:
           raise ValueError('experiment complete error')
 
+    ckpt_start_time = time.time()
     exp = eval_test_helper.test_evaluation()
     root_dir = os.path.join(self.test_dir, 'test_plugin_raise')
 
@@ -148,6 +197,7 @@ class CheckpointMonitorTest(unittest.TestCase):
         run,
         plugins=[TestPlugin(simulate_raise_on_example_complete=True)],
         checkpoint_pattern='checkpoint_*.jsonl',
+        ckpt_start_time=ckpt_start_time,
     ).run()
 
     with self.assertRaisesRegex(ValueError, 'experiment complete error'):
@@ -155,6 +205,7 @@ class CheckpointMonitorTest(unittest.TestCase):
           run,
           plugins=[TestPlugin(simulate_raise_on_experiment_complete=True)],
           checkpoint_pattern='checkpoint_*.jsonl',
+          ckpt_start_time=ckpt_start_time,
       ).run()
 
 
langfun/core/llms/__init__.py
CHANGED
@@ -65,6 +65,7 @@ from langfun.core.llms.google_genai import Gemini2ProExp_20250205
 from langfun.core.llms.google_genai import Gemini2FlashThinkingExp_20250121
 from langfun.core.llms.google_genai import GeminiExp_20241206
 from langfun.core.llms.google_genai import Gemini25FlashImagePreview
+from langfun.core.llms.google_genai import Gemini3ProImagePreview
 
 from langfun.core.llms.vertexai import VertexAIGemini
 from langfun.core.llms.vertexai import VertexAIGemini2Flash
@@ -92,6 +93,7 @@ from langfun.core.llms.vertexai import VertexAIGemini25Pro
 from langfun.core.llms.vertexai import VertexAIGemini25Flash
 from langfun.core.llms.vertexai import VertexAIGemini25FlashImagePreview
 from langfun.core.llms.vertexai import VertexAIGemini3ProPreview
+from langfun.core.llms.vertexai import VertexAIGemini3ProImagePreview
 
 # For backward compatibility.
 GeminiPro1_5 = Gemini15Pro
langfun/core/llms/gemini.py
CHANGED
@@ -177,6 +177,29 @@ SUPPORTED_MODELS = [
             max_tokens_per_minute=4_000_000,
         ),
     ),
+    # Gemini 3 Pro Image Preview
+    GeminiModelInfo(
+        model_id='gemini-3-pro-image-preview',
+        in_service=True,
+        experimental=True,
+        provider=pg.oneof(['Google GenAI', 'VertexAI']),
+        model_type='instruction-tuned',
+        description=(
+            'Gemini 3 Pro Image Preview for high-fidelity image generation,'
+            ' editing, and visual reasoning.'
+        ),
+        release_date=datetime.datetime(2025, 12, 9),
+        input_modalities=GeminiModelInfo.INPUT_IMAGE_TYPES
+        + GeminiModelInfo.INPUT_DOC_TYPES,
+        context_length=lf.ModelInfo.ContextLength(
+            max_input_tokens=65_536,
+            max_output_tokens=32_768,
+        ),
+        rate_limits=lf.ModelInfo.RateLimits(
+            max_requests_per_minute=200,
+            max_tokens_per_minute=1_000_000,
+        ),
+    ),
     # Gemini 2.5 Flash
     GeminiModelInfo(
         model_id='gemini-2.5-flash',
@@ -834,7 +857,10 @@ class Gemini(rest.REST):
       config['thinkingConfig'] = thinking_config_data
 
     # This is the new feature since Gemini 3.
-
+    # Skip for image generation models as they don't support mediaResolution.
+    if self.model_id.startswith('gemini-3') and not (
+        self.response_modalities and 'IMAGE' in self.response_modalities
+    ):
       config['mediaResolution'] = 'MEDIA_RESOLUTION_HIGH'
 
     if self.response_modalities:
langfun/core/llms/google_genai.py
CHANGED

@@ -125,6 +125,24 @@ class Gemini3ProPreview(GenAI):
   model = 'gemini-3-pro-preview'
 
 
+class Gemini3ProImagePreview(GenAI):
+  """Gemini 3 Pro Image Preview model for high-fidelity image generation.
+
+  This model supports:
+  - Text-to-image generation
+  - Image editing (multimodal input)
+  - Visual reasoning
+
+  Key Requirements:
+  - responseModalities must include 'IMAGE'
+  - Supported aspect ratios: 1:1, 16:9, 9:16, 4:3, 3:4
+  - Image sizes: 1K (default), 2K, 4K
+  """
+
+  model = 'gemini-3-pro-image-preview'
+  response_modalities = ['TEXT', 'IMAGE']
+
+
 class Gemini25FlashImagePreview(GenAI):
   """Gemini 2.5 Flash Image Preview model."""
   model = 'gemini-2.5-flash-image-preview'
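A hypothetical usage sketch for the new model class (not taken from the package; it assumes the usual langfun pattern of instantiating an LLM and calling it with a prompt, and the API key is a placeholder):

import langfun as lf

# Hypothetical usage sketch; assumes the common langfun calling convention.
lm = lf.llms.Gemini3ProImagePreview(api_key='<YOUR_API_KEY>')
response = lm('A watercolor painting of a lighthouse at dusk, 16:9.')
# With response_modalities=['TEXT', 'IMAGE'] set by the class, the returned
# message is expected to carry generated image content alongside any text.
print(response)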
langfun/core/llms/vertexai.py
CHANGED
@@ -220,6 +220,26 @@ class VertexAIGemini3ProPreview(VertexAIGemini): # pylint: disable=invalid-name
   location = 'global'
 
 
+class VertexAIGemini3ProImagePreview(VertexAIGemini):  # pylint: disable=invalid-name
+  """Gemini 3 Pro Image Preview model for high-fidelity image generation.
+
+  This model supports:
+  - Text-to-image generation
+  - Image editing (multimodal input)
+  - Visual reasoning
+
+  Key Requirements:
+  - Uses v1beta1 API endpoint
+  - responseModalities must include 'IMAGE'
+  - Supported aspect ratios: 1:1, 16:9, 9:16, 4:3, 3:4
+  - Image sizes: 1K (default), 2K, 4K
+  """
+
+  model = 'gemini-3-pro-image-preview'
+  location = 'global'
+  response_modalities = ['TEXT', 'IMAGE']
+
+
 class VertexAIGemini25Pro(VertexAIGemini):  # pylint: disable=invalid-name
   """Gemini 2.5 Pro GA model launched on 06/17/2025."""
 
langfun/core/modalities/mime.py
CHANGED
@@ -135,7 +135,20 @@ class Mime(lf.Modality):
       raise lf.ModalityError(
           f'MIME type {self.mime_type!r} cannot be converted to text.'
       )
-
+    content = self.to_bytes()
+    # Try UTF-8 first (most common encoding).
+    try:
+      return content.decode('utf-8')
+    except UnicodeDecodeError:
+      pass
+    # Check for UTF-16 BOM (0xff 0xfe or 0xfe 0xff).
+    if content[:2] in (b'\xff\xfe', b'\xfe\xff'):
+      try:
+        return content.decode('utf-16')
+      except UnicodeDecodeError:
+        pass
+    # Fallback: decode with error replacement to avoid crashing.
+    return content.decode('utf-8', errors='replace')
 
   def is_compatible(
       self, mime_types: str | Iterable[str]
langfun/core/modalities/mime_test.py
CHANGED

@@ -163,5 +163,53 @@ class CustomMimeTest(unittest.TestCase):
     )
 
 
+class ToTextEncodingTest(unittest.TestCase):
+  """Tests for to_text() encoding handling."""
+
+  def test_utf8_decoding(self):
+    """Test that valid UTF-8 content is decoded correctly."""
+    content = mime.Custom('text/plain', b'Hello, World!')
+    self.assertEqual(content.to_text(), 'Hello, World!')
+
+    # UTF-8 with multi-byte characters.
+    utf8_content = 'こんにちは'.encode('utf-8')
+    content = mime.Custom('text/plain', utf8_content)
+    self.assertEqual(content.to_text(), 'こんにちは')
+
+  def test_utf16_le_bom_decoding(self):
+    """Test that UTF-16 Little Endian with BOM is decoded correctly."""
+    # UTF-16 LE BOM: 0xff 0xfe
+    utf16_le_content = 'Hello'.encode('utf-16-le')
+    content_with_bom = b'\xff\xfe' + utf16_le_content
+    content = mime.Custom('text/plain', content_with_bom)
+    self.assertEqual(content.to_text(), 'Hello')
+
+  def test_utf16_be_bom_decoding(self):
+    """Test that UTF-16 Big Endian with BOM is decoded correctly."""
+    # UTF-16 BE BOM: 0xfe 0xff
+    utf16_be_content = 'Hello'.encode('utf-16-be')
+    content_with_bom = b'\xfe\xff' + utf16_be_content
+    content = mime.Custom('text/plain', content_with_bom)
+    self.assertEqual(content.to_text(), 'Hello')
+
+  def test_invalid_bytes_fallback_with_replacement(self):
+    """Test that invalid bytes are replaced with replacement character."""
+    # 0xff alone is invalid in UTF-8 and doesn't have UTF-16 BOM pattern.
+    invalid_content = b'\xff\xfdHello'
+    content = mime.Custom('text/plain', invalid_content)
+    result = content.to_text()
+    # Invalid bytes should be replaced with U+FFFD (replacement character).
+    self.assertIn('\ufffd', result)
+    self.assertIn('Hello', result)
+
+  def test_binary_mime_type_raises_error(self):
+    """Test that binary MIME types raise ModalityError."""
+    content = mime.Custom('application/octet-stream', b'\x00\x01\x02')
+    with self.assertRaisesRegex(
+        lf.ModalityError, 'cannot be converted to text'
+    ):
+      content.to_text()
+
+
 if __name__ == '__main__':
   unittest.main()
{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/RECORD
CHANGED

@@ -68,17 +68,17 @@ langfun/core/eval/patching.py,sha256=wJqqML_z_hXQQ65f9oJpdtiNEkUvwWWdNgGiIcV1Jq4
 langfun/core/eval/patching_test.py,sha256=8kCd54Egjju22FMgtJuxEsrXkW8ifs-UUBHtrCG1L6w,4775
 langfun/core/eval/scoring.py,sha256=1C7e7gR8Wai7M9oBXRZifntxy5HEik5qjVo9gY8B7KI,6423
 langfun/core/eval/scoring_test.py,sha256=UcBH0R6vAovZ0A4yM22s5cBHL1qVKASubrbu1t8dYBw,4529
-langfun/core/eval/v2/__init__.py,sha256=
-langfun/core/eval/v2/checkpointing.py,sha256=
+langfun/core/eval/v2/__init__.py,sha256=XbkBqoyJBH_khtAS01gP6_V4KnWLY3bFJ7D0rtHa1BU,1878
+langfun/core/eval/v2/checkpointing.py,sha256=ui4kOwOo_yu_ONzOho9Ri36NJOmYGqD1gYa6o1U7L9o,15463
 langfun/core/eval/v2/checkpointing_test.py,sha256=s_E94dOPNO1zYzXyQI37wvCF3suez-r4Nls9popN58w,9787
 langfun/core/eval/v2/config_saver.py,sha256=nsuG0pqTikIlsL-Mij6swteUBif-zxJUdGxTHZsOVeQ,1205
 langfun/core/eval/v2/config_saver_test.py,sha256=OD0zl26YHjNibFD67YxwrZ7-zT9V7p-3zLDItWBAgic,1261
 langfun/core/eval/v2/eval_test_helper.py,sha256=baew3-cqomy1p7mF1_Xw7AvEWUwCimi3J7-8Ay3eEPo,6539
-langfun/core/eval/v2/evaluation.py,sha256=
+langfun/core/eval/v2/evaluation.py,sha256=1T0lxTu9gy329Mq4ii16ktARbtvbBGY9IUtsUIkNXeY,30740
 langfun/core/eval/v2/evaluation_test.py,sha256=gurFzSfPECZ_FMQOnf3bzKOHmQ7C4IUxEfbyZy50bjM,7966
 langfun/core/eval/v2/example.py,sha256=VZeBqMWnfEtn1mmdPW2w2u2XbAWVll1q1-50qL8DjS8,11606
 langfun/core/eval/v2/example_test.py,sha256=RwtBcUumPBWynA8BLMoZetSHdgvFywlHXuyvInf1y_s,3576
-langfun/core/eval/v2/experiment.py,sha256=
+langfun/core/eval/v2/experiment.py,sha256=NpVRkMRi4IXt1qx9b3k_hwHfVLkBrvtYRlMH3ID8FBA,36758
 langfun/core/eval/v2/experiment_test.py,sha256=7prE4ASKlbwQIXiLzEqjgaF4yQDL7KjxX-dBUPT84VA,14145
 langfun/core/eval/v2/metric_values.py,sha256=WAL1BdHaU_oq7d_k1KyjhiQDK32dNLSyn1L2yEkz0o4,6040
 langfun/core/eval/v2/metric_values_test.py,sha256=5ffwnqrbLIBh1hdUl3L9mpJlUvsmd2VQ8UWPOJcQj4s,3630
@@ -88,21 +88,21 @@ langfun/core/eval/v2/progress.py,sha256=Cd79j8fhumW5QOuISiSXOJKOZ5-I9IkmGLgvqRmo
 langfun/core/eval/v2/progress_test.py,sha256=MzJ7wa65XYZ0chArA-lSg1eRSvQ_TzZJIHMk85Kwz7o,3208
 langfun/core/eval/v2/progress_tracking.py,sha256=yMYlOMJF8M4FUhyjGRkM6O6TXiMwKPsEn3wbpftxcss,6376
 langfun/core/eval/v2/progress_tracking_test.py,sha256=37v42y4kh2GfDXBrkugEupW6IRAzA774wwPJaOyefUs,2597
-langfun/core/eval/v2/reporting.py,sha256
+langfun/core/eval/v2/reporting.py,sha256=Z_tt_EfApPa-AcfYmfZ2818fk8eWK-EGl1fYlgxpCAk,8895
 langfun/core/eval/v2/reporting_test.py,sha256=q3LBfPk7jvEWXB3sdk2CycbMKqNRyXhs5z6BokfwDIE,6096
 langfun/core/eval/v2/runners/__init__.py,sha256=2TcCLW32OsmXQINcVKa2ZJY8Ca7j3NnT0yy9hXYUDn8,1115
-langfun/core/eval/v2/runners/base.py,sha256=
+langfun/core/eval/v2/runners/base.py,sha256=_ixOIxGxrrNKDLBxJlfjLHCzlkjxKUkJY_MO3CmzM14,14072
 langfun/core/eval/v2/runners/beam.py,sha256=LQK9bZCFJR9j9DJ-mAudhphumItGwXc5bbGwadl9kxY,11782
 langfun/core/eval/v2/runners/beam_test.py,sha256=cI5WaQQObnRrPnGjED3OFT3JXYOE3thQ640H08TG_dw,5306
-langfun/core/eval/v2/runners/ckpt_monitor.py,sha256=
-langfun/core/eval/v2/runners/ckpt_monitor_test.py,sha256=
+langfun/core/eval/v2/runners/ckpt_monitor.py,sha256=KaaDYvHNOewUrJqJ4FHjdMeS7okpX7FYdjCx558joPU,12071
+langfun/core/eval/v2/runners/ckpt_monitor_test.py,sha256=Xqd30PF0XIOrqBSZ53_7ozxYR3Wc3SiIaKuwwj1AXQ8,7176
 langfun/core/eval/v2/runners/debug.py,sha256=ExsBcAvmhFsaaS3VLjxE70HImHe2YVs0IpoefM01onY,1442
 langfun/core/eval/v2/runners/debug_test.py,sha256=kDWs4Fu7itzBxbRwFc-UKEP2hAV0iVFp2wWkEuZNEcg,2577
 langfun/core/eval/v2/runners/parallel.py,sha256=PSdOY3i2ot94TWVCZY0iJSWFAT0CCxa1wxk7KpI_GfI,7794
 langfun/core/eval/v2/runners/parallel_test.py,sha256=8M8OTpsDd-wQYZRRSPCYGkwjt7gUvkgze8NMCTKydUw,6146
 langfun/core/eval/v2/runners/sequential.py,sha256=hebMZd6EVraY9zAwariT9WfsWQyX5AYuRsFdRo-knKU,1631
 langfun/core/eval/v2/runners/sequential_test.py,sha256=apbNC0-Pi6r17_OQlHqqOZM0OVo1mZlaPk2B4vUteRg,6064
-langfun/core/llms/__init__.py,sha256=
+langfun/core/llms/__init__.py,sha256=KU00R0906yLWjSg_tquCna1CU_6z4XOIKMhLzzGE-Zc,10489
 langfun/core/llms/anthropic.py,sha256=6uE1EC9YWtbiFwZNNPEFv-QzeGQQ7G27kheTTE15Ewg,31175
 langfun/core/llms/anthropic_test.py,sha256=qA9vByp_cwwXNlXzcwHpPWFnO9lfFo8NKfDi5nBNqgI,9052
 langfun/core/llms/azure_openai.py,sha256=LEc7-ay2fOOCwwL3SfxDr3KCdH8-2i1EtD-PBvr4kfk,2777
@@ -113,9 +113,9 @@ langfun/core/llms/deepseek.py,sha256=jQsotTUk4161EJIcoQOV7iOWBZfQ3Ukh9GOh31A0HYU
 langfun/core/llms/deepseek_test.py,sha256=DvROWPlDuow5E1lfoSkhyGt_ELA19JoQoDsTnRgDtTg,1847
 langfun/core/llms/fake.py,sha256=NH8Zlezmx3eacao4D7wihrZjRuyBJuHR5rdyp94PrAw,4409
 langfun/core/llms/fake_test.py,sha256=lC-C2TpEsnf2kmZpa3OiH2H944I4hMWTAaHEXzRj1DU,7855
-langfun/core/llms/gemini.py,sha256=
+langfun/core/llms/gemini.py,sha256=_GMcbkfaSWkMGiK1d8DfpQzRiSCZrd092VhBMfRZ9H0,33243
 langfun/core/llms/gemini_test.py,sha256=bv-Ulv3vjGhxd8nJD_UDhWDMK3K3TM7b5powBcYrv1c,10844
-langfun/core/llms/google_genai.py,sha256=
+langfun/core/llms/google_genai.py,sha256=hodpibBtcxg8pU-XrEsPBkhzGsjSYrEUHyz0w9RWwCc,6986
 langfun/core/llms/google_genai_test.py,sha256=NKNtpebArQ9ZR7Qsnhd2prFIpMjleojy6o6VMXkJ1zY,1502
 langfun/core/llms/groq.py,sha256=O-kv2_R_IkC8wGIT086xin8jYi7QnsakPCGVLR58lMw,12517
 langfun/core/llms/groq_test.py,sha256=P4EgexCqsh4K2x11w0UL_vz-YYNaPdQU0WsDAdnTRQ8,2045
@@ -127,7 +127,7 @@ langfun/core/llms/openai_compatible_test.py,sha256=8yr_jGmHCDyMwp-VcJwThFgh7B_56
 langfun/core/llms/openai_test.py,sha256=1o5rxiHZj-UEgugWN8JmfJtznhUmDywy6dU3Euax-Ts,2639
 langfun/core/llms/rest.py,sha256=eR-M1st5ZnzuitICyYfxSRcmQWmy_eeOoe2bHLalzN0,5351
 langfun/core/llms/rest_test.py,sha256=_zM7nV8DEVyoXNiQOnuwJ917mWjki0614H88rNmDboE,5020
-langfun/core/llms/vertexai.py,sha256=
+langfun/core/llms/vertexai.py,sha256=KjiMrEjWgoJct9QQTQKQ_8fzZ5SbpVNDyZpvHgHZj3g,22134
 langfun/core/llms/vertexai_test.py,sha256=_e-acnNBAf9C3WO6i1b2J_mhRzdDdYQTorD9hIVZKOg,5034
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=qLGlEMi5cfsDxRTsOWrmwbxjvvwUaq4Y8MxlXr69wpw,5060
@@ -150,8 +150,8 @@ langfun/core/modalities/audio.py,sha256=cb95FzDE-IIQf7kXy7D4AAXtziQF0FYkZUe4pw5E
 langfun/core/modalities/audio_test.py,sha256=tW1vEy-Cumhf-HgDgCxlSNZqgJb2HTgqOixGWLiwOmw,2065
 langfun/core/modalities/image.py,sha256=SS_FSrYSjb1uL0IAVyUu2TZX2-FcI_x9ZTB8im5Amzk,4030
 langfun/core/modalities/image_test.py,sha256=91LpEOvr_v6SGdtzSuCw3ks62L8vxJVIZwgAKxk7UmY,8476
-langfun/core/modalities/mime.py,sha256=
-langfun/core/modalities/mime_test.py,sha256=
+langfun/core/modalities/mime.py,sha256=9YK-uRGYN6YG3ux7zSYl5XGZEDLBiXyTax1cLMhissY,11255
+langfun/core/modalities/mime_test.py,sha256=e6p-XW47yNXbvPS2R4-0afZd84bOqCc6DnIzqZCkPZk,8391
 langfun/core/modalities/pdf.py,sha256=rc-uIKRVkTTa0j7jC6WRwKM9WqiS5NxF-H6PPunVeXM,1231
 langfun/core/modalities/pdf_test.py,sha256=ulZ0FbnlsU0wkrdckJ4ONZPTYRyMPO9Aob1UO6FXygk,1950
 langfun/core/modalities/video.py,sha256=ZopyDf-8bi0V-QZDAg-_8S3HkMNiEQL9aWmGuI6Fkrs,1506
@@ -210,8 +210,8 @@ langfun/env/event_handlers/event_logger.py,sha256=ga8RN8qjwtAOCnV_MnhNPTktN8EJ-x
 langfun/env/event_handlers/event_logger_test.py,sha256=qSAcirtRz00H-1RL9ShELBiZKiPxsk_v6cVA6XdAk4k,9274
 langfun/env/event_handlers/metric_writer.py,sha256=7ZrUp0rYvs7TfNpQ16Xbxg8vp-6ZbjuJ-qrhVSbhv2I,21085
 langfun/env/event_handlers/metric_writer_test.py,sha256=bjdYXoXMPWpWz_-HUPM6vFP1ez5G386u0fmPfe-SR_M,5952
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
+langfun-0.1.2.dev202512150805.dist-info/licenses/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.1.2.dev202512150805.dist-info/METADATA,sha256=AEsimQbtMKxj8Kja2fIIgEXhoBsmqLwq-1PF3i_WlFg,7522
+langfun-0.1.2.dev202512150805.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+langfun-0.1.2.dev202512150805.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.1.2.dev202512150805.dist-info/RECORD,,
{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/WHEEL
RENAMED
File without changes

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/licenses/LICENSE
RENAMED
File without changes

{langfun-0.1.2.dev202512040805.dist-info → langfun-0.1.2.dev202512150805.dist-info}/top_level.txt
RENAMED
File without changes