langfun 0.1.2.dev202510230805__py3-none-any.whl → 0.1.2.dev202510250803__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langfun might be problematic.

Files changed (44)
  1. langfun/core/concurrent_test.py +1 -0
  2. langfun/core/data/conversion/anthropic_test.py +8 -6
  3. langfun/core/data/conversion/gemini_test.py +12 -9
  4. langfun/core/data/conversion/openai.py +134 -30
  5. langfun/core/data/conversion/openai_test.py +161 -17
  6. langfun/core/eval/base_test.py +4 -4
  7. langfun/core/eval/v2/progress_tracking_test.py +3 -0
  8. langfun/core/langfunc_test.py +6 -4
  9. langfun/core/language_model.py +15 -6
  10. langfun/core/language_model_test.py +9 -3
  11. langfun/core/llms/__init__.py +7 -1
  12. langfun/core/llms/anthropic.py +130 -0
  13. langfun/core/llms/cache/base.py +3 -1
  14. langfun/core/llms/cache/in_memory_test.py +14 -4
  15. langfun/core/llms/deepseek.py +1 -1
  16. langfun/core/llms/gemini.py +2 -5
  17. langfun/core/llms/groq.py +1 -1
  18. langfun/core/llms/llama_cpp.py +1 -1
  19. langfun/core/llms/openai.py +7 -2
  20. langfun/core/llms/openai_compatible.py +136 -27
  21. langfun/core/llms/openai_compatible_test.py +207 -20
  22. langfun/core/llms/openai_test.py +0 -2
  23. langfun/core/llms/vertexai.py +12 -2
  24. langfun/core/message.py +78 -44
  25. langfun/core/message_test.py +56 -81
  26. langfun/core/modalities/__init__.py +8 -0
  27. langfun/core/modalities/mime.py +9 -0
  28. langfun/core/modality.py +104 -27
  29. langfun/core/modality_test.py +42 -12
  30. langfun/core/sampling_test.py +20 -4
  31. langfun/core/structured/completion.py +2 -7
  32. langfun/core/structured/completion_test.py +23 -43
  33. langfun/core/structured/mapping.py +4 -13
  34. langfun/core/structured/querying.py +13 -11
  35. langfun/core/structured/querying_test.py +65 -29
  36. langfun/core/template.py +39 -13
  37. langfun/core/template_test.py +83 -17
  38. langfun/env/event_handlers/metric_writer_test.py +3 -3
  39. langfun/env/load_balancers_test.py +2 -2
  40. {langfun-0.1.2.dev202510230805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/METADATA +1 -1
  41. {langfun-0.1.2.dev202510230805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/RECORD +44 -44
  42. {langfun-0.1.2.dev202510230805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/WHEEL +0 -0
  43. {langfun-0.1.2.dev202510230805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/licenses/LICENSE +0 -0
  44. {langfun-0.1.2.dev202510230805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/top_level.txt +0 -0

langfun/core/concurrent_test.py
@@ -608,6 +608,7 @@ class ConcurrentMapTest(unittest.TestCase):
         ],
     )
     concurrent.ProgressBar.uninstall(bar_id)
+    concurrent.ProgressBar.refresh()
     self.assertIn('100%', string_io.getvalue())


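The one-line addition above exists because tqdm repaints lazily: a test that redirects stderr can reach its assertion before the bar's final '100%' frame is flushed. A minimal sketch of the resulting pattern; the ProgressBar.install/update signatures here are assumed from usage, not confirmed by this diff:

from langfun.core import concurrent

bar_id = concurrent.ProgressBar.install(None, 2)  # assumed signature: (label, total)
concurrent.ProgressBar.update(bar_id, 2)          # assumed signature: (bar_id, delta)
concurrent.ProgressBar.uninstall(bar_id)
concurrent.ProgressBar.refresh()  # force a final repaint so '100%' reaches stderr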

langfun/core/data/conversion/anthropic_test.py
@@ -253,14 +253,16 @@ class AnthropicConversionTest(unittest.TestCase):
     )
     self.assertEqual(
         m.text,
-        'What are the common words from <<[[obj0]]>> and <<[[obj1]]>> ?'
+        'What are the common words from <<[[image:dc6e1e43]]>> and'
+        ' <<[[pdf:5daf5f31]]>> ?'
     )
-    self.assertIsInstance(m.obj0, lf_modalities.Image)
-    self.assertEqual(m.obj0.mime_type, 'image/png')
-    self.assertEqual(m.obj0.to_bytes(), image_content)
+    modalities = m.modalities()
+    self.assertIsInstance(modalities[0], lf_modalities.Image)
+    self.assertEqual(modalities[0].mime_type, 'image/png')
+    self.assertEqual(modalities[0].content, image_content)

-    self.assertIsInstance(m.obj1, lf_modalities.PDF)
-    self.assertEqual(m.obj1.to_bytes(), pdf_content)
+    self.assertIsInstance(modalities[1], lf_modalities.PDF)
+    self.assertEqual(modalities[1].content, pdf_content)


 if __name__ == '__main__':

langfun/core/data/conversion/gemini_test.py
@@ -225,19 +225,22 @@ class GeminiConversionTest(unittest.TestCase):
     self.assertEqual(
         m.text,
         (
-            'What are the common words from <<[[obj0]]>> , <<[[obj1]]>> '
-            'and <<[[obj2]]>> ?'
+            'What are the common words from <<[[image:dc6e1e43]]>> , '
+            '<<[[pdf:4dc12e93]]>> and <<[[video:7e169565]]>> ?'
         )
     )
-    self.assertIsInstance(m.obj0, lf_modalities.Image)
-    self.assertEqual(m.obj0.mime_type, 'image/png')
-    self.assertEqual(m.obj0.to_bytes(), image_content)
+    self.assertIsInstance(m.modalities()[0], lf_modalities.Image)
+    self.assertEqual(m.modalities()[0].mime_type, 'image/png')
+    self.assertEqual(m.modalities()[0].to_bytes(), image_content)

-    self.assertIsInstance(m.obj1, lf_modalities.PDF)
-    self.assertEqual(m.obj1.uri, 'https://my.pdf')
+    self.assertIsInstance(m.modalities()[1], lf_modalities.PDF)
+    self.assertEqual(m.modalities()[1].uri, 'https://my.pdf')

-    self.assertIsInstance(m.obj2, lf_modalities.Video)
-    self.assertEqual(m.obj2.uri, 'https://www.youtube.com/watch?v=abcd')
+    self.assertIsInstance(m.modalities()[2], lf_modalities.Video)
+    self.assertEqual(
+        m.modalities()[2].uri,
+        'https://www.youtube.com/watch?v=abcd'
+    )


 if __name__ == '__main__':
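The anthropic_test.py and gemini_test.py updates above (and the openai_test.py changes below) track the same API change in this release: modality placeholders in message text are now typed content-hash ids such as <<[[image:dc6e1e43]]>> rather than positional names such as <<[[obj0]]>>, and referred objects are read via Message.modalities() instead of attribute access. A minimal sketch, with image_bytes standing in for real PNG data:

import langfun.core as lf
from langfun.core import modalities as lf_modalities

image = lf_modalities.Image.from_bytes(image_bytes)  # image_bytes: your own PNG bytes
m = lf.Template('What is this {{image}}?', image=image).render()
assert m.text == f'What is this <<[[{image.id}]]>> ?'
assert isinstance(m.modalities()[0], lf_modalities.Image)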

langfun/core/data/conversion/openai.py
@@ -19,10 +19,13 @@ import langfun.core as lf
 from langfun.core import modalities as lf_modalities


-class OpenAIMessageConverter(lf.MessageConverter):
-  """Converter to OpenAI API."""
+class OpenAIChatCompletionAPIMessageConverter(lf.MessageConverter):
+  """Converter to OpenAI ChatCompletion API.

-  FORMAT_ID = 'openai'
+  See https://platform.openai.com/docs/api-reference/chat
+  """
+
+  FORMAT_ID = 'openai_chat_completion_api'

   chunk_preprocessor: Annotated[
       Callable[[str | lf.Modality], Any] | None,
@@ -41,22 +44,29 @@ class OpenAIMessageConverter(lf.MessageConverter):
         chunk = self.chunk_preprocessor(chunk)
         if chunk is None:
           continue
-
-      if isinstance(chunk, str):
-        item = dict(type='text', text=chunk)
-      elif isinstance(chunk, lf_modalities.Image):
-        item = dict(
-            type='image_url', image_url=dict(url=chunk.embeddable_uri)
-        )
-      # TODO(daiyip): Support audio_input.
-      else:
-        raise ValueError(f'Unsupported content type: {chunk!r}.')
-      parts.append(item)
+      parts.append(self.chunk_to_json(type(message), chunk))
     return dict(
         role=self.get_role(message),
         content=parts,
     )

+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    del message_cls
+    if isinstance(chunk, str):
+      return dict(type='text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type='image_url', image_url=dict(url=chunk.embeddable_uri)
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
   def get_role(self, message: lf.Message) -> str:
     """Returns the role of the message."""
     if isinstance(message, lf.SystemMessage):
@@ -92,40 +102,134 @@ class OpenAIMessageConverter(lf.MessageConverter):
     assert isinstance(content, list)
     chunks = []
     for item in content:
-      t = self._safe_read(item, 'type')
-      if t == 'text':
-        chunk = self._safe_read(item, 'text')
-      elif t == 'image_url':
-        chunk = lf_modalities.Image.from_uri(
-            self._safe_read(self._safe_read(item, 'image_url'), 'url')
-        )
-      else:
-        raise ValueError(f'Unsupported content type: {item!r}.')
-      chunks.append(chunk)
+      chunks.append(self.json_to_chunk(item))
     return message_cls.from_chunks(chunks)

+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t == 'text':
+      return self._safe_read(json, 'text')
+    elif t == 'image_url':
+      return lf_modalities.Image.from_uri(
+          self._safe_read(self._safe_read(json, 'image_url'), 'url')
+      )
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+

-def _as_openai_format(
+def _as_openai_chat_completion_api_format(
     self,
     chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
     **kwargs
 ) -> dict[str, Any]:
   """Returns an OpenAI format message."""
-  return OpenAIMessageConverter(
+  return OpenAIChatCompletionAPIMessageConverter(
       chunk_preprocessor=chunk_preprocessor, **kwargs
   ).to_value(self)


 @classmethod
-def _from_openai_format(
+def _from_openai_chat_completion_api_format(
     cls,
     openai_message: dict[str, Any],
     **kwargs
 ) -> lf.Message:
   """Creates a Langfun message from the OpenAI format message."""
   del cls
-  return OpenAIMessageConverter(**kwargs).from_value(openai_message)
+  return OpenAIChatCompletionAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)

 # Set shortcut methods in lf.Message.
-lf.Message.as_openai_format = _as_openai_format
-lf.Message.from_openai_format = _from_openai_format
+lf.Message.as_openai_chat_completion_api_format = (
+    _as_openai_chat_completion_api_format
+)
+
+lf.Message.from_openai_chat_completion_api_format = (
+    _from_openai_chat_completion_api_format
+)
+
+
+#
+# OpenAI Responses API message converter.
+#
+
+
+class OpenAIResponsesAPIMessageConverter(
+    OpenAIChatCompletionAPIMessageConverter
+):
+  """Converter to OpenAI Responses API.
+
+  See https://platform.openai.com/docs/api-reference/responses/create
+  """
+
+  FORMAT_ID = 'openai_responses_api'
+
+  def to_value(self, message: lf.Message) -> dict[str, Any]:
+    """Converts a Langfun message to OpenAI API."""
+    message_json = super().to_value(message)
+    message_json['type'] = 'message'
+    return message_json
+
+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    source = 'output' if issubclass(message_cls, lf.AIMessage) else 'input'
+
+    if isinstance(chunk, str):
+      return dict(type=f'{source}_text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type=f'{source}_image', image_url=chunk.embeddable_uri
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t in ('input_text', 'output_text'):
+      return self._safe_read(json, 'text')
+    elif t in ('input_image', 'output_image'):
+      return lf_modalities.Image.from_uri(self._safe_read(json, 'image_url'))
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+
+
+def _as_openai_responses_api_format(
+    self,
+    chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
+    **kwargs
+) -> dict[str, Any]:
+  """Returns an OpenAI format message."""
+  return OpenAIResponsesAPIMessageConverter(
+      chunk_preprocessor=chunk_preprocessor, **kwargs
+  ).to_value(self)
+
+
+@classmethod
+def _from_openai_responses_api_format(
+    cls,
+    openai_message: dict[str, Any],
+    **kwargs
+) -> lf.Message:
+  """Creates a Langfun message from the OpenAI format message."""
+  del cls
+  return OpenAIResponsesAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)
+
+
+# Set shortcut methods in lf.Message.
+lf.Message.as_openai_responses_api_format = (
+    _as_openai_responses_api_format
+)
+
+lf.Message.from_openai_responses_api_format = (
+    _from_openai_responses_api_format
+)
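Net effect of the converter changes above: the single 'openai' format is split into two registered formats, one per OpenAI API. A minimal usage sketch mirroring the expectations in openai_test.py below (it assumes the conversion module has been imported so both FORMAT_IDs are registered):

import langfun.core as lf

m = lf.UserMessage('hi')
assert m.as_format('openai_chat_completion_api') == {
    'role': 'user',
    'content': [{'type': 'text', 'text': 'hi'}],
}
assert m.as_format('openai_responses_api') == {
    'type': 'message',
    'role': 'user',
    'content': [{'type': 'input_text', 'text': 'hi'}],
}
# Round trip back into a Langfun message:
assert lf.Message.from_value(
    {'role': 'user', 'content': [{'type': 'input_text', 'text': 'hi'}]},
    format='openai_responses_api',
) == lf.UserMessage('hi')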

langfun/core/data/conversion/openai_test.py
@@ -30,25 +30,25 @@ image_content = (
 )


-class OpenAIConversionTest(unittest.TestCase):
+class OpenAIChatCompletionAPIConverterTest(unittest.TestCase):

   def test_as_format_with_role(self):
     self.assertEqual(
-        lf.UserMessage('hi').as_format('openai'),
+        lf.UserMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.AIMessage('hi').as_format('openai'),
+        lf.AIMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'assistant',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.SystemMessage('hi').as_format('openai'),
+        lf.SystemMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'system',
             'content': [{'type': 'text', 'text': 'hi'}],
@@ -60,7 +60,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('openai'),
+        ).render().as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [
@@ -90,7 +90,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_openai_format(
+        ).render().as_openai_chat_completion_api_format(
             chunk_preprocessor=lambda x: x if isinstance(x, str) else None
         ),
         {
@@ -114,7 +114,7 @@ class OpenAIConversionTest(unittest.TestCase):
             {
                 'content': 'this is a text',
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('this is a text'),
     )
@@ -126,7 +126,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'user',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.UserMessage('hi'),
     )
@@ -136,7 +136,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'assistant',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('hi'),
     )
@@ -146,7 +146,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'system',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.SystemMessage('hi'),
     )
@@ -156,21 +156,165 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'function',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         )

   def test_from_value_with_image(self):
-    m = lf.Message.from_openai_format(
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_chat_completion_api_format(
+        lf.Template(
+            'What is this {{image}}?',
+            image=image
+        ).render().as_format('openai_chat_completion_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.images[0], lf_modalities.Image)
+    self.assertEqual(m.images[0].mime_type, 'image/png')
+    self.assertEqual(m.images[0].to_bytes(), image_content)
+
+
+class OpenAIResponsesAPIMessageConverterTest(unittest.TestCase):
+
+  def test_as_format_with_role(self):
+    self.assertEqual(
+        lf.UserMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.AIMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'assistant',
+            'content': [{'type': 'output_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.SystemMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'system',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+
+  def test_as_format_with_image(self):
+    self.assertEqual(
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('openai'),
+        ).render().as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_image',
+                    'image_url': (
+                        'data:image/png;base64,'
+                        + base64.b64encode(image_content).decode('utf-8')
+                    )
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
     )
-    self.assertEqual(m.text, 'What is this <<[[obj0]]>> ?')
-    self.assertIsInstance(m.obj0, lf_modalities.Image)
-    self.assertEqual(m.obj0.mime_type, 'image/png')
-    self.assertEqual(m.obj0.to_bytes(), image_content)

+  def test_as_format_with_chunk_preprocessor(self):
+    self.assertEqual(
+        lf.Template(
+            'What is this {{image}}?',
+            image=lf_modalities.Image.from_bytes(image_content)
+        ).render().as_openai_responses_api_format(
+            chunk_preprocessor=lambda x: x if isinstance(x, str) else None
+        ),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
+    )
+
+  def test_from_value_with_simple_text(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'content': 'this is a text',
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('this is a text'),
+    )
+
+  def test_from_value_with_role(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'user',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.UserMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'assistant',
+                'content': [{'type': 'output_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'system',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.SystemMessage('hi'),
+    )
+    with self.assertRaisesRegex(ValueError, 'Unsupported role: .*'):
+      lf.Message.from_value(
+          {
+              'role': 'function',
+              'content': [{'type': 'input_text', 'text': 'hi'}],
+          },
+          format='openai_responses_api',
+      )
+
+  def test_from_value_with_image(self):
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_responses_api_format(
+        lf.Template(
+            'What is this {{image}}?', image=image
+        ).render().as_format('openai_responses_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.modalities()[0], lf_modalities.Image)
+    self.assertEqual(m.modalities()[0].content, image_content)

 if __name__ == '__main__':
   unittest.main()

langfun/core/eval/base_test.py
@@ -101,7 +101,7 @@ class EvaluationTest(unittest.TestCase):
     self.assertEqual(s.dir, os.path.join(s.root_dir, s.id))
     self.assertEqual(s.hash, s.clone().hash)
     # Test persistent hash.
-    self.assertEqual(s.hash, 'e43392e4')
+    self.assertEqual(s.hash, 'ee958159')
     self.assertEqual(
         s.hash, s.clone(override={'max_workers': 2, 'lm.timeout': 20}).hash
     )
@@ -211,7 +211,7 @@ class EvaluationTest(unittest.TestCase):
         s.result,
         dict(
             experiment_setup=dict(
-                id='Evaluation@2fbf1b05',
+                id='Evaluation@27a702cb',
                 dir=s.dir,
                 model='StaticSequence',
                 prompt_template='{{example.question}}',
@@ -376,7 +376,7 @@ class EvaluationTest(unittest.TestCase):
         s.children[0].dir, os.path.join(s.root_dir, s.children[0].id)
     )
     # Test persistent hash.
-    self.assertEqual(s.hash, 'de23bf31')
+    self.assertEqual(s.hash, 'f47532a7')

     summary = s.run(verbose=True)
     self.assertEqual(len(summary.evaluations), 2)
@@ -526,7 +526,7 @@ class SuiteTest(unittest.TestCase):
         lm=lm
     )
     # Test for persistent hash.
-    self.assertEqual(s.hash, '1c42f93e')
+    self.assertEqual(s.hash, '4bd6a2f5')
     s.run()
     expected = {
         s.children[0].id: dict(

langfun/core/eval/v2/progress_tracking_test.py
@@ -18,6 +18,7 @@ import sys
 import tempfile
 import unittest

+from langfun.core import concurrent as lf_concurrent
 from langfun.core import console as lf_console
 from langfun.core.eval.v2 import eval_test_helper
 from langfun.core.eval.v2 import progress_tracking  # pylint: disable=unused-import
@@ -51,6 +52,7 @@ class TqdmProgressTrackerTest(unittest.TestCase):
     with contextlib.redirect_stderr(string_io):
       _ = experiment.run(root_dir, 'new', plugins=[])
     sys.stderr.flush()
+    lf_concurrent.ProgressBar.refresh()
     self.assertIn('All: 100%', string_io.getvalue())

   def test_with_example_ids(self):
@@ -62,6 +64,7 @@ class TqdmProgressTrackerTest(unittest.TestCase):
     with contextlib.redirect_stderr(string_io):
       _ = experiment.run(root_dir, 'new', example_ids=[1], plugins=[])
     sys.stderr.flush()
+    lf_concurrent.ProgressBar.refresh()
     self.assertIn('All: 100%', string_io.getvalue())



langfun/core/langfunc_test.py
@@ -82,7 +82,7 @@ class LangFuncCallTest(unittest.TestCase):

     i = l.render()
     self.assertEqual(i, 'Hello')
-    self.assertEqual(i, message.UserMessage('Hello'))
+    self.assertEqual(i, message.UserMessage('Hello', __template_input__={}))
     self.assertEqual(i.tags, ['rendered'])

     r = l()
@@ -96,7 +96,9 @@ class LangFuncCallTest(unittest.TestCase):
     self.assertEqual(r.tags, ['lm-response', 'lm-output'])
     self.assertEqual(
         r.source,
-        message.UserMessage('Hello', metadata=dict(cache_seed=0))
+        message.UserMessage(
+            'Hello', metadata=dict(cache_seed=0, __template_input__={})
+        )
     )
     self.assertEqual(r.source.tags, ['rendered', 'lm-input'])
@@ -107,8 +109,8 @@ class LangFuncCallTest(unittest.TestCase):
         ' lm=ExcitedEchoer(sampling_options=LMSamplingOptions(temperature=None,'
         ' max_tokens=None, n=1, top_k=40, top_p=None, stop=None,'
         ' random_seed=None, logprobs=False, top_logprobs=None,'
-        ' max_thinking_tokens=None, reasoning_effort=None), cache=None,'
-        ' max_concurrency=None, timeout=120.0, max_attempts=5,'
+        ' max_thinking_tokens=None, reasoning_effort=None, extras={}),'
+        ' cache=None, max_concurrency=None, timeout=120.0, max_attempts=5,'
         ' retry_interval=(5, 60), exponential_backoff=True,'
         ' max_retry_interval=300, debug=False))',
     )

langfun/core/language_model.py
@@ -584,6 +584,15 @@ class LMSamplingOptions(component.Component):
       ),
   ] = None

+  extras: Annotated[
+      dict[str, Any],
+      (
+          'Extra arguments (e.g. configuration for tool calls) to pass to '
+          'the model. This is model-specific, please check model '
+          'implementation to see how to use this.'
+      ),
+  ] = {}
+
   def cache_key(self) -> tuple[Any, ...]:
     """Returns a tuple of current values as cache key."""
     return (
@@ -1244,11 +1253,11 @@ class LanguageModel(component.Component):
         title=f'\n[{call_counter}] PROMPT SENT TO LM{title_suffix}:',
         color='green',
     )
-    referred_modalities = prompt.referred_modalities()
-    if referred_modalities:
+    if prompt.referred_modalities:
       console.write(
           pg.object_utils.kvlist_str(
-              [(k, repr(v), None) for k, v in referred_modalities.items()]
+              [(k, repr(v), None)
+               for k, v in prompt.referred_modalities.items()]
           ),
           title=f'\n[{call_counter}] MODALITY OBJECTS SENT TO LM:',
          color='green',
@@ -1334,9 +1343,9 @@ class LanguageModel(component.Component):
         color='green',
     )
     if isinstance(prompt, list):
-      referred_modalities_lst = [p.referred_modalities() for p in prompt]
+      referred_modalities_lst = [p.referred_modalities for p in prompt]
     else:
-      referred_modalities_lst = [prompt.referred_modalities(),]
+      referred_modalities_lst = [prompt.referred_modalities,]
     if referred_modalities_lst:
       for referred_modalities in referred_modalities_lst:
         console.write(
@@ -1411,7 +1420,7 @@ class LanguageModel(component.Component):
         title=f'\n[{call_counter}] PROMPT TO TOKENIZE:',
         color='green',
     )
-    referred_modalities_lst = [prompt.referred_modalities(),]
+    referred_modalities_lst = [prompt.referred_modalities,]
     if referred_modalities_lst:
       for referred_modalities in referred_modalities_lst:
         console.write(
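
The new LMSamplingOptions.extras field above is a pass-through dict for model-specific arguments; its interpretation is up to each model implementation, per the field's own docstring. A minimal sketch with a hypothetical key:

import langfun.core as lf

options = lf.LMSamplingOptions(
    extras={'tool_choice': 'auto'},  # hypothetical key; meaning depends on the backend
)

The same hunks also show Message.referred_modalities changing from a method to a property, which is why the debug helpers above drop the call parentheses.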