langfun-0.1.2.dev202509120804-py3-none-any.whl → langfun-0.1.2.dev202512150805-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (162)
  1. langfun/__init__.py +1 -1
  2. langfun/core/__init__.py +7 -1
  3. langfun/core/agentic/__init__.py +8 -1
  4. langfun/core/agentic/action.py +740 -112
  5. langfun/core/agentic/action_eval.py +9 -2
  6. langfun/core/agentic/action_test.py +189 -24
  7. langfun/core/async_support.py +104 -5
  8. langfun/core/async_support_test.py +23 -0
  9. langfun/core/coding/python/correction.py +19 -9
  10. langfun/core/coding/python/execution.py +14 -12
  11. langfun/core/coding/python/generation.py +21 -16
  12. langfun/core/coding/python/sandboxing.py +23 -3
  13. langfun/core/component.py +42 -3
  14. langfun/core/concurrent.py +70 -6
  15. langfun/core/concurrent_test.py +9 -2
  16. langfun/core/console.py +1 -1
  17. langfun/core/data/conversion/anthropic.py +12 -3
  18. langfun/core/data/conversion/anthropic_test.py +8 -6
  19. langfun/core/data/conversion/gemini.py +11 -2
  20. langfun/core/data/conversion/gemini_test.py +48 -9
  21. langfun/core/data/conversion/openai.py +145 -31
  22. langfun/core/data/conversion/openai_test.py +161 -17
  23. langfun/core/eval/base.py +48 -44
  24. langfun/core/eval/base_test.py +5 -5
  25. langfun/core/eval/matching.py +5 -2
  26. langfun/core/eval/patching.py +3 -3
  27. langfun/core/eval/scoring.py +4 -3
  28. langfun/core/eval/v2/__init__.py +3 -0
  29. langfun/core/eval/v2/checkpointing.py +148 -46
  30. langfun/core/eval/v2/checkpointing_test.py +9 -2
  31. langfun/core/eval/v2/config_saver.py +37 -0
  32. langfun/core/eval/v2/config_saver_test.py +36 -0
  33. langfun/core/eval/v2/eval_test_helper.py +104 -3
  34. langfun/core/eval/v2/evaluation.py +102 -19
  35. langfun/core/eval/v2/evaluation_test.py +9 -3
  36. langfun/core/eval/v2/example.py +50 -40
  37. langfun/core/eval/v2/example_test.py +16 -8
  38. langfun/core/eval/v2/experiment.py +95 -20
  39. langfun/core/eval/v2/experiment_test.py +19 -0
  40. langfun/core/eval/v2/metric_values.py +31 -3
  41. langfun/core/eval/v2/metric_values_test.py +32 -0
  42. langfun/core/eval/v2/metrics.py +157 -44
  43. langfun/core/eval/v2/metrics_test.py +39 -18
  44. langfun/core/eval/v2/progress.py +31 -1
  45. langfun/core/eval/v2/progress_test.py +27 -0
  46. langfun/core/eval/v2/progress_tracking.py +13 -5
  47. langfun/core/eval/v2/progress_tracking_test.py +9 -1
  48. langfun/core/eval/v2/reporting.py +88 -71
  49. langfun/core/eval/v2/reporting_test.py +24 -6
  50. langfun/core/eval/v2/runners/__init__.py +30 -0
  51. langfun/core/eval/v2/{runners.py → runners/base.py} +73 -180
  52. langfun/core/eval/v2/runners/beam.py +354 -0
  53. langfun/core/eval/v2/runners/beam_test.py +153 -0
  54. langfun/core/eval/v2/runners/ckpt_monitor.py +350 -0
  55. langfun/core/eval/v2/runners/ckpt_monitor_test.py +213 -0
  56. langfun/core/eval/v2/runners/debug.py +40 -0
  57. langfun/core/eval/v2/runners/debug_test.py +76 -0
  58. langfun/core/eval/v2/runners/parallel.py +243 -0
  59. langfun/core/eval/v2/runners/parallel_test.py +182 -0
  60. langfun/core/eval/v2/runners/sequential.py +47 -0
  61. langfun/core/eval/v2/runners/sequential_test.py +169 -0
  62. langfun/core/langfunc.py +45 -130
  63. langfun/core/langfunc_test.py +7 -5
  64. langfun/core/language_model.py +189 -36
  65. langfun/core/language_model_test.py +54 -3
  66. langfun/core/llms/__init__.py +14 -1
  67. langfun/core/llms/anthropic.py +157 -2
  68. langfun/core/llms/azure_openai.py +29 -17
  69. langfun/core/llms/cache/base.py +25 -3
  70. langfun/core/llms/cache/in_memory.py +48 -7
  71. langfun/core/llms/cache/in_memory_test.py +14 -4
  72. langfun/core/llms/compositional.py +25 -1
  73. langfun/core/llms/deepseek.py +30 -2
  74. langfun/core/llms/fake.py +32 -1
  75. langfun/core/llms/gemini.py +90 -12
  76. langfun/core/llms/gemini_test.py +110 -0
  77. langfun/core/llms/google_genai.py +52 -1
  78. langfun/core/llms/groq.py +28 -3
  79. langfun/core/llms/llama_cpp.py +23 -4
  80. langfun/core/llms/openai.py +120 -3
  81. langfun/core/llms/openai_compatible.py +148 -27
  82. langfun/core/llms/openai_compatible_test.py +207 -20
  83. langfun/core/llms/openai_test.py +0 -2
  84. langfun/core/llms/rest.py +16 -1
  85. langfun/core/llms/vertexai.py +78 -8
  86. langfun/core/logging.py +1 -1
  87. langfun/core/mcp/__init__.py +10 -0
  88. langfun/core/mcp/client.py +177 -0
  89. langfun/core/mcp/client_test.py +71 -0
  90. langfun/core/mcp/session.py +241 -0
  91. langfun/core/mcp/session_test.py +54 -0
  92. langfun/core/mcp/testing/simple_mcp_client.py +33 -0
  93. langfun/core/mcp/testing/simple_mcp_server.py +33 -0
  94. langfun/core/mcp/tool.py +254 -0
  95. langfun/core/mcp/tool_test.py +197 -0
  96. langfun/core/memory.py +1 -0
  97. langfun/core/message.py +160 -55
  98. langfun/core/message_test.py +65 -81
  99. langfun/core/modalities/__init__.py +8 -0
  100. langfun/core/modalities/audio.py +21 -1
  101. langfun/core/modalities/image.py +73 -3
  102. langfun/core/modalities/image_test.py +116 -0
  103. langfun/core/modalities/mime.py +78 -4
  104. langfun/core/modalities/mime_test.py +59 -0
  105. langfun/core/modalities/pdf.py +19 -1
  106. langfun/core/modalities/video.py +21 -1
  107. langfun/core/modality.py +167 -29
  108. langfun/core/modality_test.py +42 -12
  109. langfun/core/natural_language.py +1 -1
  110. langfun/core/sampling.py +4 -4
  111. langfun/core/sampling_test.py +20 -4
  112. langfun/core/structured/__init__.py +2 -24
  113. langfun/core/structured/completion.py +34 -44
  114. langfun/core/structured/completion_test.py +23 -43
  115. langfun/core/structured/description.py +54 -50
  116. langfun/core/structured/function_generation.py +29 -12
  117. langfun/core/structured/mapping.py +81 -37
  118. langfun/core/structured/parsing.py +95 -79
  119. langfun/core/structured/parsing_test.py +0 -3
  120. langfun/core/structured/querying.py +230 -154
  121. langfun/core/structured/querying_test.py +69 -33
  122. langfun/core/structured/schema/__init__.py +49 -0
  123. langfun/core/structured/schema/base.py +664 -0
  124. langfun/core/structured/schema/base_test.py +531 -0
  125. langfun/core/structured/schema/json.py +174 -0
  126. langfun/core/structured/schema/json_test.py +121 -0
  127. langfun/core/structured/schema/python.py +316 -0
  128. langfun/core/structured/schema/python_test.py +410 -0
  129. langfun/core/structured/schema_generation.py +33 -14
  130. langfun/core/structured/scoring.py +47 -36
  131. langfun/core/structured/tokenization.py +26 -11
  132. langfun/core/subscription.py +2 -2
  133. langfun/core/template.py +175 -50
  134. langfun/core/template_test.py +123 -17
  135. langfun/env/__init__.py +43 -0
  136. langfun/env/base_environment.py +827 -0
  137. langfun/env/base_environment_test.py +473 -0
  138. langfun/env/base_feature.py +304 -0
  139. langfun/env/base_feature_test.py +228 -0
  140. langfun/env/base_sandbox.py +842 -0
  141. langfun/env/base_sandbox_test.py +1235 -0
  142. langfun/env/event_handlers/__init__.py +14 -0
  143. langfun/env/event_handlers/chain.py +233 -0
  144. langfun/env/event_handlers/chain_test.py +253 -0
  145. langfun/env/event_handlers/event_logger.py +472 -0
  146. langfun/env/event_handlers/event_logger_test.py +304 -0
  147. langfun/env/event_handlers/metric_writer.py +726 -0
  148. langfun/env/event_handlers/metric_writer_test.py +214 -0
  149. langfun/env/interface.py +1640 -0
  150. langfun/env/interface_test.py +153 -0
  151. langfun/env/load_balancers.py +59 -0
  152. langfun/env/load_balancers_test.py +141 -0
  153. langfun/env/test_utils.py +507 -0
  154. {langfun-0.1.2.dev202509120804.dist-info → langfun-0.1.2.dev202512150805.dist-info}/METADATA +7 -3
  155. langfun-0.1.2.dev202512150805.dist-info/RECORD +217 -0
  156. langfun/core/eval/v2/runners_test.py +0 -343
  157. langfun/core/structured/schema.py +0 -987
  158. langfun/core/structured/schema_test.py +0 -982
  159. langfun-0.1.2.dev202509120804.dist-info/RECORD +0 -172
  160. {langfun-0.1.2.dev202509120804.dist-info → langfun-0.1.2.dev202512150805.dist-info}/WHEEL +0 -0
  161. {langfun-0.1.2.dev202509120804.dist-info → langfun-0.1.2.dev202512150805.dist-info}/licenses/LICENSE +0 -0
  162. {langfun-0.1.2.dev202509120804.dist-info → langfun-0.1.2.dev202512150805.dist-info}/top_level.txt +0 -0

langfun/core/data/conversion/openai.py

@@ -19,17 +19,25 @@ import langfun.core as lf
 from langfun.core import modalities as lf_modalities
 
 
-class OpenAIMessageConverter(lf.MessageConverter):
-  """Converter to OpenAI API."""
+class OpenAIChatCompletionAPIMessageConverter(lf.MessageConverter):
+  """Converter for OpenAI Chat Completion API.
 
-  FORMAT_ID = 'openai'
+  This converter translates `lf.Message` objects into the JSON format
+  required by the OpenAI Chat Completions API
+  (https://platform.openai.com/docs/api-reference/chat) and vice versa.
+  It handles text and image modalities, mapping Langfun roles to OpenAI
+  roles ('system', 'user', 'assistant'). An optional `chunk_preprocessor`
+  can be provided to modify or filter chunks before conversion.
+  """
+
+  FORMAT_ID = 'openai_chat_completion_api'
 
   chunk_preprocessor: Annotated[
       Callable[[str | lf.Modality], Any] | None,
       (
           'Chunk preprocessor for Langfun chunk to OpenAI chunk conversion. '
           'It will be applied before each Langfun chunk is converted. '
-          'If returns None, the chunk will be skipped.'
+          'If it returns None, the chunk will be skipped.'
       )
   ] = None
 
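For reference, a minimal round-trip sketch of the renamed format, based on the test expectations later in this diff (assuming this conversion module has been imported so the format id is registered):

import langfun.core as lf

# Langfun message -> Chat Completions JSON.
json_msg = lf.UserMessage('hi').as_format('openai_chat_completion_api')
# {'role': 'user', 'content': [{'type': 'text', 'text': 'hi'}]}

# Chat Completions JSON -> Langfun message.
assert lf.Message.from_value(
    json_msg, format='openai_chat_completion_api'
) == lf.UserMessage('hi')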
@@ -41,22 +49,29 @@ class OpenAIMessageConverter(lf.MessageConverter):
         chunk = self.chunk_preprocessor(chunk)
         if chunk is None:
           continue
-
-      if isinstance(chunk, str):
-        item = dict(type='text', text=chunk)
-      elif isinstance(chunk, lf_modalities.Image):
-        item = dict(
-            type='image_url', image_url=dict(url=chunk.embeddable_uri)
-        )
-      # TODO(daiyip): Support audio_input.
-      else:
-        raise ValueError(f'Unsupported content type: {chunk!r}.')
-      parts.append(item)
+      parts.append(self.chunk_to_json(type(message), chunk))
     return dict(
         role=self.get_role(message),
         content=parts,
     )
 
+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    del message_cls
+    if isinstance(chunk, str):
+      return dict(type='text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type='image_url', image_url=dict(url=chunk.embeddable_uri)
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
   def get_role(self, message: lf.Message) -> str:
     """Returns the role of the message."""
     if isinstance(message, lf.SystemMessage):
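The point of extracting `chunk_to_json` is that per-chunk encoding becomes an overridable hook while `to_value` keeps assembling the message envelope. A sketch of how a subclass can reuse it; the class name and format id below are hypothetical, for illustration only:

class UppercaseTextConverter(OpenAIChatCompletionAPIMessageConverter):
  """Hypothetical converter that overrides only the chunk encoding."""

  FORMAT_ID = 'openai_chat_completion_api_upper'  # hypothetical id

  def chunk_to_json(
      self,
      message_cls: type[lf.Message],
      chunk: str | lf.Modality
  ) -> dict[str, Any]:
    # Upper-case text chunks; defer everything else to the base class.
    if isinstance(chunk, str):
      return dict(type='text', text=chunk.upper())
    return super().chunk_to_json(message_cls, chunk)

This is the same hook `OpenAIResponsesAPIMessageConverter` overrides below to emit 'input_*'/'output_*' chunk types.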
@@ -92,40 +107,139 @@ class OpenAIMessageConverter(lf.MessageConverter):
     assert isinstance(content, list)
     chunks = []
     for item in content:
-      t = self._safe_read(item, 'type')
-      if t == 'text':
-        chunk = self._safe_read(item, 'text')
-      elif t == 'image_url':
-        chunk = lf_modalities.Image.from_uri(
-            self._safe_read(self._safe_read(item, 'image_url'), 'url')
-        )
-      else:
-        raise ValueError(f'Unsupported content type: {item!r}.')
-      chunks.append(chunk)
+      chunks.append(self.json_to_chunk(item))
     return message_cls.from_chunks(chunks)
 
+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t == 'text':
+      return self._safe_read(json, 'text')
+    elif t == 'image_url':
+      return lf_modalities.Image.from_uri(
+          self._safe_read(self._safe_read(json, 'image_url'), 'url')
+      )
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+
 
-def _as_openai_format(
+def _as_openai_chat_completion_api_format(
     self,
     chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
     **kwargs
 ) -> dict[str, Any]:
   """Returns an OpenAI format message."""
-  return OpenAIMessageConverter(
+  return OpenAIChatCompletionAPIMessageConverter(
       chunk_preprocessor=chunk_preprocessor, **kwargs
   ).to_value(self)
 
 
 @classmethod
-def _from_openai_format(
+def _from_openai_chat_completion_api_format(
     cls,
     openai_message: dict[str, Any],
     **kwargs
 ) -> lf.Message:
   """Creates a Langfun message from the OpenAI format message."""
   del cls
-  return OpenAIMessageConverter(**kwargs).from_value(openai_message)
+  return OpenAIChatCompletionAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)
 
 # Set shortcut methods in lf.Message.
-lf.Message.as_openai_format = _as_openai_format
-lf.Message.from_openai_format = _from_openai_format
+lf.Message.as_openai_chat_completion_api_format = (
+    _as_openai_chat_completion_api_format
+)
+
+lf.Message.from_openai_chat_completion_api_format = (
+    _from_openai_chat_completion_api_format
+)
+
+
+#
+# OpenAI Responses API message converter.
+#
+
+
+class OpenAIResponsesAPIMessageConverter(
+    OpenAIChatCompletionAPIMessageConverter
+):
+  """Converter for OpenAI Responses API.
+
+  This converter translates `lf.Message` objects into the JSON format
+  required by the OpenAI Responses API
+  (https://platform.openai.com/docs/api-reference/responses/create),
+  which is used for human-in-the-loop rating, and vice versa.
+  It extends `OpenAIChatCompletionAPIMessageConverter` but uses different
+  type names for content chunks (e.g., 'input_text', 'output_image').
+  """
+
+  FORMAT_ID = 'openai_responses_api'
+
+  def to_value(self, message: lf.Message) -> dict[str, Any]:
+    """Converts a Langfun message to OpenAI API."""
+    message_json = super().to_value(message)
+    message_json['type'] = 'message'
+    return message_json
+
+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    source = 'output' if issubclass(message_cls, lf.AIMessage) else 'input'
+
+    if isinstance(chunk, str):
+      return dict(type=f'{source}_text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type=f'{source}_image', image_url=chunk.embeddable_uri
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t in ('input_text', 'output_text'):
+      return self._safe_read(json, 'text')
+    elif t in ('input_image', 'output_image'):
+      return lf_modalities.Image.from_uri(self._safe_read(json, 'image_url'))
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+
+
+def _as_openai_responses_api_format(
+    self,
+    chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
+    **kwargs
+) -> dict[str, Any]:
+  """Returns an OpenAI format message."""
+  return OpenAIResponsesAPIMessageConverter(
+      chunk_preprocessor=chunk_preprocessor, **kwargs
+  ).to_value(self)
+
+
+@classmethod
+def _from_openai_responses_api_format(
+    cls,
+    openai_message: dict[str, Any],
+    **kwargs
+) -> lf.Message:
+  """Creates a Langfun message from the OpenAI format message."""
+  del cls
+  return OpenAIResponsesAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)
+
+
+# Set shortcut methods in lf.Message.
+lf.Message.as_openai_responses_api_format = (
+    _as_openai_responses_api_format
+)
+
+lf.Message.from_openai_responses_api_format = (
+    _from_openai_responses_api_format
+)

langfun/core/data/conversion/openai_test.py

@@ -30,25 +30,25 @@ image_content = (
 )
 
 
-class OpenAIConversionTest(unittest.TestCase):
+class OpenAIChatCompletionAPIConverterTest(unittest.TestCase):
 
   def test_as_format_with_role(self):
     self.assertEqual(
-        lf.UserMessage('hi').as_format('openai'),
+        lf.UserMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.AIMessage('hi').as_format('openai'),
+        lf.AIMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'assistant',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.SystemMessage('hi').as_format('openai'),
+        lf.SystemMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'system',
             'content': [{'type': 'text', 'text': 'hi'}],
@@ -60,7 +60,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('openai'),
+        ).render().as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [
@@ -90,7 +90,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_openai_format(
+        ).render().as_openai_chat_completion_api_format(
             chunk_preprocessor=lambda x: x if isinstance(x, str) else None
         ),
         {
@@ -114,7 +114,7 @@ class OpenAIConversionTest(unittest.TestCase):
             {
                 'content': 'this is a text',
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('this is a text'),
     )
@@ -126,7 +126,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'user',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.UserMessage('hi'),
     )
@@ -136,7 +136,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'assistant',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('hi'),
     )
@@ -146,7 +146,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'system',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='openai',
+            format='openai_chat_completion_api',
         ),
         lf.SystemMessage('hi'),
     )
@@ -156,21 +156,165 @@ class OpenAIConversionTest(unittest.TestCase):
              'role': 'function',
              'content': [{'type': 'text', 'text': 'hi'}],
          },
-         format='openai',
+         format='openai_chat_completion_api',
      )
 
   def test_from_value_with_image(self):
-    m = lf.Message.from_openai_format(
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_chat_completion_api_format(
+        lf.Template(
+            'What is this {{image}}?',
+            image=image
+        ).render().as_format('openai_chat_completion_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.images[0], lf_modalities.Image)
+    self.assertEqual(m.images[0].mime_type, 'image/png')
+    self.assertEqual(m.images[0].to_bytes(), image_content)
+
+
+class OpenAIResponsesAPIMessageConverterTest(unittest.TestCase):
+
+  def test_as_format_with_role(self):
+    self.assertEqual(
+        lf.UserMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.AIMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'assistant',
+            'content': [{'type': 'output_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.SystemMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'system',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+
+  def test_as_format_with_image(self):
+    self.assertEqual(
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('openai'),
+        ).render().as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_image',
+                    'image_url': (
+                        'data:image/png;base64,'
+                        + base64.b64encode(image_content).decode('utf-8')
+                    )
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
     )
-    self.assertEqual(m.text, 'What is this <<[[obj0]]>> ?')
-    self.assertIsInstance(m.obj0, lf_modalities.Image)
-    self.assertEqual(m.obj0.mime_type, 'image/png')
-    self.assertEqual(m.obj0.to_bytes(), image_content)
 
+  def test_as_format_with_chunk_preprocessor(self):
+    self.assertEqual(
+        lf.Template(
+            'What is this {{image}}?',
+            image=lf_modalities.Image.from_bytes(image_content)
+        ).render().as_openai_responses_api_format(
+            chunk_preprocessor=lambda x: x if isinstance(x, str) else None
+        ),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
+    )
+
+  def test_from_value_with_simple_text(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'content': 'this is a text',
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('this is a text'),
+    )
+
+  def test_from_value_with_role(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'user',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.UserMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'assistant',
+                'content': [{'type': 'output_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'system',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.SystemMessage('hi'),
+    )
+    with self.assertRaisesRegex(ValueError, 'Unsupported role: .*'):
+      lf.Message.from_value(
+          {
+              'role': 'function',
+              'content': [{'type': 'input_text', 'text': 'hi'}],
+          },
+          format='openai_responses_api',
+      )
+
+  def test_from_value_with_image(self):
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_responses_api_format(
+        lf.Template(
+            'What is this {{image}}?', image=image
+        ).render().as_format('openai_responses_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.modalities()[0], lf_modalities.Image)
+    self.assertEqual(m.modalities()[0].content, image_content)
 
 if __name__ == '__main__':
   unittest.main()