langfun 0.1.2.dev202510240805__py3-none-any.whl → 0.1.2.dev202510250803__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/concurrent_test.py +1 -0
- langfun/core/data/conversion/anthropic_test.py +8 -6
- langfun/core/data/conversion/gemini_test.py +12 -9
- langfun/core/data/conversion/openai.py +134 -30
- langfun/core/data/conversion/openai_test.py +161 -17
- langfun/core/eval/v2/progress_tracking_test.py +3 -0
- langfun/core/langfunc_test.py +4 -2
- langfun/core/language_model.py +6 -6
- langfun/core/language_model_test.py +9 -3
- langfun/core/llms/__init__.py +2 -1
- langfun/core/llms/cache/base.py +3 -1
- langfun/core/llms/cache/in_memory_test.py +14 -4
- langfun/core/llms/deepseek.py +1 -1
- langfun/core/llms/groq.py +1 -1
- langfun/core/llms/llama_cpp.py +1 -1
- langfun/core/llms/openai.py +7 -2
- langfun/core/llms/openai_compatible.py +134 -27
- langfun/core/llms/openai_compatible_test.py +207 -20
- langfun/core/llms/openai_test.py +0 -2
- langfun/core/llms/vertexai.py +2 -2
- langfun/core/message.py +78 -44
- langfun/core/message_test.py +56 -81
- langfun/core/modalities/__init__.py +8 -0
- langfun/core/modalities/mime.py +9 -0
- langfun/core/modality.py +104 -27
- langfun/core/modality_test.py +42 -12
- langfun/core/sampling_test.py +20 -4
- langfun/core/structured/completion.py +2 -7
- langfun/core/structured/completion_test.py +23 -43
- langfun/core/structured/mapping.py +4 -13
- langfun/core/structured/querying.py +13 -11
- langfun/core/structured/querying_test.py +65 -29
- langfun/core/template.py +39 -13
- langfun/core/template_test.py +83 -17
- langfun/env/event_handlers/metric_writer_test.py +3 -3
- langfun/env/load_balancers_test.py +2 -2
- {langfun-0.1.2.dev202510240805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/METADATA +1 -1
- {langfun-0.1.2.dev202510240805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/RECORD +41 -41
- {langfun-0.1.2.dev202510240805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/WHEEL +0 -0
- {langfun-0.1.2.dev202510240805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/licenses/LICENSE +0 -0
- {langfun-0.1.2.dev202510240805.dist-info → langfun-0.1.2.dev202510250803.dist-info}/top_level.txt +0 -0
langfun/core/concurrent_test.py
CHANGED

langfun/core/data/conversion/anthropic_test.py
CHANGED

@@ -253,14 +253,16 @@ class AnthropicConversionTest(unittest.TestCase):
     )
     self.assertEqual(
         m.text,
-        'What are the common words from <<[[
+        'What are the common words from <<[[image:dc6e1e43]]>> and'
+        ' <<[[pdf:5daf5f31]]>> ?'
     )
-
-    self.
-    self.assertEqual(
+    modalities = m.modalities()
+    self.assertIsInstance(modalities[0], lf_modalities.Image)
+    self.assertEqual(modalities[0].mime_type, 'image/png')
+    self.assertEqual(modalities[0].content, image_content)

-    self.assertIsInstance(
-    self.assertEqual(
+    self.assertIsInstance(modalities[1], lf_modalities.PDF)
+    self.assertEqual(modalities[1].content, pdf_content)


 if __name__ == '__main__':
langfun/core/data/conversion/gemini_test.py
CHANGED

@@ -225,19 +225,22 @@ class GeminiConversionTest(unittest.TestCase):
     self.assertEqual(
         m.text,
         (
-            'What are the common words from <<[[
-            'and <<[[
+            'What are the common words from <<[[image:dc6e1e43]]>> , '
+            '<<[[pdf:4dc12e93]]>> and <<[[video:7e169565]]>> ?'
         )
     )
-    self.assertIsInstance(m.
-    self.assertEqual(m.
-    self.assertEqual(m.
+    self.assertIsInstance(m.modalities()[0], lf_modalities.Image)
+    self.assertEqual(m.modalities()[0].mime_type, 'image/png')
+    self.assertEqual(m.modalities()[0].to_bytes(), image_content)

-    self.assertIsInstance(m.
-    self.assertEqual(m.
+    self.assertIsInstance(m.modalities()[1], lf_modalities.PDF)
+    self.assertEqual(m.modalities()[1].uri, 'https://my.pdf')

-    self.assertIsInstance(m.
-    self.assertEqual(
+    self.assertIsInstance(m.modalities()[2], lf_modalities.Video)
+    self.assertEqual(
+        m.modalities()[2].uri,
+        'https://www.youtube.com/watch?v=abcd'
+    )


 if __name__ == '__main__':
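The updated conversion tests read attached objects back through Message.modalities() instead of per-object message attributes. A minimal sketch of that accessor, assuming m is a message produced by one of the converters exercised above:

# Modality objects come back in the order their <<[[...]]>> placeholders
# appear in m.text.
first = m.modalities()[0]
first.mime_type   # e.g. 'image/png'
first.to_bytes()  # raw bytes for byte-backed modalities
# URI-backed attachments (the PDF and YouTube video above) keep their source
# in `.uri`.
m.modalities()[1].uri   # 'https://my.pdf'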
langfun/core/data/conversion/openai.py
CHANGED

@@ -19,10 +19,13 @@ import langfun.core as lf
 from langfun.core import modalities as lf_modalities


-class OpenAIMessageConverter(lf.MessageConverter):
-  """Converter to OpenAI API.
+class OpenAIChatCompletionAPIMessageConverter(lf.MessageConverter):
+  """Converter to OpenAI ChatCompletion API.

-
+  See https://platform.openai.com/docs/api-reference/chat
+  """
+
+  FORMAT_ID = 'openai_chat_completion_api'

   chunk_preprocessor: Annotated[
       Callable[[str | lf.Modality], Any] | None,
@@ -41,22 +44,29 @@ class OpenAIMessageConverter(lf.MessageConverter):
         chunk = self.chunk_preprocessor(chunk)
         if chunk is None:
           continue
-
-      if isinstance(chunk, str):
-        item = dict(type='text', text=chunk)
-      elif isinstance(chunk, lf_modalities.Image):
-        item = dict(
-            type='image_url', image_url=dict(url=chunk.embeddable_uri)
-        )
-      # TODO(daiyip): Support audio_input.
-      else:
-        raise ValueError(f'Unsupported content type: {chunk!r}.')
-      parts.append(item)
+      parts.append(self.chunk_to_json(type(message), chunk))
     return dict(
         role=self.get_role(message),
         content=parts,
     )

+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    del message_cls
+    if isinstance(chunk, str):
+      return dict(type='text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type='image_url', image_url=dict(url=chunk.embeddable_uri)
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
   def get_role(self, message: lf.Message) -> str:
     """Returns the role of the message."""
     if isinstance(message, lf.SystemMessage):
@@ -92,40 +102,134 @@ class OpenAIMessageConverter(lf.MessageConverter):
     assert isinstance(content, list)
     chunks = []
     for item in content:
-
-      if t == 'text':
-        chunk = self._safe_read(item, 'text')
-      elif t == 'image_url':
-        chunk = lf_modalities.Image.from_uri(
-            self._safe_read(self._safe_read(item, 'image_url'), 'url')
-        )
-      else:
-        raise ValueError(f'Unsupported content type: {item!r}.')
-      chunks.append(chunk)
+      chunks.append(self.json_to_chunk(item))
     return message_cls.from_chunks(chunks)

+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t == 'text':
+      return self._safe_read(json, 'text')
+    elif t == 'image_url':
+      return lf_modalities.Image.from_uri(
+          self._safe_read(self._safe_read(json, 'image_url'), 'url')
+      )
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+

-def
+def _as_openai_chat_completion_api_format(
     self,
     chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
     **kwargs
 ) -> dict[str, Any]:
   """Returns an OpenAI format message."""
-  return
+  return OpenAIChatCompletionAPIMessageConverter(
       chunk_preprocessor=chunk_preprocessor, **kwargs
   ).to_value(self)


 @classmethod
-def
+def _from_openai_chat_completion_api_format(
     cls,
     openai_message: dict[str, Any],
     **kwargs
 ) -> lf.Message:
   """Creates a Langfun message from the OpenAI format message."""
   del cls
-  return
+  return OpenAIChatCompletionAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)

 # Set shortcut methods in lf.Message.
-lf.Message.
-
+lf.Message.as_openai_chat_completion_api_format = (
+    _as_openai_chat_completion_api_format
+)
+
+lf.Message.from_openai_chat_completion_api_format = (
+    _from_openai_chat_completion_api_format
+)
+
+
+#
+# OpenAI Responses API message converter.
+#
+
+
+class OpenAIResponsesAPIMessageConverter(
+    OpenAIChatCompletionAPIMessageConverter
+):
+  """Converter to OpenAI Responses API.
+
+  See https://platform.openai.com/docs/api-reference/responses/create
+  """
+
+  FORMAT_ID = 'openai_responses_api'
+
+  def to_value(self, message: lf.Message) -> dict[str, Any]:
+    """Converts a Langfun message to OpenAI API."""
+    message_json = super().to_value(message)
+    message_json['type'] = 'message'
+    return message_json
+
+  def chunk_to_json(
+      self,
+      message_cls: type[lf.Message],
+      chunk: str | lf.Modality
+  ) -> dict[str, Any]:
+    """Converts a Langfun chunk to OpenAI chunk."""
+    source = 'output' if issubclass(message_cls, lf.AIMessage) else 'input'
+
+    if isinstance(chunk, str):
+      return dict(type=f'{source}_text', text=chunk)
+    elif isinstance(chunk, lf_modalities.Image):
+      return dict(
+          type=f'{source}_image', image_url=chunk.embeddable_uri
+      )
+    # TODO(daiyip): Support audio_input.
+    else:
+      raise ValueError(f'Unsupported content type: {chunk!r}.')
+
+  def json_to_chunk(self, json: dict[str, Any]) -> str | lf.Modality:
+    """Returns a Langfun chunk from OpenAI chunk JSON."""
+    t = self._safe_read(json, 'type')
+    if t in ('input_text', 'output_text'):
+      return self._safe_read(json, 'text')
+    elif t in ('input_image', 'output_image'):
+      return lf_modalities.Image.from_uri(self._safe_read(json, 'image_url'))
+    else:
+      raise ValueError(f'Unsupported content type: {json!r}.')
+
+
+def _as_openai_responses_api_format(
+    self,
+    chunk_preprocessor: Callable[[str | lf.Modality], Any] | None = None,
+    **kwargs
+) -> dict[str, Any]:
+  """Returns an OpenAI format message."""
+  return OpenAIResponsesAPIMessageConverter(
+      chunk_preprocessor=chunk_preprocessor, **kwargs
+  ).to_value(self)
+
+
+@classmethod
+def _from_openai_responses_api_format(
+    cls,
+    openai_message: dict[str, Any],
+    **kwargs
+) -> lf.Message:
+  """Creates a Langfun message from the OpenAI format message."""
+  del cls
+  return OpenAIResponsesAPIMessageConverter(
+      **kwargs
+  ).from_value(openai_message)
+
+
+# Set shortcut methods in lf.Message.
+lf.Message.as_openai_responses_api_format = (
+    _as_openai_responses_api_format
+)
+
+lf.Message.from_openai_responses_api_format = (
+    _from_openai_responses_api_format
+)
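Taken together, the two converters register new format ids on lf.Message. A minimal sketch of the resulting surface, based on the tests in openai_test.py below (the dict shapes are copied from those tests):

import langfun.core as lf

user = lf.UserMessage('hi')

# Chat Completion API: role plus typed content parts.
user.as_format('openai_chat_completion_api')
# -> {'role': 'user', 'content': [{'type': 'text', 'text': 'hi'}]}

# Responses API: the message gains type='message' and text parts become
# 'input_text' ('output_text' for AI messages).
user.as_format('openai_responses_api')
# -> {'type': 'message', 'role': 'user',
#     'content': [{'type': 'input_text', 'text': 'hi'}]}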
langfun/core/data/conversion/openai_test.py
CHANGED

@@ -30,25 +30,25 @@ image_content = (
 )


-class OpenAIConversionTest(unittest.TestCase):
+class OpenAIChatCompletionAPIConverterTest(unittest.TestCase):

   def test_as_format_with_role(self):
     self.assertEqual(
-        lf.UserMessage('hi').as_format('
+        lf.UserMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.AIMessage('hi').as_format('
+        lf.AIMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'assistant',
             'content': [{'type': 'text', 'text': 'hi'}],
         },
     )
     self.assertEqual(
-        lf.SystemMessage('hi').as_format('
+        lf.SystemMessage('hi').as_format('openai_chat_completion_api'),
         {
             'role': 'system',
             'content': [{'type': 'text', 'text': 'hi'}],
@@ -60,7 +60,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('
+        ).render().as_format('openai_chat_completion_api'),
         {
             'role': 'user',
             'content': [
@@ -90,7 +90,7 @@ class OpenAIConversionTest(unittest.TestCase):
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().
+        ).render().as_openai_chat_completion_api_format(
             chunk_preprocessor=lambda x: x if isinstance(x, str) else None
         ),
         {
@@ -114,7 +114,7 @@ class OpenAIConversionTest(unittest.TestCase):
             {
                 'content': 'this is a text',
             },
-            format='
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('this is a text'),
     )
@@ -126,7 +126,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'user',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='
+            format='openai_chat_completion_api',
         ),
         lf.UserMessage('hi'),
     )
@@ -136,7 +136,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'assistant',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='
+            format='openai_chat_completion_api',
         ),
         lf.AIMessage('hi'),
     )
@@ -146,7 +146,7 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'system',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='
+            format='openai_chat_completion_api',
         ),
         lf.SystemMessage('hi'),
     )
@@ -156,21 +156,165 @@ class OpenAIConversionTest(unittest.TestCase):
                 'role': 'function',
                 'content': [{'type': 'text', 'text': 'hi'}],
             },
-            format='
+            format='openai_chat_completion_api',
         )

   def test_from_value_with_image(self):
-
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_chat_completion_api_format(
+        lf.Template(
+            'What is this {{image}}?',
+            image=image
+        ).render().as_format('openai_chat_completion_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.images[0], lf_modalities.Image)
+    self.assertEqual(m.images[0].mime_type, 'image/png')
+    self.assertEqual(m.images[0].to_bytes(), image_content)
+
+
+class OpenAIResponsesAPIMessageConverterTest(unittest.TestCase):
+
+  def test_as_format_with_role(self):
+    self.assertEqual(
+        lf.UserMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.AIMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'assistant',
+            'content': [{'type': 'output_text', 'text': 'hi'}],
+        },
+    )
+    self.assertEqual(
+        lf.SystemMessage('hi').as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'system',
+            'content': [{'type': 'input_text', 'text': 'hi'}],
+        },
+    )
+
+  def test_as_format_with_image(self):
+    self.assertEqual(
         lf.Template(
             'What is this {{image}}?',
             image=lf_modalities.Image.from_bytes(image_content)
-        ).render().as_format('
+        ).render().as_format('openai_responses_api'),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_image',
+                    'image_url': (
+                        'data:image/png;base64,'
+                        + base64.b64encode(image_content).decode('utf-8')
+                    )
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
     )
-    self.assertEqual(m.text, 'What is this <<[[obj0]]>> ?')
-    self.assertIsInstance(m.obj0, lf_modalities.Image)
-    self.assertEqual(m.obj0.mime_type, 'image/png')
-    self.assertEqual(m.obj0.to_bytes(), image_content)

+  def test_as_format_with_chunk_preprocessor(self):
+    self.assertEqual(
+        lf.Template(
+            'What is this {{image}}?',
+            image=lf_modalities.Image.from_bytes(image_content)
+        ).render().as_openai_responses_api_format(
+            chunk_preprocessor=lambda x: x if isinstance(x, str) else None
+        ),
+        {
+            'type': 'message',
+            'role': 'user',
+            'content': [
+                {
+                    'type': 'input_text',
+                    'text': 'What is this'
+                },
+                {
+                    'type': 'input_text',
+                    'text': '?'
+                }
+            ],
+        },
+    )
+
+  def test_from_value_with_simple_text(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'content': 'this is a text',
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('this is a text'),
+    )
+
+  def test_from_value_with_role(self):
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'user',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.UserMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'assistant',
+                'content': [{'type': 'output_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.AIMessage('hi'),
+    )
+    self.assertEqual(
+        lf.Message.from_value(
+            {
+                'role': 'system',
+                'content': [{'type': 'input_text', 'text': 'hi'}],
+            },
+            format='openai_responses_api',
+        ),
+        lf.SystemMessage('hi'),
+    )
+    with self.assertRaisesRegex(ValueError, 'Unsupported role: .*'):
+      lf.Message.from_value(
+          {
+              'role': 'function',
+              'content': [{'type': 'input_text', 'text': 'hi'}],
+          },
+          format='openai_responses_api',
+      )
+
+  def test_from_value_with_image(self):
+    image = lf_modalities.Image.from_bytes(image_content)
+    m = lf.Message.from_openai_responses_api_format(
+        lf.Template(
+            'What is this {{image}}?', image=image
+        ).render().as_format('openai_responses_api'),
+    )
+    self.assertEqual(m.text, f'What is this <<[[{image.id}]]>> ?')
+    self.assertIsInstance(m.modalities()[0], lf_modalities.Image)
+    self.assertEqual(m.modalities()[0].content, image_content)

 if __name__ == '__main__':
   unittest.main()
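The removed assertions show the old positional addressing (m.obj0, <<[[obj0]]>> placeholders); the new ones key placeholders by the modality id. A short sketch of the new round trip, assuming png_bytes is any valid PNG payload:

import langfun.core as lf
from langfun.core import modalities as lf_modalities

image = lf_modalities.Image.from_bytes(png_bytes)
rendered = lf.Template('What is this {{image}}?', image=image).render()
restored = lf.Message.from_openai_chat_completion_api_format(
    rendered.as_format('openai_chat_completion_api')
)
# The placeholder now carries the modality id instead of a positional name.
assert restored.text == f'What is this <<[[{image.id}]]>> ?'
assert restored.images[0].to_bytes() == png_bytes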
langfun/core/eval/v2/progress_tracking_test.py
CHANGED

@@ -18,6 +18,7 @@ import sys
 import tempfile
 import unittest

+from langfun.core import concurrent as lf_concurrent
 from langfun.core import console as lf_console
 from langfun.core.eval.v2 import eval_test_helper
 from langfun.core.eval.v2 import progress_tracking  # pylint: disable=unused-import
@@ -51,6 +52,7 @@ class TqdmProgressTrackerTest(unittest.TestCase):
     with contextlib.redirect_stderr(string_io):
       _ = experiment.run(root_dir, 'new', plugins=[])
     sys.stderr.flush()
+    lf_concurrent.ProgressBar.refresh()
     self.assertIn('All: 100%', string_io.getvalue())

   def test_with_example_ids(self):
@@ -62,6 +64,7 @@ class TqdmProgressTrackerTest(unittest.TestCase):
     with contextlib.redirect_stderr(string_io):
       _ = experiment.run(root_dir, 'new', example_ids=[1], plugins=[])
     sys.stderr.flush()
+    lf_concurrent.ProgressBar.refresh()
     self.assertIn('All: 100%', string_io.getvalue())

langfun/core/langfunc_test.py
CHANGED

@@ -82,7 +82,7 @@ class LangFuncCallTest(unittest.TestCase):

     i = l.render()
     self.assertEqual(i, 'Hello')
-    self.assertEqual(i, message.UserMessage('Hello'))
+    self.assertEqual(i, message.UserMessage('Hello', __template_input__={}))
     self.assertEqual(i.tags, ['rendered'])

     r = l()
@@ -96,7 +96,9 @@ class LangFuncCallTest(unittest.TestCase):
     self.assertEqual(r.tags, ['lm-response', 'lm-output'])
     self.assertEqual(
         r.source,
-        message.UserMessage(
+        message.UserMessage(
+            'Hello', metadata=dict(cache_seed=0, __template_input__={})
+        )
     )
     self.assertEqual(r.source.tags, ['rendered', 'lm-input'])

langfun/core/language_model.py
CHANGED

@@ -1253,11 +1253,11 @@ class LanguageModel(component.Component):
          title=f'\n[{call_counter}] PROMPT SENT TO LM{title_suffix}:',
          color='green',
      )
-
-      if referred_modalities:
+      if prompt.referred_modalities:
        console.write(
            pg.object_utils.kvlist_str(
-                [(k, repr(v), None)
+                [(k, repr(v), None)
+                 for k, v in prompt.referred_modalities.items()]
            ),
            title=f'\n[{call_counter}] MODALITY OBJECTS SENT TO LM:',
            color='green',
@@ -1343,9 +1343,9 @@ class LanguageModel(component.Component):
          color='green',
      )
      if isinstance(prompt, list):
-        referred_modalities_lst = [p.referred_modalities
+        referred_modalities_lst = [p.referred_modalities for p in prompt]
      else:
-        referred_modalities_lst = [prompt.referred_modalities
+        referred_modalities_lst = [prompt.referred_modalities,]
      if referred_modalities_lst:
        for referred_modalities in referred_modalities_lst:
          console.write(
@@ -1420,7 +1420,7 @@ class LanguageModel(component.Component):
          title=f'\n[{call_counter}] PROMPT TO TOKENIZE:',
          color='green',
      )
-      referred_modalities_lst = [prompt.referred_modalities
+      referred_modalities_lst = [prompt.referred_modalities,]
      if referred_modalities_lst:
        for referred_modalities in referred_modalities_lst:
          console.write(
langfun/core/language_model_test.py
CHANGED

@@ -656,11 +656,17 @@ class LanguageModelTest(unittest.TestCase):

     string_io = io.StringIO()
     lm = MockModel(sampling_options=lm_lib.LMSamplingOptions(top_k=1))
+    image = Image()
     with contextlib.redirect_stdout(string_io):
       self.assertEqual(
-          lm(
-
-
+          lm(
+              message_lib.UserMessage(
+                  f'hi <<[[{image.id}]]>>',
+                  referred_modalities=[image],
+              ),
+              debug=True
+          ),
+          f'hi <<[[{image.id}]]>>'
       )

     debug_info = string_io.getvalue()
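The updated debug test pairs an embedded modality placeholder with the object itself. A sketch of that pattern outside the mock setup, assuming png_bytes is a valid image payload and lm is any lf.LanguageModel (the kwarg mirrors the test above and is an assumption for real models):

import langfun.core as lf
from langfun.core import modalities as lf_modalities

image = lf_modalities.Image.from_bytes(png_bytes)
# The prompt text carries the modality id; the object itself travels with the
# message and is read back as `prompt.referred_modalities` by the debug
# printing paths patched in language_model.py above.
prompt = lf.UserMessage(f'hi <<[[{image.id}]]>>', referred_modalities=[image])
response = lm(prompt, debug=True)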
langfun/core/llms/__init__.py
CHANGED

@@ -30,7 +30,8 @@ from langfun.core.llms.compositional import RandomChoice

 # Base models by request/response protocol.
 from langfun.core.llms.rest import REST
-from langfun.core.llms.openai_compatible import
+from langfun.core.llms.openai_compatible import OpenAIChatCompletionAPI
+from langfun.core.llms.openai_compatible import OpenAIResponsesAPI
 from langfun.core.llms.gemini import Gemini
 from langfun.core.llms.anthropic import Anthropic

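With this change, both protocol base classes are importable from the top-level llms package. A minimal sketch of the import surface only (openai_compatible.py itself is not reproduced in this diff excerpt):

from langfun.core import llms

# Both protocol bases are now exported alongside REST, Gemini and Anthropic.
llms.OpenAIChatCompletionAPI   # Chat Completions-style request/response.
llms.OpenAIResponsesAPI        # Responses API-style request/response.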
langfun/core/llms/cache/base.py
CHANGED

@@ -121,4 +121,6 @@ class LMCacheBase(lf.LMCache):

 def default_key(lm: lf.LanguageModel, prompt: lf.Message, seed: int) -> Any:
   """Default key for LM cache."""
-
+  # prompt text already contains the modality id for referenced modality
+  # objects, so no need to include them in the key.
+  return (prompt.text, lm.sampling_options.cache_key(), seed)
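A short sketch of the resulting key, assuming lm is an lf.LanguageModel and prompt an lf.Message. Per the comment above, modality placeholders embed content-derived ids in prompt.text, so the text alone distinguishes prompts that reference different attachments:

key = default_key(lm, prompt, seed=0)
# -> (prompt.text, lm.sampling_options.cache_key(), 0)
# e.g. ('What is this <<[[image:dc6e1e43]]>> ?', <sampling-options key>, 0)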