langfun 0.0.2.dev20240507__py3-none-any.whl → 0.0.2.dev20240513__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/__init__.py +5 -0
- langfun/core/eval/base.py +4 -2
- langfun/core/langfunc.py +1 -17
- langfun/core/langfunc_test.py +4 -0
- langfun/core/llms/__init__.py +8 -0
- langfun/core/llms/fake.py +6 -6
- langfun/core/llms/google_genai.py +8 -0
- langfun/core/llms/openai.py +3 -2
- langfun/core/llms/vertexai.py +291 -0
- langfun/core/llms/vertexai_test.py +233 -0
- langfun/core/modalities/image.py +1 -3
- langfun/core/modalities/mime.py +6 -0
- langfun/core/modalities/video.py +1 -3
- langfun/core/structured/__init__.py +2 -0
- langfun/core/structured/prompting.py +31 -7
- langfun/core/structured/prompting_test.py +43 -0
- langfun/core/template.py +22 -1
- {langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/METADATA +2 -1
- {langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/RECORD +22 -20
- {langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/LICENSE +0 -0
- {langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/WHEEL +0 -0
- {langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/top_level.txt +0 -0
langfun/__init__.py
CHANGED
@@ -33,6 +33,11 @@ complete = structured.complete
 score = structured.score
 generate_class = structured.generate_class
 
+# Helper functions for input/output transformations based on
+# `lf.query` (e.g. jax-on-beam could use these for batch processing)
+query_prompt = structured.query_prompt
+query_output = structured.query_output
+
 source_form = structured.source_form
 function_gen = structured.function_gen
 
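The comment above motivates the two new exports: they expose the prompt-rendering and output-parsing halves of `lf.query` separately. A minimal usage sketch, consistent with the `query_prompt`/`query_output` tests later in this diff:

```python
import langfun as lf

# Render the exact prompt `lf.query` would send, without calling any LLM.
prompt = lf.query_prompt('what is 1 + 1?', int)

# Parse a raw LLM response into the requested type, also without an LLM call.
result = lf.query_output('2', int)
assert result == 2
```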
langfun/core/eval/base.py
CHANGED
@@ -1179,7 +1179,7 @@ class Evaluation(Evaluable):
 
   def process(self, example: Any, **kwargs) -> lf.Message:
     """Process an example and returns its output."""
-    prompt = self.prompt
+    prompt = lf.Template.from_value(self.prompt, example=example)
     if self.method == 'call':
       return lf_structured.call(
           prompt,
@@ -1207,7 +1207,9 @@ class Evaluation(Evaluable):
     else:
      assert self.method == 'complete', self.method
      assert isinstance(self.schema.spec, pg.typing.Object), self.schema
-
+      # TODO(daiyip): Currently multi-modal inputs within the prompt for
+      # completion is not supported.
+      input_value = self.schema.spec.cls.partial(prompt.render().text)
      return lf_structured.complete(
          input_value,
          lm=self.lm,
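Building the prompt with `lf.Template.from_value(self.prompt, example=example)` binds the current example as a template variable, so evaluation prompts can reference it directly. A small sketch of the binding (the `question` field is illustrative, not part of the eval API):

```python
import langfun as lf
import pyglove as pg

example = pg.Dict(question='What is 1 + 1?')
# `example` becomes a variable of the template and is available to Jinja.
prompt = lf.Template.from_value('Q: {{example.question}}', example=example)
assert prompt.render().text == 'Q: What is 1 + 1?'
```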
langfun/core/langfunc.py
CHANGED
@@ -14,7 +14,7 @@
 """LangFunc: Language-based functions."""
 
 import dataclasses
-from typing import Annotated, Type, Union
+from typing import Annotated, Type
 
 from langfun.core import component
 from langfun.core import language_model
@@ -328,22 +328,6 @@ class LangFunc(
     """Transforms the output message before returning from __call__."""
     return lm_output
 
-  @classmethod
-  def from_value(
-      cls, value: Union[str, template_lib.Template], **kwargs
-  ) -> 'LangFunc':
-    """Create a LangFunc object from a string or template."""
-    if isinstance(value, LangFunc):
-      return value
-    if isinstance(value, template_lib.Template):
-      lfun = LangFunc(value.template_str, **kwargs)
-      # So lfun could acccess all attributes from value.
-      lfun.sym_setparent(value)
-      return lfun
-    if isinstance(value, str):
-      return LangFunc(template_str=value, **kwargs)
-    return LangFunc('{{input}}', input=value, **kwargs)
-
 
 # Register converter from str to LangFunc, therefore we can always
 # pass strs to attributes that accept LangFunc.
langfun/core/langfunc_test.py
CHANGED
@@ -57,6 +57,10 @@ class BasicTest(unittest.TestCase):
     l2 = LangFunc.from_value(l1)
     self.assertIs(l2, l1)
 
+    l3 = LangFunc.from_value(l1, x=1)
+    self.assertIsNot(l3, l1)
+    self.assertTrue(pg.eq(l3, LangFunc('Hello', x=1)))
+
     c = template_lib.Template(
         '{{x}} + {{l}}',
         x=1,
langfun/core/llms/__init__.py
CHANGED
@@ -27,6 +27,7 @@ from langfun.core.llms.fake import StaticSequence
 # Gemini models.
 from langfun.core.llms.google_genai import GenAI
 from langfun.core.llms.google_genai import GeminiPro
+from langfun.core.llms.google_genai import GeminiPro1_5
 from langfun.core.llms.google_genai import GeminiProVision
 from langfun.core.llms.google_genai import Palm2
 from langfun.core.llms.google_genai import Palm2_IT
@@ -73,6 +74,13 @@ from langfun.core.llms.groq import GroqLlama2_70B
 from langfun.core.llms.groq import GroqMistral_8x7B
 from langfun.core.llms.groq import GroqGemma7B_IT
 
+from langfun.core.llms.vertexai import VertexAI
+from langfun.core.llms.vertexai import VertexAIGeminiPro1_5
+from langfun.core.llms.vertexai import VertexAIGeminiPro1
+from langfun.core.llms.vertexai import VertexAIGeminiPro1Vision
+from langfun.core.llms.vertexai import VertexAIPalm2
+from langfun.core.llms.vertexai import VertexAIPalm2_32K
+
 
 # LLaMA C++ models.
 from langfun.core.llms.llama_cpp import LlamaCppRemote
langfun/core/llms/fake.py
CHANGED
@@ -57,12 +57,12 @@ class StaticResponse(Fake):
   """Language model that always gives the same canned response."""
 
   response: Annotated[
-      str,
+      str | lf.Message,
       'A canned response that will be returned regardless of the prompt.'
   ]
 
   def _response_from(self, prompt: lf.Message) -> lf.Message:
-    return lf.AIMessage(self.response)
+    return lf.AIMessage.from_value(self.response)
 
 
 @lf.use_init_args(['mapping'])
@@ -70,12 +70,12 @@ class StaticMapping(Fake):
   """A static mapping from prompt to response."""
 
   mapping: Annotated[
-      dict[str, str],
+      dict[str, str | lf.Message],
       'A mapping from prompt to response.'
   ]
 
   def _response_from(self, prompt: lf.Message) -> lf.Message:
-    return lf.AIMessage(self.mapping[prompt])
+    return lf.AIMessage.from_value(self.mapping[prompt])
 
 
 @lf.use_init_args(['sequence'])
@@ -83,7 +83,7 @@ class StaticSequence(Fake):
   """A static sequence of responses to use."""
 
   sequence: Annotated[
-      list[str],
+      list[str | lf.Message],
       'A sequence of strings as the response.'
   ]
 
@@ -92,6 +92,6 @@ class StaticSequence(Fake):
     self._pos = 0
 
   def _response_from(self, prompt: lf.Message) -> lf.Message:
-    r = lf.AIMessage(self.sequence[self._pos])
+    r = lf.AIMessage.from_value(self.sequence[self._pos])
     self._pos += 1
     return r
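Since responses now go through `lf.AIMessage.from_value`, the fake models accept pre-built messages as well as strings. A quick sketch:

```python
import langfun as lf
from langfun.core.llms import fake

# A canned response may now be a full message rather than a bare string,
# so it can carry metadata or modalities.
lm = fake.StaticResponse(lf.AIMessage('It is 2.'))
assert lm('what is 1 + 1?').text == 'It is 2.'
```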
langfun/core/llms/google_genai.py
CHANGED
@@ -34,6 +34,7 @@ class GenAI(lf.LanguageModel):
           'gemini-pro-vision',
           'text-bison-001',
           'chat-bison-001',
+          'gemini-1.5-pro-latest',
       ],
       'Model name.',
   ]
@@ -262,6 +263,13 @@ _GOOGLE_GENAI_MODEL_HUB = _ModelHub()
 #
 
 
+class GeminiPro1_5(GenAI):  # pylint: disable=invalid-name
+  """Gemini Pro latest model."""
+
+  model = 'gemini-1.5-pro-latest'
+  multimodal = True
+
+
 class GeminiPro(GenAI):
   """Gemini Pro model."""
 
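A usage sketch for the new alias; the `api_key` argument is assumed to follow the existing `GenAI` base class (keys can typically also be supplied via the environment):

```python
import langfun as lf

# 'gemini-1.5-pro-latest' endpoint with multimodal input enabled.
# The api_key value here is a placeholder.
lm = lf.llms.GeminiPro1_5(api_key='<your-api-key>')
```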
langfun/core/llms/openai.py
CHANGED
@@ -233,8 +233,9 @@ class OpenAI(lf.LanguageModel):
       for chunk in prompt.chunk():
         if isinstance(chunk, str):
           item = dict(type='text', text=chunk)
-        elif isinstance(chunk, lf_modalities.Image)
-
+        elif isinstance(chunk, lf_modalities.Image):
+          uri = chunk.uri or chunk.content_uri
+          item = dict(type='image_url', image_url=dict(url=uri))
         else:
           raise ValueError(f'Unsupported modality object: {chunk!r}.')
         content.append(item)
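With the fix, an image chunk becomes an OpenAI `image_url` content part: a remote URI passes through unchanged, otherwise the new `content_uri` property embeds the bytes as a data URL. The resulting payload looks roughly like this (values illustrative):

```python
# Sketch of the content list built for one text chunk and one image chunk.
content = [
    dict(type='text', text='What is in this image?'),
    dict(
        type='image_url',
        image_url=dict(url='data:image/png;base64,iVBORw0KG...'),
    ),
]
```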
langfun/core/llms/vertexai.py
ADDED
@@ -0,0 +1,291 @@
+# Copyright 2023 The Langfun Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Vertex AI generative models."""
+
+import functools
+import os
+from typing import Annotated, Any
+
+from google.auth import credentials as credentials_lib
+import langfun.core as lf
+from langfun.core import modalities as lf_modalities
+import pyglove as pg
+
+
+SUPPORTED_MODELS_AND_SETTINGS = {
+    'gemini-1.5-pro-preview-0409': pg.Dict(api='gemini', rpm=5),
+    'gemini-1.0-pro': pg.Dict(api='gemini', rpm=300),
+    'gemini-1.0-pro-vision': pg.Dict(api='gemini', rpm=100),
+    # PaLM APIs.
+    'text-bison': pg.Dict(api='palm', rpm=1600),
+    'text-bison-32k': pg.Dict(api='palm', rpm=300),
+    'text-unicorn': pg.Dict(api='palm', rpm=100),
+}
+
+
+@lf.use_init_args(['model'])
+class VertexAI(lf.LanguageModel):
+  """Language model served on VertexAI."""
+
+  model: pg.typing.Annotated[
+      pg.typing.Enum(
+          pg.MISSING_VALUE, list(SUPPORTED_MODELS_AND_SETTINGS.keys())
+      ),
+      (
+          'Vertex AI model name. See '
+          'https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models '
+          'for details.'
+      ),
+  ]
+
+  project: Annotated[
+      str | None,
+      (
+          'Vertex AI project ID. Or set from environment variable '
+          'VERTEXAI_PROJECT.'
+      ),
+  ] = None
+
+  location: Annotated[
+      str | None,
+      (
+          'Vertex AI service location. Or set from environment variable '
+          'VERTEXAI_LOCATION.'
+      ),
+  ] = None
+
+  credentials: Annotated[
+      credentials_lib.Credentials | None,
+      (
+          'Credentials to use. If None, the default credentials to the '
+          'environment will be used.'
+      ),
+  ] = None
+
+  multimodal: Annotated[bool, 'Whether this model has multimodal support.'] = (
+      False
+  )
+
+  def _on_bound(self):
+    super()._on_bound()
+    self.__dict__.pop('_api_initialized', None)
+
+  @functools.cached_property
+  def _api_initialized(self):
+    project = self.project or os.environ.get('VERTEXAI_PROJECT', None)
+    if not project:
+      raise ValueError(
+          'Please specify `project` during `__init__` or set environment '
+          'variable `VERTEXAI_PROJECT` with your Vertex AI project ID.'
+      )
+
+    location = self.location or os.environ.get('VERTEXAI_LOCATION', None)
+    if not location:
+      raise ValueError(
+          'Please specify `location` during `__init__` or set environment '
+          'variable `VERTEXAI_LOCATION` with your Vertex AI service location.'
+      )
+
+    credentials = self.credentials
+    # Placeholder for Google-internal credentials.
+    from google.cloud.aiplatform import vertexai  # pylint: disable=g-import-not-at-top
+    vertexai.init(project=project, location=location, credentials=credentials)
+    return True
+
+  @property
+  def model_id(self) -> str:
+    """Returns a string to identify the model."""
+    return f'VertexAI({self.model})'
+
+  @property
+  def resource_id(self) -> str:
+    """Returns a string to identify the resource for rate control."""
+    return self.model_id
+
+  @property
+  def max_concurrency(self) -> int:
+    """Returns the maximum number of concurrent requests."""
+    return self.rate_to_max_concurrency(
+        requests_per_min=SUPPORTED_MODELS_AND_SETTINGS[self.model].rpm,
+        tokens_per_min=0,
+    )
+
+  def _generation_config(
+      self, options: lf.LMSamplingOptions
+  ) -> Any:  # generative_models.GenerationConfig
+    """Creates generation config from langfun sampling options."""
+    from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+    return generative_models.GenerationConfig(
+        temperature=options.temperature,
+        top_p=options.top_p,
+        top_k=options.top_k,
+        max_output_tokens=options.max_tokens,
+        stop_sequences=options.stop,
+    )
+
+  def _content_from_message(
+      self, prompt: lf.Message
+  ) -> list[str | Any]:
+    """Gets generation input from langfun message."""
+    from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+    chunks = []
+    for lf_chunk in prompt.chunk():
+      if isinstance(lf_chunk, str):
+        chunk = lf_chunk
+      elif self.multimodal and isinstance(lf_chunk, lf_modalities.Image):
+        chunk = generative_models.Image.from_bytes(lf_chunk.to_bytes())
+      else:
+        raise ValueError(f'Unsupported modality: {lf_chunk!r}')
+      chunks.append(chunk)
+    return chunks
+
+  def _generation_response_to_message(
+      self,
+      response: Any,  # generative_models.GenerationResponse
+  ) -> lf.Message:
+    """Parses generative response into message."""
+    return lf.AIMessage(response.text)
+
+  def _sample(self, prompts: list[lf.Message]) -> list[lf.LMSamplingResult]:
+    assert self._api_initialized, 'Vertex AI API is not initialized.'
+    return lf.concurrent_execute(
+        self._sample_single,
+        prompts,
+        executor=self.resource_id,
+        max_workers=self.max_concurrency,
+        # NOTE(daiyip): Vertex has its own policy on handling
+        # with rate limit, so we do not retry on errors.
+        retry_on_errors=None,
+    )
+
+  def _sample_single(self, prompt: lf.Message) -> lf.LMSamplingResult:
+    if self.sampling_options.n > 1:
+      raise ValueError(
+          f'`n` greater than 1 is not supported: {self.sampling_options.n}.'
+      )
+    api = SUPPORTED_MODELS_AND_SETTINGS[self.model].api
+    match api:
+      case 'gemini':
+        return self._sample_generative_model(prompt)
+      case 'palm':
+        return self._sample_text_generation_model(prompt)
+      case _:
+        raise ValueError(f'Unsupported API: {api}')
+
+  def _sample_generative_model(self, prompt: lf.Message) -> lf.LMSamplingResult:
+    """Samples a generative model."""
+    model = _VERTEXAI_MODEL_HUB.get_generative_model(self.model)
+    input_content = self._content_from_message(prompt)
+    response = model.generate_content(
+        input_content,
+        generation_config=self._generation_config(self.sampling_options),
+    )
+    usage_metadata = response.usage_metadata
+    usage = lf.LMSamplingUsage(
+        prompt_tokens=usage_metadata.prompt_token_count,
+        completion_tokens=usage_metadata.candidates_token_count,
+        total_tokens=usage_metadata.total_token_count,
+    )
+    return lf.LMSamplingResult(
+        [
+            # Scoring is not supported.
+            lf.LMSample(
+                self._generation_response_to_message(response), score=0.0
+            ),
+        ],
+        usage=usage,
+    )
+
+  def _sample_text_generation_model(
+      self, prompt: lf.Message
+  ) -> lf.LMSamplingResult:
+    """Samples a text generation model."""
+    model = _VERTEXAI_MODEL_HUB.get_text_generation_model(self.model)
+    predict_options = dict(
+        temperature=self.sampling_options.temperature,
+        top_k=self.sampling_options.top_k,
+        top_p=self.sampling_options.top_p,
+        max_output_tokens=self.sampling_options.max_tokens,
+        stop_sequences=self.sampling_options.stop,
+    )
+    response = model.predict(prompt.text, **predict_options)
+    return lf.LMSamplingResult([
+        # Scoring is not supported.
+        lf.LMSample(lf.AIMessage(response.text), score=0.0)
+    ])
+
+
+class _ModelHub:
+  """Vertex AI model hub."""
+
+  def __init__(self):
+    self._generative_model_cache = {}
+    self._text_generation_model_cache = {}
+
+  def get_generative_model(
+      self, model_id: str
+  ) -> Any:  # generative_models.GenerativeModel:
+    """Gets a generative model by model id."""
+    model = self._generative_model_cache.get(model_id, None)
+    if model is None:
+      from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+      model = generative_models.GenerativeModel(model_id)
+      self._generative_model_cache[model_id] = model
+    return model
+
+  def get_text_generation_model(
+      self, model_id: str
+  ) -> Any:  # language_models.TextGenerationModel
+    """Gets a text generation model by model id."""
+    model = self._text_generation_model_cache.get(model_id, None)
+    if model is None:
+      from google.cloud.aiplatform.vertexai import language_models  # pylint: disable=g-import-not-at-top
+      model = language_models.TextGenerationModel.from_pretrained(model_id)
+      self._text_generation_model_cache[model_id] = model
+    return model
+
+
+_VERTEXAI_MODEL_HUB = _ModelHub()
+
+
+class VertexAIGeminiPro1_5(VertexAI):  # pylint: disable=invalid-name
+  """Vertex AI Gemini 1.5 Pro model."""
+
+  model = 'gemini-1.5-pro-preview-0409'
+  multimodal = True
+
+
+class VertexAIGeminiPro1(VertexAI):  # pylint: disable=invalid-name
+  """Vertex AI Gemini 1.0 Pro model."""
+
+  model = 'gemini-1.0-pro'
+
+
+class VertexAIGeminiPro1Vision(VertexAI):  # pylint: disable=invalid-name
+  """Vertex AI Gemini 1.0 Pro model."""
+
+  model = 'gemini-1.0-pro-vision'
+  multimodal = True
+
+
+class VertexAIPalm2(VertexAI):  # pylint: disable=invalid-name
+  """Vertex AI PaLM2 text generation model."""
+
+  model = 'text-bison'
+
+
+class VertexAIPalm2_32K(VertexAI):  # pylint: disable=invalid-name
+  """Vertex AI PaLM2 text generation model (32K context length)."""
+
+  model = 'text-bison-32k'
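A minimal usage sketch for the new module, assuming `google-cloud-aiplatform` is installed and default credentials are available; project and location below are placeholders and may instead come from `VERTEXAI_PROJECT` / `VERTEXAI_LOCATION`:

```python
import langfun as lf

# Project and location values are illustrative.
lm = lf.llms.VertexAIGeminiPro1(project='my-project', location='us-central1')
print(lm('Hello, Vertex AI!').text)
```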
langfun/core/llms/vertexai_test.py
ADDED
@@ -0,0 +1,233 @@
+# Copyright 2024 The Langfun Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tests for Gemini models."""
+
+import os
+import unittest
+from unittest import mock
+
+from google.cloud.aiplatform.vertexai.preview import generative_models
+import langfun.core as lf
+from langfun.core import modalities as lf_modalities
+from langfun.core.llms import vertexai
+import pyglove as pg
+
+
+example_image = (
+    b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x18\x00\x00\x00\x18\x04'
+    b'\x03\x00\x00\x00\x12Y \xcb\x00\x00\x00\x18PLTE\x00\x00'
+    b'\x00fff_chaag_cg_ch^ci_ciC\xedb\x94\x00\x00\x00\x08tRNS'
+    b'\x00\n\x9f*\xd4\xff_\xf4\xe4\x8b\xf3a\x00\x00\x00>IDATx'
+    b'\x01c \x05\x08)"\xd8\xcc\xae!\x06pNz\x88k\x19\\Q\xa8"\x10'
+    b'\xc1\x14\x95\x01%\xc1\n\xa143Ta\xa8"D-\x84\x03QM\x98\xc3'
+    b'\x1a\x1a\x1a@5\x0e\x04\xa0q\x88\x05\x00\x07\xf8\x18\xf9'
+    b'\xdao\xd0|\x00\x00\x00\x00IEND\xaeB`\x82'
+)
+
+
+def mock_generate_content(content, generation_config, **kwargs):
+  del kwargs
+  c = pg.Dict(generation_config.to_dict())
+  print('zzz', c)
+  return generative_models.GenerationResponse.from_dict({
+      'candidates': [
+          {
+              'index': 0,
+              'content': {
+                  'role': 'model',
+                  'parts': [
+                      {
+                          'text': (
+                              f'This is a response to {content[0]} with '
+                              f'temperature={c.temperature}, '
+                              f'top_p={c.top_p}, '
+                              f'top_k={c.top_k}, '
+                              f'max_tokens={c.max_output_tokens}, '
+                              f'stop={"".join(c.stop_sequences)}.'
+                          )
+                      },
+                  ],
+              },
+          },
+      ]
+  })
+
+
+class VertexAITest(unittest.TestCase):
+  """Tests for Vertex model."""
+
+  def test_content_from_message_text_only(self):
+    text = 'This is a beautiful day'
+    model = vertexai.VertexAIGeminiPro1()
+    chunks = model._content_from_message(lf.UserMessage(text))
+    self.assertEqual(chunks, [text])
+
+  def test_content_from_message_mm(self):
+    message = lf.UserMessage(
+        'This is an {{image}}, what is it?',
+        image=lf_modalities.Image.from_bytes(example_image),
+    )
+
+    # Non-multimodal model.
+    with self.assertRaisesRegex(ValueError, 'Unsupported modality'):
+      vertexai.VertexAIGeminiPro1()._content_from_message(message)
+
+    model = vertexai.VertexAIGeminiPro1Vision()
+    chunks = model._content_from_message(message)
+    self.maxDiff = None
+    self.assertEqual([chunks[0], chunks[2]], ['This is an', ', what is it?'])
+    self.assertIsInstance(chunks[1], generative_models.Image)
+
+  def test_generation_response_to_message_text_only(self):
+    response = generative_models.GenerationResponse.from_dict({
+        'candidates': [
+            {
+                'index': 0,
+                'content': {
+                    'role': 'model',
+                    'parts': [
+                        {
+                            'text': 'hello world',
+                        },
+                    ],
+                },
+            },
+        ],
+    })
+    model = vertexai.VertexAIGeminiPro1()
+    message = model._generation_response_to_message(response)
+    self.assertEqual(message, lf.AIMessage('hello world'))
+
+  def test_model_hub(self):
+    with mock.patch(
+        'google.cloud.aiplatform.vertexai.preview.generative_models.'
+        'GenerativeModel.__init__'
+    ) as mock_model_init:
+      mock_model_init.side_effect = lambda *args, **kwargs: None
+      model = vertexai._VERTEXAI_MODEL_HUB.get_generative_model(
+          'gemini-1.0-pro'
+      )
+      self.assertIsNotNone(model)
+      self.assertIs(
+          vertexai._VERTEXAI_MODEL_HUB.get_generative_model('gemini-1.0-pro'),
+          model,
+      )
+
+    with mock.patch(
+        'google.cloud.aiplatform.vertexai.language_models.'
+        'TextGenerationModel.from_pretrained'
+    ) as mock_model_init:
+
+      class TextGenerationModel:
+        pass
+
+      mock_model_init.side_effect = lambda *args, **kw: TextGenerationModel()
+      model = vertexai._VERTEXAI_MODEL_HUB.get_text_generation_model(
+          'text-bison'
+      )
+      self.assertIsNotNone(model)
+      self.assertIs(
+          vertexai._VERTEXAI_MODEL_HUB.get_text_generation_model('text-bison'),
+          model,
+      )
+
+  def test_project_and_location_check(self):
+    with self.assertRaisesRegex(ValueError, 'Please specify `project`'):
+      _ = vertexai.VertexAIGeminiPro1()._api_initialized
+
+    with self.assertRaisesRegex(ValueError, 'Please specify `location`'):
+      _ = vertexai.VertexAIGeminiPro1(project='abc')._api_initialized
+
+    self.assertTrue(
+        vertexai.VertexAIGeminiPro1(
+            project='abc', location='us-central1'
+        )._api_initialized
+    )
+
+    os.environ['VERTEXAI_PROJECT'] = 'abc'
+    os.environ['VERTEXAI_LOCATION'] = 'us-central1'
+    self.assertTrue(vertexai.VertexAIGeminiPro1()._api_initialized)
+    del os.environ['VERTEXAI_PROJECT']
+    del os.environ['VERTEXAI_LOCATION']
+
+  def test_call_generative_model(self):
+    with mock.patch(
+        'google.cloud.aiplatform.vertexai.preview.generative_models.'
+        'GenerativeModel.__init__'
+    ) as mock_model_init:
+      mock_model_init.side_effect = lambda *args, **kwargs: None
+
+      with mock.patch(
+          'google.cloud.aiplatform.vertexai.preview.generative_models.'
+          'GenerativeModel.generate_content'
+      ) as mock_generate:
+        mock_generate.side_effect = mock_generate_content
+
+        lm = vertexai.VertexAIGeminiPro1(project='abc', location='us-central1')
+        self.assertEqual(
+            lm(
+                'hello',
+                temperature=2.0,
+                top_p=1.0,
+                top_k=20,
+                max_tokens=1024,
+                stop='\n',
+            ).text,
+            (
+                'This is a response to hello with temperature=2.0, '
+                'top_p=1.0, top_k=20.0, max_tokens=1024, stop=\n.'
+            ),
+        )
+
+  def test_call_text_generation_model(self):
+    with mock.patch(
+        'google.cloud.aiplatform.vertexai.language_models.'
+        'TextGenerationModel.from_pretrained'
+    ) as mock_model_init:
+
+      class TextGenerationModel:
+
+        def predict(self, prompt, **kwargs):
+          c = pg.Dict(kwargs)
+          return pg.Dict(
+              text=(
+                  f'This is a response to {prompt} with '
+                  f'temperature={c.temperature}, '
+                  f'top_p={c.top_p}, '
+                  f'top_k={c.top_k}, '
+                  f'max_tokens={c.max_output_tokens}, '
+                  f'stop={"".join(c.stop_sequences)}.'
+              )
+          )
+
+      mock_model_init.side_effect = lambda *args, **kw: TextGenerationModel()
+      lm = vertexai.VertexAIPalm2(project='abc', location='us-central1')
+      self.assertEqual(
+          lm(
+              'hello',
+              temperature=2.0,
+              top_p=1.0,
+              top_k=20,
+              max_tokens=1024,
+              stop='\n',
+          ).text,
+          (
+              'This is a response to hello with temperature=2.0, '
+              'top_p=1.0, top_k=20, max_tokens=1024, stop=\n.'
+          ),
+      )
+
+
+if __name__ == '__main__':
+  unittest.main()
langfun/core/modalities/image.py
CHANGED
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Image modality."""
 
-import base64
 import imghdr
 from typing import cast
 from langfun.core.modalities import mime
@@ -36,5 +35,4 @@ class Image(mime.MimeType):
   def _repr_html_(self) -> str:
     if self.uri and self.uri.lower().startswith(('http:', 'https:', 'ftp:')):
       return f'<img src="{self.uri}">'
-    image_raw = base64.b64encode(self.to_bytes()).decode()
-    return f'<img src="data:image/{self.image_format};base64,{image_raw}">'
+    return f'<img src="{self.content_uri}">'
langfun/core/modalities/mime.py
CHANGED
@@ -14,6 +14,7 @@
 """MIME type data."""
 
 import abc
+import base64
 from typing import Annotated, Union
 import langfun.core as lf
 import pyglove as pg
@@ -54,6 +55,11 @@ class MimeType(lf.Modality):
     self.rebind(content=content, skip_notification=True)
     return self.content
 
+  @property
+  def content_uri(self) -> str:
+    base64_content = base64.b64encode(self.to_bytes()).decode()
+    return f'data:{self.mime_type};base64,{base64_content}'
+
   @classmethod
   def from_uri(cls, uri: str, **kwargs) -> 'MimeType':
     return cls(uri=uri, content=None, **kwargs)
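A standalone sketch of what the new property computes: base64-encode the payload and wrap it in a data URL carrying the object's MIME type:

```python
import base64

def content_uri(mime_type: str, data: bytes) -> str:
  # Mirrors MimeType.content_uri for illustration only.
  return f'data:{mime_type};base64,{base64.b64encode(data).decode()}'

assert content_uri('text/plain', b'hi') == 'data:text/plain;base64,aGk='
```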
langfun/core/modalities/video.py
CHANGED
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Video modality."""
 
-import base64
 from typing import cast
 from langfun.core.modalities import mime
 
@@ -40,8 +39,7 @@ class Video(mime.MimeType):
   def _repr_html_(self) -> str:
     if self.uri and self.uri.lower().startswith(('http:', 'https:', 'ftp:')):
       return f'<video controls> <source src="{self.uri}"> </video>'
-    video_raw = base64.b64encode(self.to_bytes()).decode()
     return (
         '<video controls> <source'
-        f' src="data:video/{self.video_format};base64,{video_raw}"> </video>'
+        f' src="data:video/{self.content_uri}"> </video>'
     )
langfun/core/structured/__init__.py
CHANGED
@@ -64,6 +64,8 @@ from langfun.core.structured.prompting import QueryStructure
 from langfun.core.structured.prompting import QueryStructureJson
 from langfun.core.structured.prompting import QueryStructurePython
 from langfun.core.structured.prompting import query
+from langfun.core.structured.prompting import query_prompt
+from langfun.core.structured.prompting import query_output
 
 from langfun.core.structured.description import DescribeStructure
 from langfun.core.structured.description import describe
langfun/core/structured/prompting.py
CHANGED
@@ -16,6 +16,7 @@
 from typing import Any, Callable, Type, Union
 
 import langfun.core as lf
+from langfun.core.llms import fake
 from langfun.core.structured import mapping
 from langfun.core.structured import schema as schema_lib
 import pyglove as pg
@@ -214,13 +215,8 @@ def query(
   # prompt rendering.
   prompt_kwargs.pop('template_str', None)
 
-  if isinstance(prompt, str):
-    prompt = lf.Template(prompt, **prompt_kwargs)
-  elif isinstance(prompt, lf.Template):
-    prompt = prompt.rebind(**prompt_kwargs, raise_on_no_change=False)
-
-  if isinstance(prompt, lf.Template):
-    prompt = prompt.render(lm=lm)
+  if isinstance(prompt, (str, lf.Message, lf.Template)):
+    prompt = lf.Template.from_value(prompt, **prompt_kwargs).render(lm=lm)
   else:
     prompt = schema_lib.mark_missing(prompt)
 
@@ -240,3 +236,31 @@ def query(
       skip_lm=skip_lm,
   )
   return output if returns_message else output.result
+
+
+def query_prompt(
+    prompt: Union[str, pg.Symbolic],
+    schema: Union[
+        schema_lib.Schema, Type[Any], list[Type[Any]], dict[str, Any], None
+    ] = None,
+    **kwargs,
+) -> lf.Message:
+  """Returns the final prompt sent to LLM for `lf.query`."""
+  kwargs.pop('returns_message', None)
+  kwargs.pop('skip_lm', None)
+  return query(prompt, schema, skip_lm=True, returns_message=True, **kwargs)
+
+
+def query_output(
+    response: Union[str, lf.Message],
+    schema: Union[
+        schema_lib.Schema, Type[Any], list[Type[Any]], dict[str, Any], None
+    ],
+    **kwargs,
+) -> Any:
+  """Returns the final output of `lf.query` from a provided LLM response."""
+  kwargs.pop('prompt', None)
+  kwargs.pop('lm', None)
+  return query(
+      'Unused prompt', schema, lm=fake.StaticResponse(response), **kwargs
+  )
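Together these enable the split pipeline hinted at in the `langfun/__init__.py` comment: render all prompts up front, run them through an external batch system, then parse the raw responses locally. A hypothetical sketch:

```python
import langfun as lf

# Phase 1: render prompts locally (skip_lm=True under the hood).
prompts = [lf.query_prompt(f'{x} + {x} =', int) for x in range(3)]

# Phase 2: obtain raw responses elsewhere, e.g. from a batch pipeline.
raw_responses = ['0', '2', '4']

# Phase 3: parse each response via the StaticResponse fake LM.
assert [lf.query_output(r, int) for r in raw_responses] == [0, 2, 4]
```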
langfun/core/structured/prompting_test.py
CHANGED
@@ -285,6 +285,49 @@ class QueryTest(unittest.TestCase):
     with self.assertRaisesRegex(ValueError, 'Unknown protocol'):
       prompting.query('what is 1 + 1', int, protocol='text')
 
+  def test_query_prompt(self):
+    self.assertEqual(
+        prompting.query_prompt('what is this?', int),
+        inspect.cleandoc("""
+            Please respond to the last INPUT_OBJECT with OUTPUT_OBJECT according to OUTPUT_TYPE.
+
+            INPUT_OBJECT:
+              1 + 1 =
+
+            OUTPUT_TYPE:
+              Answer
+
+              ```python
+              class Answer:
+                final_answer: int
+              ```
+
+            OUTPUT_OBJECT:
+              ```python
+              Answer(
+                final_answer=2
+              )
+              ```
+
+            INPUT_OBJECT:
+              what is this?
+
+            OUTPUT_TYPE:
+              int
+
+            OUTPUT_OBJECT:
+            """),
+    )
+
+  def test_query_output(self):
+    self.assertEqual(
+        prompting.query_output(
+            lf.AIMessage('1'),
+            int,
+        ),
+        1,
+    )
+
 
 class QueryStructurePythonTest(unittest.TestCase):
 
langfun/core/template.py
CHANGED
@@ -17,7 +17,7 @@ import contextlib
 import dataclasses
 import functools
 import inspect
-from typing import Annotated, Any, Callable, Iterator, Set, Tuple, Type
+from typing import Annotated, Any, Callable, Iterator, Set, Tuple, Type, Union
 
 import jinja2
 from jinja2 import meta as jinja2_meta
@@ -495,6 +495,27 @@ class Template(
     t.sym_setparent(self)
     return t
 
+  @classmethod
+  def from_value(
+      cls,
+      value: Union[str, message_lib.Message, 'Template'],
+      **kwargs
+  ) -> 'Template':
+    """Create a template object from a string or template."""
+    if isinstance(value, cls):
+      return value.clone(override=kwargs) if kwargs else value  # pylint: disable=no-value-for-parameter
+    if isinstance(value, str):
+      return cls(template_str=value, **kwargs)
+    if isinstance(value, message_lib.Message):
+      kwargs.update(value.metadata)
+      return cls(template_str=value.text, **kwargs)
+    if isinstance(value, Template):
+      lfun = cls(template_str=value.template_str, **kwargs)
+      # So lfun could acccess all attributes from value.
+      lfun.sym_setparent(value)
+      return lfun
+    return cls(template_str='{{input}}', input=value, **kwargs)
+
 
 # Register converter from str to LangFunc, therefore we can always
 # pass strs to attributes that accept LangFunc.
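The behavior of the relocated `from_value`, following the branches above; a short sketch:

```python
import langfun as lf

t = lf.Template('Hello, {{name}}!', name='World')

# Same instance when no overrides are given...
assert lf.Template.from_value(t) is t

# ...and a clone with overrides otherwise (cf. the new langfunc_test case).
t2 = lf.Template.from_value(t, name='Langfun')
assert t2 is not t
assert t2.render().text == 'Hello, Langfun!'
```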
{langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langfun
-Version: 0.0.2.dev20240507
+Version: 0.0.2.dev20240513
 Summary: Langfun: Language as Functions.
 Home-page: https://github.com/google/langfun
 Author: Langfun Authors
@@ -21,6 +21,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: Software Development :: Libraries
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: google-cloud-aiplatform >=1.5.0
 Requires-Dist: google-generativeai >=0.3.2
 Requires-Dist: jinja2 >=3.1.2
 Requires-Dist: openai ==0.27.2
{langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-langfun/__init__.py,sha256=
+langfun/__init__.py,sha256=YAbi2FfTfKT41KJAx1tSNoiole_YRJmcEk3oOoqFqOs,2128
 langfun/core/__init__.py,sha256=6QEuXOZ9BXxm6TjpaMXuLwUBTYO3pkFDqn9QVBXyyPQ,4248
 langfun/core/component.py,sha256=oxesbC0BoE_TbtxwW5x-BAZWxZyyJbuPiX5S38RqCv0,9909
 langfun/core/component_test.py,sha256=uR-_Sz_42Jxc5qzLIB-f5_pXmNwnC01Xlbv5NOQSeSU,8021
@@ -6,8 +6,8 @@ langfun/core/concurrent.py,sha256=TRc49pJ3HQro2kb5FtcWkHjhBm8UcgE8RJybU5cU3-0,24
 langfun/core/concurrent_test.py,sha256=mwFMZhDUdppnDr7vDSTwcbMHwrdsIoKJwRYNtl4ZWL4,15185
 langfun/core/console.py,sha256=bk5rNPNm9rMGW5YT2HixxU04p2umnoabn5SDz6Dqe88,2317
 langfun/core/console_test.py,sha256=5SYJdxpJGLgdSSQqqMPoA1X6jpsLD8rgcyk-EgI65oE,1077
-langfun/core/langfunc.py,sha256=
-langfun/core/langfunc_test.py,sha256=
+langfun/core/langfunc.py,sha256=RvIcRjIq0jWYRu1xim-FYe4HSrt97r3GMBO_PuagUmw,11060
+langfun/core/langfunc_test.py,sha256=_mfARnakX3oji5HDigFSLMd6yQ2wma-2Mgbztwqn73g,8501
 langfun/core/language_model.py,sha256=6wtY8RGbOymfo1PYzcYCfOlWuKQcSVFs5R1sFB4-QMQ,20202
 langfun/core/language_model_test.py,sha256=T-itu7Li2smv2dkru0C0neCs2W4VJXlNTYahXU6jF54,19548
 langfun/core/memory.py,sha256=f-asN1F7Vehgdn_fK84v73GrEUOxRtaW934keutTKjk,2416
@@ -21,7 +21,7 @@ langfun/core/sampling.py,sha256=vygWvgC8MFw0_AKNSmz-ywMXJYWf8cl0tI8QycvAmyI,5795
 langfun/core/sampling_test.py,sha256=U7PANpMsl9E_pa4_Y4FzesSjcwg-u-LKHGCWSgv-8FY,3663
 langfun/core/subscription.py,sha256=euawEuSZP-BHydaT-AQpfYFL0m5pWPGcW0upFhrojqc,10930
 langfun/core/subscription_test.py,sha256=Y4ZdbZEwm83YNZBxHff0QR4QUa4rdaNXA3_jfIcArBo,8717
-langfun/core/template.py,sha256=
+langfun/core/template.py,sha256=UhNNGUDJ4StUhPBKzHmjym36khxHOGWGr9MDxBwgxQA,22284
 langfun/core/template_test.py,sha256=Mbv0dFjboGCVvbDkHD-HacZnlCi8Ku2Hpf2UjdwGSNo,15464
 langfun/core/text_formatting.py,sha256=ytjj7opnRJ6w-pkglL2CZUyfYDXLpNf65E42LBb31gc,5158
 langfun/core/text_formatting_test.py,sha256=nyKC6tn2L4hPJiqQHgxcbQsJJi4A4Nbj8FiO8iT6B80,1514
@@ -40,7 +40,7 @@ langfun/core/coding/python/parsing_test.py,sha256=9vAWF484kWIm6JZq8NFiMgKUDhXV-d
 langfun/core/coding/python/permissions.py,sha256=1QWGHvzL8MM0Ok_auQ9tURqZHtdOfJaDpBzZ29GUE-c,2544
 langfun/core/coding/python/permissions_test.py,sha256=w5EDb8QxpxgJyZkojyzVWQvDfg366zn99-g__6TbPQ0,2699
 langfun/core/eval/__init__.py,sha256=Evt-E4FEhZF2tXL6-byh_AyA7Cc_ZoGmvnN7vkAZedk,1898
-langfun/core/eval/base.py,sha256=
+langfun/core/eval/base.py,sha256=zcMPBKmcll5O08waEEnvmkEoXgcINhOat9rRJk8X8b4,74268
 langfun/core/eval/base_test.py,sha256=cHOTIWVW4Dp8gKKIKcZrAcJ-w84j2GIozTzJoiAX7p4,26743
 langfun/core/eval/matching.py,sha256=Y4vFoNTQEOwko6IA8l9OZ52-vt52e3VGmcTtvLA67wM,9782
 langfun/core/eval/matching_test.py,sha256=f7iVyXH5KGJBWt4Wp14Bt9J3X59A6Ayfog9MbuFvPew,5532
@@ -48,19 +48,21 @@ langfun/core/eval/patching.py,sha256=R0s2eAd1m97exQt06dmUL0V_MBG0W2Hxg7fhNB7cXW0
 langfun/core/eval/patching_test.py,sha256=8kCd54Egjju22FMgtJuxEsrXkW8ifs-UUBHtrCG1L6w,4775
 langfun/core/eval/scoring.py,sha256=1J7IATo-8FXUR0SBqk9icztHiM0lWkBFcWUo-vUURgQ,6376
 langfun/core/eval/scoring_test.py,sha256=O8olHbrUEg60gMxwOkWzKBJZpZoUlmVnBANX5Se2SXM,4546
-langfun/core/llms/__init__.py,sha256=
+langfun/core/llms/__init__.py,sha256=C-NrcgFqf3_EP_dN8oADdckQ-rfPKZhsjeSf86kJpLk,3642
 langfun/core/llms/anthropic.py,sha256=7W9YdPN3SlAFhAIQlihMkrpo7tTY_4NvD0KIlCrqcsk,8505
 langfun/core/llms/anthropic_test.py,sha256=TMM30myyEhwF99Le4RvJEXOn8RYl0q1FRkt9Q9nl1jk,5540
-langfun/core/llms/fake.py,sha256=
+langfun/core/llms/fake.py,sha256=_smsN_CsYbeWrtjpegEPwdAPV9mwaIuH_4oZGeXQwQI,2896
 langfun/core/llms/fake_test.py,sha256=ipKfdOcuqVcJ8lDXVpnBVb9HHG0hAVkFkMoHpWjC2cI,7212
-langfun/core/llms/google_genai.py,sha256=
+langfun/core/llms/google_genai.py,sha256=nDI_Adur_K458l6EWoiiAhzjfnjRSqfTiikdu7iLPyU,8808
 langfun/core/llms/google_genai_test.py,sha256=_UcGTfl16-aDUlEWFC2W2F8y9jPUs53RBYA6MOCpGXw,7525
 langfun/core/llms/groq.py,sha256=NaGItVL_pkOpqPpI4bPGU27xLFRoaeizZ49v2s-4ERs,7844
 langfun/core/llms/groq_test.py,sha256=M6GtlrsOvDun_j-sR8cPh4W_moHWZNSTiThu3kuwbbc,5281
 langfun/core/llms/llama_cpp.py,sha256=Y_KkMUf3Xfac49koMUtUslKl3h-HWp3-ntq7Jaa3bdo,2385
 langfun/core/llms/llama_cpp_test.py,sha256=ZxC6defGd_HX9SFRU9U4cJiQnBKundbOrchbXuC1Z2M,1683
-langfun/core/llms/openai.py,sha256=
+langfun/core/llms/openai.py,sha256=u2lqYcKFjFxLfWYD0KLT3YThqcoo66rWs3n0bcuSYBs,13286
 langfun/core/llms/openai_test.py,sha256=asSA1sVy_7hnXioD_2HTxtSDpVTKBUO_EjZuyHpwbn0,14854
+langfun/core/llms/vertexai.py,sha256=O2Lp-F4KJzvQSCjPV--sa6nMS9-GsLj2eiqA-1qGhWQ,9661
+langfun/core/llms/vertexai_test.py,sha256=LBk4luL_N13ZejZebBzQ3tkfjxFhk7uBS4JjEpojJAo,7836
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=cFfYvOIUae842pncqCAsRvqXCk2AnAsRYVx0mcIoAeY,3338
 langfun/core/llms/cache/in_memory.py,sha256=YfFyJEhLs73cUiB0ZfhMxYpdE8Iuxxw-dvMFwGHTSHw,4742
@@ -69,13 +71,13 @@ langfun/core/memories/__init__.py,sha256=HpghfZ-w1NQqzJXBx8Lz0daRhB2rcy2r9Xm491S
 langfun/core/memories/conversation_history.py,sha256=c9amD8hCxGFiZuVAzkP0dOMWSp8L90uvwkOejjuBqO0,1835
 langfun/core/memories/conversation_history_test.py,sha256=AaW8aNoFjxNusanwJDV0r3384Mg0eAweGmPx5DIkM0Y,2052
 langfun/core/modalities/__init__.py,sha256=ldCbs1HHAHAJECNu19vppA0sWEidI40xBs4W1F_YOlo,1073
-langfun/core/modalities/image.py,sha256=
+langfun/core/modalities/image.py,sha256=MUqRCQYyP7Gcf3dmzjU9J9ZEpfI08gAli9ZDmk0bJEk,1254
 langfun/core/modalities/image_test.py,sha256=YxDRvC49Bjwyyndd_P7y6XjyS7dOft0Zewwxk-7q4kE,2301
-langfun/core/modalities/mime.py,sha256=
+langfun/core/modalities/mime.py,sha256=RatBOPqYEneYMe-lfgRxJp5T3yvgV6vBMNY8lK2WU8k,2421
 langfun/core/modalities/mime_test.py,sha256=cVHxRvJ1QXC1SVhBmWkJdWGpL9Xl0UNfTQq6j0OGGL4,1881
-langfun/core/modalities/video.py,sha256=
+langfun/core/modalities/video.py,sha256=bzJLeBDF6FIVHyrAvRqYcQq2pCLBqN-UIgX_f3lM3E0,1654
 langfun/core/modalities/video_test.py,sha256=jYuI2m8S8zDCAVBPEUbbpP205dXAht90A2_PHWo4-r8,2039
-langfun/core/structured/__init__.py,sha256=
+langfun/core/structured/__init__.py,sha256=Qg1ocwsb60od8fJky3F3JAOhwjwT9WA7IX3C2j2s3zA,3707
 langfun/core/structured/completion.py,sha256=skBxt6V_fv2TBUKnzFgnPMbVY8HSYn8sY04MLok2yvs,7299
 langfun/core/structured/completion_test.py,sha256=MYxEzeScC3gFVujvrMMboBF5nh-QiVLwGgqAV3oaFUQ,19273
 langfun/core/structured/description.py,sha256=SXW4MJvshFjbR-0gw6rE21o6WXq12UlRXawvDBXMZFA,5211
@@ -86,8 +88,8 @@ langfun/core/structured/mapping.py,sha256=V2EI53KwhXxqcoH2ouhuei8aYWny0ml_FwMTiS
 langfun/core/structured/mapping_test.py,sha256=PiXklMeIa8L6KtMi3ju7J9Y39gZy0hIGz-Oeq4A_7XE,3835
 langfun/core/structured/parsing.py,sha256=keoVqEfzAbdULh6GawWFsTQzU91MzJXYFZjXGXLaD8g,11492
 langfun/core/structured/parsing_test.py,sha256=34wDrXaQ-EYhJLfDL8mX9K53oQMSzh5pVYdKjnESmK8,20895
-langfun/core/structured/prompting.py,sha256=
-langfun/core/structured/prompting_test.py,sha256=
+langfun/core/structured/prompting.py,sha256=cswl9c93edsYnXsZQmMzPpmqOuKnBzbgebTYBbSxwzo,8815
+langfun/core/structured/prompting_test.py,sha256=rddf5qHN8Gm_JaNMmytwiVEBm-eZVJFLQO4GljUgR44,21700
 langfun/core/structured/schema.py,sha256=Zy9y6Vq9DrFwcuP5o5VL_PvMCmzavF-nuDqyviBnaxk,25818
 langfun/core/structured/schema_generation.py,sha256=U3nRQsqmMZg_qIVDh2fiY3K4JLfsAL1LcKzIFP1iXFg,5316
 langfun/core/structured/schema_generation_test.py,sha256=RM9s71kMNg2jTePwInkiW9fK1ACN37eyPeF8OII-0zw,2950
@@ -103,8 +105,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
 langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
 langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
 langfun/core/templates/selfplay_test.py,sha256=DYVrkk7uNKCqJGEHH31HssU2BPuMItU1vJLzfcXIlYg,2156
-langfun-0.0.2.dev20240507.dist-info/LICENSE,sha256=
-langfun-0.0.2.dev20240507.dist-info/METADATA,sha256=
-langfun-0.0.2.dev20240507.dist-info/WHEEL,sha256=
-langfun-0.0.2.dev20240507.dist-info/top_level.txt,sha256=
-langfun-0.0.2.dev20240507.dist-info/RECORD,,
+langfun-0.0.2.dev20240513.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.0.2.dev20240513.dist-info/METADATA,sha256=rjYqB2Epk9KuZYcGS-MbSrGnRFo4VDaHwafhCFGnah0,3452
+langfun-0.0.2.dev20240513.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+langfun-0.0.2.dev20240513.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.0.2.dev20240513.dist-info/RECORD,,
{langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/LICENSE
File without changes
{langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/WHEEL
File without changes
{langfun-0.0.2.dev20240507.dist-info → langfun-0.0.2.dev20240513.dist-info}/top_level.txt
File without changes