langfun 0.0.2.dev20240212__py3-none-any.whl → 0.0.2.dev20240214__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/eval/base_test.py +1 -1
- langfun/core/llms/__init__.py +3 -4
- langfun/core/llms/openai.py +21 -26
- langfun/core/structured/parsing.py +8 -1
- {langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/METADATA +1 -1
- {langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/RECORD +9 -9
- {langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/LICENSE +0 -0
- {langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/WHEEL +0 -0
- {langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/top_level.txt +0 -0
langfun/core/eval/base_test.py
CHANGED
@@ -481,7 +481,7 @@ class SuiteTest(unittest.TestCase):
             schema_fn='answer_schema()',
         ),
         cache_stats=dict(
-            use_cache=True, num_queries=4, num_hits=
+            use_cache=True, num_queries=4, num_hits=1, num_updates=3
         ),
         metrics=dict(total=2, failures=2, failure_rate=1.0),
     ),
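For orientation, the updated assertion expects the evaluation's cache stats to report both hit and update counts. A minimal sketch of the asserted shape, using only the field names visible in the hunk above (the surrounding `Suite`/`Evaluation` wiring from `base_test.py` is not reproduced and is assumed here):

```python
# Hypothetical illustration of the cache_stats dict asserted above.
expected_cache_stats = dict(
    use_cache=True,   # caching was enabled for the run
    num_queries=4,    # total LM queries issued
    num_hits=1,       # queries answered from the cache
    num_updates=3,    # new entries written to the cache
)

# Hits plus updates account for every query (1 + 3 == 4).
assert (
    expected_cache_stats['num_hits'] + expected_cache_stats['num_updates']
    == expected_cache_stats['num_queries']
)
```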
langfun/core/llms/__init__.py
CHANGED
@@ -32,21 +32,20 @@ from langfun.core.llms.gemini import GeminiProVision
 from langfun.core.llms.openai import OpenAI
 
 from langfun.core.llms.openai import Gpt4Turbo
+from langfun.core.llms.openai import Gpt4Turbo_0125
 from langfun.core.llms.openai import Gpt4TurboVision
 from langfun.core.llms.openai import Gpt4
 from langfun.core.llms.openai import Gpt4_0613
-from langfun.core.llms.openai import Gpt4_0314
 from langfun.core.llms.openai import Gpt4_32K
 from langfun.core.llms.openai import Gpt4_32K_0613
-from langfun.core.llms.openai import Gpt4_32K_0314
 
 from langfun.core.llms.openai import Gpt35Turbo
+from langfun.core.llms.openai import Gpt35Turbo_0125
 from langfun.core.llms.openai import Gpt35Turbo_1106
 from langfun.core.llms.openai import Gpt35Turbo_0613
-from langfun.core.llms.openai import Gpt35Turbo_0301
 from langfun.core.llms.openai import Gpt35Turbo16K
 from langfun.core.llms.openai import Gpt35Turbo16K_0613
-
+
 
 from langfun.core.llms.openai import Gpt35
 
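Taken together, the import changes swap the retired 0301/0314 snapshot aliases for the newer 0125 snapshots. A hedged sketch of how the updated exports might be used; only the class names come from this diff, while the instantiation details (no-argument construction, `model` attribute access) follow langfun's usual pattern and are assumptions here:

```python
# Sketch only: assumes langfun is installed and that langfun.core.llms exposes
# the aliases shown in the updated __init__.py above.
from langfun.core import llms

# Snapshot classes newly exported in this release.
gpt4_turbo_0125 = llms.Gpt4Turbo_0125()
gpt35_turbo_0125 = llms.Gpt35Turbo_0125()

# The retired aliases (e.g. llms.Gpt35Turbo_0301, llms.Gpt4_0314) would now
# raise AttributeError, so callers pinned to those snapshots need to migrate.
print(gpt4_turbo_0125.model)   # expected: 'gpt-4-0125-preview'
print(gpt35_turbo_0125.model)  # expected: 'gpt-3.5-turbo-0125'
```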
langfun/core/llms/openai.py
CHANGED
@@ -302,40 +302,40 @@ class Gpt4(OpenAI):
 
 class Gpt4Turbo(Gpt4):
   """GPT-4 Turbo with 128K context window size. Knowledge up to 4-2023."""
-  model = 'gpt-4-
+  model = 'gpt-4-turbo-preview'
 
 
-class Gpt4TurboVision(
+class Gpt4TurboVision(Gpt4Turbo):
   """GPT-4 Turbo with vision."""
   model = 'gpt-4-vision-preview'
   multimodal = True
 
 
-class
-  """GPT-4
-  model = 'gpt-4-
+class Gpt4Turbo_0125(Gpt4Turbo):  # pylint:disable=invalid-name
+  """GPT-4 Turbo with 128K context window size. Knowledge up to 4-2023."""
+  model = 'gpt-4-0125-preview'
 
 
-class
-  """GPT-4
-  model = 'gpt-4-
+class Gpt4Turbo_1106(Gpt4Turbo):  # pylint:disable=invalid-name
+  """GPT-4 Turbo @20231106. 128K context window. Knowledge up to 4-2023."""
+  model = 'gpt-4-1106-preview'
+
+
+class Gpt4_0613(Gpt4):  # pylint:disable=invalid-name
+  """GPT-4 @20230613. 8K context window. Knowledge up to 9-2021."""
+  model = 'gpt-4-0613'
 
 
 class Gpt4_32K(Gpt4):  # pylint:disable=invalid-name
-  """GPT-4 with 32K context window
+  """Latest GPT-4 with 32K context window."""
   model = 'gpt-4-32k'
 
 
 class Gpt4_32K_0613(Gpt4_32K):  # pylint:disable=invalid-name
-  """GPT-4 32K
+  """GPT-4 @20230613. 32K context window. Knowledge up to 9-2021."""
   model = 'gpt-4-32k-0613'
 
 
-class Gpt4_32K_0314(Gpt4_32K):  # pylint:disable=invalid-name
-  """GPT-4 32K 0314."""
-  model = 'gpt-4-32k-0314'
-
-
 class Gpt35(OpenAI):
   """GPT-3.5. 4K max tokens, trained up on data up to Sep, 2021."""
   model = 'text-davinci-003'
@@ -346,8 +346,13 @@ class Gpt35Turbo(Gpt35):
   model = 'gpt-3.5-turbo'
 
 
+class Gpt35Turbo_0125(Gpt35Turbo):  # pylint:disable=invalid-name
+  """GPT-3.5 Turbo @20240125. 16K context window. Knowledge up to 09/2021."""
+  model = 'gpt-3.5-turbo-0125'
+
+
 class Gpt35Turbo_1106(Gpt35Turbo):  # pylint:disable=invalid-name
-  """Gpt3.5 Turbo
+  """Gpt3.5 Turbo @20231106. 16K context window. Knowledge up to 09/2021."""
   model = 'gpt-3.5-turbo-1106'
 
 
@@ -356,11 +361,6 @@ class Gpt35Turbo_0613(Gpt35Turbo):  # pylint:disable=invalid-name
   model = 'gpt-3.5-turbo-0613'
 
 
-class Gpt35Turbo_0301(Gpt35Turbo):  # pylint:disable=invalid-name
-  """Gpt3.5 Turbo snapshot at 2023/03/01, with 4K context window size."""
-  model = 'gpt-3.5-turbo-0301'
-
-
 class Gpt35Turbo16K(Gpt35Turbo):
   """Latest GPT-3.5 model with 16K context window size."""
   model = 'gpt-3.5-turbo-16k'
@@ -371,11 +371,6 @@ class Gpt35Turbo16K_0613(Gpt35Turbo):  # pylint:disable=invalid-name
   model = 'gpt-3.5-turbo-16k-0613'
 
 
-class Gpt35Turbo16K_0301(Gpt35Turbo):  # pylint:disable=invalid-name
-  """Gtp 3.5 Turbo 16K 0301."""
-  model = 'gpt-3.5-turbo-16k-0301'
-
-
 class Gpt3(OpenAI):
   """Most capable GPT-3 model (Davinci) 2K context window size.
 
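The openai.py changes all follow the same pattern visible in the hunks above: each OpenAI variant is a small subclass that pins the `model` attribute to a specific API model name, and dated snapshots subclass the floating alias. A minimal standalone sketch of that pattern (the `OpenAI` base class below is a simplified stand-in, not langfun's actual implementation, which also handles sampling options, API keys, and retries):

```python
# Standalone sketch of the class-per-model pattern used in openai.py above.
class OpenAI:
  """Base class: subclasses only need to override `model`."""
  model: str = 'gpt-3.5-turbo'
  multimodal: bool = False


class Gpt4Turbo(OpenAI):
  """Tracks the floating 'gpt-4-turbo-preview' alias."""
  model = 'gpt-4-turbo-preview'


class Gpt4Turbo_0125(Gpt4Turbo):  # pylint:disable=invalid-name
  """Pins the dated 2024-01-25 snapshot added in this release."""
  model = 'gpt-4-0125-preview'


if __name__ == '__main__':
  # Callers pick a class to choose between the moving alias and a pinned snapshot.
  print(Gpt4Turbo().model)       # -> gpt-4-turbo-preview
  print(Gpt4Turbo_0125().model)  # -> gpt-4-0125-preview
```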
langfun/core/structured/parsing.py
CHANGED
@@ -78,6 +78,7 @@ def parse(
     user_prompt: str | None = None,
     lm: lf.LanguageModel | None = None,
     examples: list[mapping.MappingExample] | None = None,
+    include_context: bool = False,
     autofix: int = 0,
     autofix_lm: lf.LanguageModel | None = None,
     protocol: schema_lib.SchemaProtocol = 'python',
@@ -131,6 +132,8 @@ def parse(
       `lf.context` context manager will be used.
     examples: An optional list of fewshot examples for helping parsing. If None,
       the default one-shot example will be added.
+    include_context: If True, include the request sent to LLM for obtaining the
+      response to pares. Otherwise include only the response.
     autofix: Number of attempts to auto fix the generated code. If 0, autofix is
       disabled. Auto-fix is not supported for 'json' protocol.
     autofix_lm: The language model to use for autofix. If not specified, the
@@ -153,7 +156,7 @@ def parse(
   message = lf.AIMessage.from_value(message)
   if message.source is None and user_prompt is not None:
     message.source = lf.UserMessage(user_prompt, tags=['lm-input'])
-  context = getattr(message.lm_input, 'text', None)
+  context = getattr(message.lm_input, 'text', None) if include_context else None
 
   if examples is None:
     examples = DEFAULT_PARSE_EXAMPLES
@@ -184,6 +187,7 @@ def call(
     lm: lf.LanguageModel | None = None,
     parsing_lm: lf.LanguageModel | None = None,
     parsing_examples: list[mapping.MappingExample] | None = None,
+    parsing_include_context: bool = False,
     autofix: int = 0,
     autofix_lm: lf.LanguageModel | None = None,
     response_postprocess: Callable[[str], str] | None = None,
@@ -225,6 +229,8 @@ def call(
       for prompting the LM will be used.
     parsing_examples: Examples for parsing the output. If None,
       `lf.structured.DEFAULT_PARSE_EXAMPLES` will be used.
+    parsing_include_context: If True, include the request sent to LLM for
+      obtaining the response to pares. Otherwise include only the response.
     autofix: Number of attempts to auto fix the generated code. If 0, autofix is
       disabled. Auto-fix is not supported for 'json' protocol.
     autofix_lm: The language model to use for autofix. If not specified, the
@@ -258,6 +264,7 @@ def call(
       schema,
       examples=parsing_examples,
      lm=parsing_lm or lm,
+      include_context=parsing_include_context,
      autofix=autofix,
      autofix_lm=autofix_lm or lm,
      protocol=protocol,
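The net effect of the parsing.py change is a new opt-in flag that forwards the original LM request as context for the parsing step. A hedged usage sketch; only the `include_context` and `parsing_include_context` parameters come from this diff, while the prompt, the `int` schema, the model choice, and the exact positional signatures of `lf.structured.call` / `lf.structured.parse` are illustrative assumptions:

```python
# Sketch only: assumes an OpenAI API key is configured in the environment and
# that lf.structured.call / lf.structured.parse accept the parameters shown in
# the hunks above.
import langfun as lf

lm = lf.llms.Gpt35Turbo_0125()

# End-to-end: prompt the LM, then parse its free-form answer into an int.
# parsing_include_context=True forwards the original request to the parsing
# call, which can help disambiguate terse answers.
result = lf.structured.call(
    'Compute the result of three plus four.',
    int,
    lm=lm,
    parsing_include_context=True,
)

# Or parse an existing LM response directly, again opting in to context.
answer = lf.structured.parse(
    'the result is 7',
    int,
    user_prompt='What is 3 + 4?',
    lm=lm,
    include_context=True,
)
```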
{langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/RECORD
CHANGED
@@ -41,19 +41,19 @@ langfun/core/coding/python/permissions.py,sha256=1QWGHvzL8MM0Ok_auQ9tURqZHtdOfJa
 langfun/core/coding/python/permissions_test.py,sha256=w5EDb8QxpxgJyZkojyzVWQvDfg366zn99-g__6TbPQ0,2699
 langfun/core/eval/__init__.py,sha256=iDA2OcJ3kR6ixZizXIY3N9LsjkaVrfTbSClTiSP8ekY,1291
 langfun/core/eval/base.py,sha256=tT_85jpLMCbXufKf64BMslid9FB1TNhe3AIkIpLULhA,53782
-langfun/core/eval/base_test.py,sha256=
+langfun/core/eval/base_test.py,sha256=3AG-PN6yv0DMcHvpPas2nv2bJoY9JdAYSYwiPUnnolo,21177
 langfun/core/eval/matching.py,sha256=g2yuBb4FeOlAlB10hqdWvaIg4QVQlJbiViRDcD2Y8go,9567
 langfun/core/eval/matching_test.py,sha256=IfuMF_dEmy4VzK6tIldRzD2Nqlml7SSh4u-baFNcZrw,4912
 langfun/core/eval/scoring.py,sha256=mshqbV_WM0zcp15TSR32ACMBDymlsbf6YH06PPx1Tw0,6139
 langfun/core/eval/scoring_test.py,sha256=_L_B40VZkyI2_PJce-jVKYC4llrO4jGUR5j86Gu6AT0,4046
-langfun/core/llms/__init__.py,sha256=
+langfun/core/llms/__init__.py,sha256=zTTSz46M52wqJtgxg2lGvTgrTB1wl9xMaQvOxfi00bs,2346
 langfun/core/llms/fake.py,sha256=JH790_WDtlohL0leJMqd1F6a1YuM9XV3rgxHBsoILRg,2309
 langfun/core/llms/fake_test.py,sha256=nP3420LKGwTJJG1YH3y5XgH6yKmbFmmbonBwvMu-ZYA,3368
 langfun/core/llms/gemini.py,sha256=p3d4Cl2uET-os1n_V3YNE6-6cYrZjndj7lxZIk2E8_4,5688
 langfun/core/llms/gemini_test.py,sha256=ybNNCn3JW3hYpMe0wT5ILGDrMPaYYU8PN2kSookM0jk,5433
 langfun/core/llms/llama_cpp.py,sha256=EIjJa1-Tg4_VaIxVR88oDWSWc_axc1r2KwSPpl4PSp0,2549
 langfun/core/llms/llama_cpp_test.py,sha256=ZxC6defGd_HX9SFRU9U4cJiQnBKundbOrchbXuC1Z2M,1683
-langfun/core/llms/openai.py,sha256=
+langfun/core/llms/openai.py,sha256=ufFz1Q2bHVukyAck0dZ5MYZ8IKcVkYzDjQn3YXugLCQ,11711
 langfun/core/llms/openai_test.py,sha256=yfw7A-4Zo9u1cIkAMk39evE-tO7z6isNYTXiSnJXDQw,7599
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=cFfYvOIUae842pncqCAsRvqXCk2AnAsRYVx0mcIoAeY,3338
@@ -72,7 +72,7 @@ langfun/core/structured/description.py,sha256=vDiW1g2VbvG8ucNjV7Pp3VYCeAnLcp6vLQ
 langfun/core/structured/description_test.py,sha256=UtZGjSFUaQ6130t1E5tcL7ODu0xIefkapb53TbnqsK8,7362
 langfun/core/structured/mapping.py,sha256=lGkjhmvVdhBGgJmc5KbfT2xQjC1MuU4OCcCfsAYJjaQ,10192
 langfun/core/structured/mapping_test.py,sha256=07DDCGbwytQHSMm7fCi5-Ly-JNgdV4ubHZq0wthX4A4,3338
-langfun/core/structured/parsing.py,sha256=
+langfun/core/structured/parsing.py,sha256=XWo1UdG1A_c0v4OgQ1C_6nK0264_UAVrmJfFz4jHbRE,10690
 langfun/core/structured/parsing_test.py,sha256=2_Uf3LYNRON1-5ysEr75xiG_cAxR3ZiixSfvUQu6mOQ,20846
 langfun/core/structured/prompting.py,sha256=P8in3qHXCuwjfzLpplS5woQSHV5aheXgm2mFiqVQD4g,6384
 langfun/core/structured/prompting_test.py,sha256=5lPsxUzyHEjOh0D5V5GEYjFFJZvUrebLV1aCCJS4H3Y,18971
@@ -87,8 +87,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
 langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
 langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
 langfun/core/templates/selfplay_test.py,sha256=IB5rWbjK_9CTkqEo1BclQPzFAKcIiusJckH8J19HFgI,2096
-langfun-0.0.2.
-langfun-0.0.2.
-langfun-0.0.2.
-langfun-0.0.2.
-langfun-0.0.2.
+langfun-0.0.2.dev20240214.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.0.2.dev20240214.dist-info/METADATA,sha256=uxTKGG1k253gR9c6VOHJULhL357YLkWxtXpVV6HAoyw,3368
+langfun-0.0.2.dev20240214.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+langfun-0.0.2.dev20240214.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.0.2.dev20240214.dist-info/RECORD,,
{langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/LICENSE
File without changes
{langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/WHEEL
File without changes
{langfun-0.0.2.dev20240212.dist-info → langfun-0.0.2.dev20240214.dist-info}/top_level.txt
File without changes