langfun 0.0.2.dev20240514__py3-none-any.whl → 0.0.2.dev20240518__py3-none-any.whl

This diff shows the changes between two publicly released versions of the langfun package, as they appear in the public registry. It is provided for informational purposes only.


@@ -566,12 +566,19 @@ class LanguageModel(component.Component):
 
   def score(
       self,
-      prompt: str | message_lib.Message,
+      prompt: str | message_lib.Message | list[message_lib.Message],
       completions: list[str | message_lib.Message],
       **kwargs,
   ) -> list[LMScoringResult]:
     """Scores the given prompt."""
-    prompt = message_lib.UserMessage.from_value(prompt)
+    if isinstance(prompt, list):
+      if len(prompt) != len(completions):
+        raise ValueError(
+            'prompt and completions must have the same length.'
+        )
+      prompt = [message_lib.UserMessage.from_value(p) for p in prompt]
+    else:
+      prompt = message_lib.UserMessage.from_value(prompt)
     completions = [message_lib.UserMessage.from_value(c) for c in completions]
 
     call_counter = self._call_counter
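
With this change, `score` accepts either a single prompt shared by all completions, or one prompt per completion (the lists must have equal length). A minimal usage sketch; the fake `Echo` model stands in for any `lf.LanguageModel` that implements `_score`:

```python
import langfun as lf

lm = lf.llms.Echo()  # a fake model; any scoring-capable LanguageModel works

# One prompt scored against several completions (existing behavior).
print(lm.score('1 + 1 =', ['2', '3']))

# New: one prompt per completion. Mismatched lengths raise the
# ValueError introduced above.
print(lm.score(['1 + 1 =', '2 + 2 ='], ['2', '4']))
```
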
@@ -587,7 +594,8 @@ class LanguageModel(component.Component):
     return scoring_results
 
   def _score(
-      self, prompt: message_lib.Message, completions: list[message_lib.Message]
+      self, prompt: message_lib.Message | list[message_lib.Message],
+      completions: list[message_lib.Message]
   ) -> list[LMScoringResult]:
     """Subclass to implement."""
     raise NotImplementedError(
@@ -596,7 +604,7 @@ class LanguageModel(component.Component):
 
   def _debug_score(
       self,
-      prompt: message_lib.Message,
+      prompt: message_lib.Message | list[message_lib.Message],
       completions: list[message_lib.Message],
       scoring_results: list[LMScoringResult],
       call_counter: int,
@@ -615,15 +623,19 @@ class LanguageModel(component.Component):
           title=f'\n[{call_counter}] SCORING LM WITH PROMPT:',
           color='green',
       )
-      referred_modalities = prompt.referred_modalities()
-      if referred_modalities:
-        console.write(
-            pg.object_utils.kvlist_str(
-                [(k, repr(v), None) for k, v in referred_modalities.items()]
-            ),
-            title=f'\n[{call_counter}] MODALITY OBJECTS SENT TO LM:',
-            color='green',
-        )
+      if isinstance(prompt, list):
+        referred_modalities_lst = [p.referred_modalities() for p in prompt]
+      else:
+        referred_modalities_lst = [prompt.referred_modalities(),]
+      if referred_modalities_lst:
+        for referred_modalities in referred_modalities_lst:
+          console.write(
+              pg.object_utils.kvlist_str(
+                  [(k, repr(v), None) for k, v in referred_modalities.items()]
+              ),
+              title=f'\n[{call_counter}] MODALITY OBJECTS SENT TO LM:',
+              color='green',
+          )
 
     if debug & LMDebugMode.RESPONSE:
       console.write(
@@ -68,7 +68,7 @@ class MockScoringModel(MockModel):
 
   def _score(
       self,
-      prompt: message_lib.Message,
+      prompt: message_lib.Message | list[message_lib.Message],
       completions: list[message_lib.Message],
       **kwargs
   ) -> list[lm_lib.LMScoringResult]:
@@ -508,6 +508,17 @@ class LanguageModelTest(unittest.TestCase):
         ],
     )
 
+    self.assertEqual(
+        lm.score(
+            [message_lib.UserMessage('hi {{image}}', image=Image()),
+             message_lib.UserMessage('hi {{image}}', image=Image())],
+            ['1', '2'], debug=debug_mode),
+        [
+            lm_lib.LMScoringResult(score=-0.0),
+            lm_lib.LMScoringResult(score=-1.0),
+        ],
+    )
+
     debug_info = string_io.getvalue()
     expected_included = [
         debug_prints[f]
@@ -528,6 +539,10 @@ class LanguageModelTest(unittest.TestCase):
     if debug_mode & lm_lib.LMDebugMode.PROMPT:
       self.assertIn('[0] MODALITY OBJECTS SENT TO LM', debug_info)
 
+  def test_score_with_unmatched_prompt_and_completions(self):
+    with self.assertRaises(ValueError):
+      MockScoringModel().score(['hi',], ['1', '2', '3'])
+
   def test_score_with_unsupported_model(self):
     with self.assertRaises(NotImplementedError):
       MockModel().score('hi', ['1', '2'])
@@ -35,25 +35,39 @@ from langfun.core.llms.google_genai import Palm2_IT
 # OpenAI models.
 from langfun.core.llms.openai import OpenAI
 
+from langfun.core.llms.openai import Gpt4o
+from langfun.core.llms.openai import Gpt4o_20240513
+
 from langfun.core.llms.openai import Gpt4Turbo
 from langfun.core.llms.openai import Gpt4Turbo_20240409
 from langfun.core.llms.openai import Gpt4TurboPreview
-from langfun.core.llms.openai import Gpt4TurboPreview_0125
-from langfun.core.llms.openai import Gpt4TurboPreview_1106
+from langfun.core.llms.openai import Gpt4TurboPreview_20240125
+from langfun.core.llms.openai import Gpt4TurboPreview_20231106
 from langfun.core.llms.openai import Gpt4VisionPreview
-from langfun.core.llms.openai import Gpt4VisionPreview_1106
+from langfun.core.llms.openai import Gpt4VisionPreview_20231106
 from langfun.core.llms.openai import Gpt4
-from langfun.core.llms.openai import Gpt4_0613
+from langfun.core.llms.openai import Gpt4_20230613
+
 from langfun.core.llms.openai import Gpt4_32K
-from langfun.core.llms.openai import Gpt4_32K_0613
+from langfun.core.llms.openai import Gpt4_32K_20230613
 
 from langfun.core.llms.openai import Gpt35Turbo
-from langfun.core.llms.openai import Gpt35Turbo_0125
-from langfun.core.llms.openai import Gpt35Turbo_1106
-from langfun.core.llms.openai import Gpt35Turbo_0613
+from langfun.core.llms.openai import Gpt35Turbo_20240125
+from langfun.core.llms.openai import Gpt35Turbo_20231106
+from langfun.core.llms.openai import Gpt35Turbo_20230613
 from langfun.core.llms.openai import Gpt35Turbo16K
-from langfun.core.llms.openai import Gpt35Turbo16K_0613
-
+from langfun.core.llms.openai import Gpt35Turbo16K_20230613
+
+# For backward compatibility.
+Gpt4TurboPreview_0125 = Gpt4TurboPreview_20240125
+Gpt4TurboPreview_1106 = Gpt4TurboPreview_20231106
+Gpt4VisionPreview_1106 = Gpt4VisionPreview_20231106
+Gpt4_0613 = Gpt4_20230613
+Gpt4_32K_0613 = Gpt4_32K_20230613
+Gpt35Turbo_0125 = Gpt35Turbo_20240125
+Gpt35Turbo_1106 = Gpt35Turbo_20231106
+Gpt35Turbo_0613 = Gpt35Turbo_20230613
+Gpt35Turbo16K_0613 = Gpt35Turbo16K_20230613
 
 from langfun.core.llms.openai import Gpt35
 
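
The alias block above keeps code written against the old snapshot names importable after the rename to date-stamped class names. A quick sketch of the compatibility guarantee:

```python
# Sketch: old and new names refer to the same class after this change.
from langfun.core.llms import Gpt4_0613, Gpt4_20230613

assert Gpt4_0613 is Gpt4_20230613
```
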
langfun/core/llms/fake.py CHANGED
@@ -21,7 +21,8 @@ import langfun.core as lf
 class Fake(lf.LanguageModel):
   """The base class for all fake language models."""
 
-  def _score(self, prompt: lf.Message, completions: list[lf.Message]):
+  def _score(self, prompt: lf.Message| list[lf.Message],
+             completions: list[lf.Message]):
     return [lf.LMScoringResult(score=-i * 1.0) for i in range(len(completions))]
 
   def _sample(self, prompts: list[lf.Message]) -> list[lf.LMSamplingResult]:
@@ -33,15 +33,18 @@ _DEFAULT_RPM = 3000
 SUPPORTED_MODELS_AND_SETTINGS = {
     # Models from https://platform.openai.com/docs/models
     # RPM is from https://platform.openai.com/docs/guides/rate-limits
+    # GPT-4o models
+    'gpt-4o': pg.Dict(rpm=10000, tpm=5000000),
+    'gpt-4o-2024-05-13': pg.Dict(rpm=10000, tpm=5000000),
     # GPT-4-Turbo models
-    'gpt-4-turbo': pg.Dict(rpm=10000, tpm=1500000),
-    'gpt-4-turbo-2024-04-09': pg.Dict(rpm=10000, tpm=1500000),
-    'gpt-4-turbo-preview': pg.Dict(rpm=10000, tpm=1500000),
-    'gpt-4-0125-preview': pg.Dict(rpm=10000, tpm=1500000),
-    'gpt-4-1106-preview': pg.Dict(rpm=10000, tpm=1500000),
-    'gpt-4-vision-preview': pg.Dict(rpm=10000, tpm=1500000),
+    'gpt-4-turbo': pg.Dict(rpm=10000, tpm=2000000),
+    'gpt-4-turbo-2024-04-09': pg.Dict(rpm=10000, tpm=2000000),
+    'gpt-4-turbo-preview': pg.Dict(rpm=10000, tpm=2000000),
+    'gpt-4-0125-preview': pg.Dict(rpm=10000, tpm=2000000),
+    'gpt-4-1106-preview': pg.Dict(rpm=10000, tpm=2000000),
+    'gpt-4-vision-preview': pg.Dict(rpm=10000, tpm=2000000),
     'gpt-4-1106-vision-preview': pg.Dict(
-        rpm=10000, tpm=1500000
+        rpm=10000, tpm=2000000
     ),
     # GPT-4 models
     'gpt-4': pg.Dict(rpm=10000, tpm=300000),
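
The rpm/tpm entries are plain `pg.Dict` settings keyed by model name, so the new GPT-4o limits can be read directly; a small sketch (values taken from the table above):

```python
from langfun.core.llms import openai

# Sketch: reading the per-model rate-limit settings added above.
settings = openai.SUPPORTED_MODELS_AND_SETTINGS['gpt-4o']
print(settings.rpm, settings.tpm)  # 10000 5000000
```
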
@@ -309,12 +312,12 @@ class Gpt4TurboPreview(Gpt4):
   model = 'gpt-4-turbo-preview'
 
 
-class Gpt4TurboPreview_0125(Gpt4TurboPreview):  # pylint: disable=invalid-name
+class Gpt4TurboPreview_20240125(Gpt4TurboPreview):  # pylint: disable=invalid-name
   """GPT-4 Turbo Preview with 128k context window. Knowledge up to Dec. 2023."""
   model = 'gpt-4-0125-preview'
 
 
-class Gpt4TurboPreview_1106(Gpt4TurboPreview):  # pylint: disable=invalid-name
+class Gpt4TurboPreview_20231106(Gpt4TurboPreview):  # pylint: disable=invalid-name
   """GPT-4 Turbo Preview with 128k context window. Knowledge up to Apr. 2023."""
   model = 'gpt-4-1106-preview'
 
@@ -325,12 +328,12 @@ class Gpt4VisionPreview(Gpt4):
   multimodal = True
 
 
-class Gpt4VisionPreview_1106(Gpt4):  # pylint: disable=invalid-name
+class Gpt4VisionPreview_20231106(Gpt4):  # pylint: disable=invalid-name
   """GPT-4 Turbo vision preview. 128k context window. Knowledge to Apr. 2023."""
   model = 'gpt-4-1106-vision-preview'
 
 
-class Gpt4_0613(Gpt4):  # pylint:disable=invalid-name
+class Gpt4_20230613(Gpt4):  # pylint:disable=invalid-name
   """GPT-4 @20230613. 8K context window. Knowledge up to 9-2021."""
   model = 'gpt-4-0613'
 
@@ -340,11 +343,23 @@ class Gpt4_32K(Gpt4):  # pylint:disable=invalid-name
   model = 'gpt-4-32k'
 
 
-class Gpt4_32K_0613(Gpt4_32K):  # pylint:disable=invalid-name
+class Gpt4_32K_20230613(Gpt4_32K):  # pylint:disable=invalid-name
   """GPT-4 @20230613. 32K context window. Knowledge up to 9-2021."""
   model = 'gpt-4-32k-0613'
 
 
+class Gpt4o(OpenAI):
+  """GPT-4o."""
+  model = 'gpt-4o'
+  multimodal = True
+
+
+class Gpt4o_20240513(OpenAI):  # pylint:disable=invalid-name
+  """GPT-4o."""
+  model = 'gpt-4o-2024-05-13'
+  multimodal = True
+
+
 class Gpt35(OpenAI):
   """GPT-3.5. 4K max tokens, trained up on data up to Sep, 2021."""
   model = 'text-davinci-003'
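
A short sketch of using the new GPT-4o classes added above (an OpenAI API key is assumed for real sampling calls; `multimodal = True` means image chunks are accepted in prompts):

```python
import langfun as lf

# Sketch, assuming OPENAI_API_KEY is set in the environment.
lm = lf.llms.Gpt4o()               # tracks the floating 'gpt-4o' model id
pinned = lf.llms.Gpt4o_20240513()  # pins the 2024-05-13 snapshot
print(pinned.model, pinned.multimodal)  # gpt-4o-2024-05-13 True
```
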
@@ -355,17 +370,17 @@ class Gpt35Turbo(Gpt35):
   model = 'gpt-3.5-turbo'
 
 
-class Gpt35Turbo_0125(Gpt35Turbo):  # pylint:disable=invalid-name
+class Gpt35Turbo_20240125(Gpt35Turbo):  # pylint:disable=invalid-name
   """GPT-3.5 Turbo @20240125. 16K context window. Knowledge up to 09/2021."""
   model = 'gpt-3.5-turbo-0125'
 
 
-class Gpt35Turbo_1106(Gpt35Turbo):  # pylint:disable=invalid-name
+class Gpt35Turbo_20231106(Gpt35Turbo):  # pylint:disable=invalid-name
   """Gpt3.5 Turbo @20231106. 16K context window. Knowledge up to 09/2021."""
   model = 'gpt-3.5-turbo-1106'
 
 
-class Gpt35Turbo_0613(Gpt35Turbo):  # pylint:disable=invalid-name
+class Gpt35Turbo_20230613(Gpt35Turbo):  # pylint:disable=invalid-name
   """Gpt3.5 Turbo snapshot at 2023/06/13, with 4K context window size."""
   model = 'gpt-3.5-turbo-0613'
 
@@ -375,7 +390,7 @@ class Gpt35Turbo16K(Gpt35Turbo):
   model = 'gpt-3.5-turbo-16k'
 
 
-class Gpt35Turbo16K_0613(Gpt35Turbo):  # pylint:disable=invalid-name
+class Gpt35Turbo16K_20230613(Gpt35Turbo):  # pylint:disable=invalid-name
   """Gtp 3.5 Turbo 16K 0613."""
   model = 'gpt-3.5-turbo-16k-0613'
 
@@ -99,7 +99,7 @@ class VertexAI(lf.LanguageModel):
 
     credentials = self.credentials
     # Placeholder for Google-internal credentials.
-    from google.cloud.aiplatform import vertexai  # pylint: disable=g-import-not-at-top
+    import vertexai
     vertexai.init(project=project, location=location, credentials=credentials)
     return True
 
@@ -125,7 +125,7 @@ class VertexAI(lf.LanguageModel):
       self, options: lf.LMSamplingOptions
   ) -> Any:  # generative_models.GenerationConfig
     """Creates generation config from langfun sampling options."""
-    from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+    from vertexai import generative_models
     return generative_models.GenerationConfig(
         temperature=options.temperature,
         top_p=options.top_p,
@@ -138,7 +138,7 @@ class VertexAI(lf.LanguageModel):
       self, prompt: lf.Message
   ) -> list[str | Any]:
     """Gets generation input from langfun message."""
-    from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+    from vertexai import generative_models
     chunks = []
     for lf_chunk in prompt.chunk():
       if isinstance(lf_chunk, str):
@@ -239,7 +239,7 @@ class _ModelHub:
     """Gets a generative model by model id."""
     model = self._generative_model_cache.get(model_id, None)
     if model is None:
-      from google.cloud.aiplatform.vertexai.preview import generative_models  # pylint: disable=g-import-not-at-top
+      from vertexai import generative_models
      model = generative_models.GenerativeModel(model_id)
       self._generative_model_cache[model_id] = model
     return model
@@ -250,7 +250,7 @@ class _ModelHub:
     """Gets a text generation model by model id."""
     model = self._text_generation_model_cache.get(model_id, None)
     if model is None:
-      from google.cloud.aiplatform.vertexai import language_models  # pylint: disable=g-import-not-at-top
+      from vertexai import language_models
       model = language_models.TextGenerationModel.from_pretrained(model_id)
       self._text_generation_model_cache[model_id] = model
     return model
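
These hunks replace the long `google.cloud.aiplatform.vertexai...` paths with the top-level `vertexai` package that the `google-cloud-aiplatform` distribution installs. A sketch of the resulting import style; the project id and location are hypothetical placeholders:

```python
# Sketch, assuming google-cloud-aiplatform is installed (it provides the
# top-level `vertexai` package used below).
import vertexai
from vertexai import generative_models

vertexai.init(project='my-gcp-project', location='us-central1')  # placeholders
model = generative_models.GenerativeModel('gemini-1.0-pro')
```
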
@@ -17,7 +17,7 @@ import os
 import unittest
 from unittest import mock
 
-from google.cloud.aiplatform.vertexai.preview import generative_models
+from vertexai import generative_models
 import langfun.core as lf
 from langfun.core import modalities as lf_modalities
 from langfun.core.llms import vertexai
@@ -39,7 +39,6 @@ example_image = (
 def mock_generate_content(content, generation_config, **kwargs):
   del kwargs
   c = pg.Dict(generation_config.to_dict())
-  print('zzz', c)
   return generative_models.GenerationResponse.from_dict({
       'candidates': [
           {
@@ -111,7 +110,7 @@ class VertexAITest(unittest.TestCase):
 
   def test_model_hub(self):
     with mock.patch(
-        'google.cloud.aiplatform.vertexai.preview.generative_models.'
+        'vertexai.generative_models.'
         'GenerativeModel.__init__'
     ) as mock_model_init:
       mock_model_init.side_effect = lambda *args, **kwargs: None
@@ -125,7 +124,7 @@ class VertexAITest(unittest.TestCase):
     )
 
     with mock.patch(
-        'google.cloud.aiplatform.vertexai.language_models.'
+        'vertexai.language_models.'
        'TextGenerationModel.from_pretrained'
     ) as mock_model_init:
 
@@ -163,13 +162,13 @@ class VertexAITest(unittest.TestCase):
 
   def test_call_generative_model(self):
     with mock.patch(
-        'google.cloud.aiplatform.vertexai.preview.generative_models.'
+        'vertexai.generative_models.'
         'GenerativeModel.__init__'
     ) as mock_model_init:
       mock_model_init.side_effect = lambda *args, **kwargs: None
 
       with mock.patch(
-          'google.cloud.aiplatform.vertexai.preview.generative_models.'
+          'vertexai.generative_models.'
           'GenerativeModel.generate_content'
       ) as mock_generate:
         mock_generate.side_effect = mock_generate_content
@@ -192,7 +191,7 @@ class VertexAITest(unittest.TestCase):
 
   def test_call_text_generation_model(self):
     with mock.patch(
-        'google.cloud.aiplatform.vertexai.language_models.'
+        'vertexai.language_models.'
         'TextGenerationModel.from_pretrained'
     ) as mock_model_init:
 
@@ -23,7 +23,7 @@ import pyglove as pg
 
 
 def score(
-    prompt: Union[str, pg.Symbolic],
+    prompt: Union[str, pg.Symbolic] | list[str | pg.Symbolic],
     completions: list[str | pg.Symbolic],
     schema: Union[
         schema_lib.Schema, Type[Any], list[Type[Any]], dict[str, Any], None
@@ -49,15 +49,27 @@ def score(
         f'{[type(c) for c in completions]}.'
     )
 
-  input_message = prompting.query(
-      prompt,
-      schema,
-      examples=examples,
-      protocol=protocol,
-      skip_lm=True,
-      returns_message=True,
-      **kwargs,
-  )
+  if isinstance(prompt, list):
+    prompts = []
+    for p in prompt:
+      prompts.append(
+          prompting.query_prompt(
+              p,
+              schema,
+              examples=examples,
+              protocol=protocol,
+              **kwargs,
+          )
+      )
+    input_message = prompts
+  else:
+    input_message = prompting.query_prompt(
+        prompt,
+        schema,
+        examples=examples,
+        protocol=protocol,
+        **kwargs,
+    )
   if lm is None:
     lm_override = lf.get_contextual_override('lm')
     if lm_override is None:
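
`lf.structured.score` mirrors the language-model change: a list of prompts is expanded one entry at a time via `query_prompt` and passed through to the LM as a list. A usage sketch, again with the fake `Echo` model standing in for a real LM:

```python
import langfun as lf
from langfun.core.structured import score  # the function patched above

# Sketch: scoring paired prompt/completion lists in one call.
results = score(
    ['the opposite of hot is', 'the opposite of tall is'],
    ['cold', 'short'],
    lm=lf.llms.Echo(),
)
```
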
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langfun
-Version: 0.0.2.dev20240514
+Version: 0.0.2.dev20240518
 Summary: Langfun: Language as Functions.
 Home-page: https://github.com/google/langfun
 Author: Langfun Authors
@@ -8,8 +8,8 @@ langfun/core/console.py,sha256=bk5rNPNm9rMGW5YT2HixxU04p2umnoabn5SDz6Dqe88,2317
 langfun/core/console_test.py,sha256=5SYJdxpJGLgdSSQqqMPoA1X6jpsLD8rgcyk-EgI65oE,1077
 langfun/core/langfunc.py,sha256=RvIcRjIq0jWYRu1xim-FYe4HSrt97r3GMBO_PuagUmw,11060
 langfun/core/langfunc_test.py,sha256=_mfARnakX3oji5HDigFSLMd6yQ2wma-2Mgbztwqn73g,8501
-langfun/core/language_model.py,sha256=6wtY8RGbOymfo1PYzcYCfOlWuKQcSVFs5R1sFB4-QMQ,20202
-langfun/core/language_model_test.py,sha256=T-itu7Li2smv2dkru0C0neCs2W4VJXlNTYahXU6jF54,19548
+langfun/core/language_model.py,sha256=owNCgefGoPeRCHrxBhMtNdOj3orbeVml4eqLf1n211o,20760
+langfun/core/language_model_test.py,sha256=36evArVJgSQ9lRgHfMmlLW3lwjjDoiAgfTEbk2FIKa4,20122
 langfun/core/memory.py,sha256=f-asN1F7Vehgdn_fK84v73GrEUOxRtaW934keutTKjk,2416
 langfun/core/message.py,sha256=QhvV9t5qaryPcruyxxcXi3gm9QDInkSldwTtK6sVJ3c,15734
 langfun/core/message_test.py,sha256=Z23pUM5vPnDrYkIIibe2KL73D5HKur_awI0ut_EQFQA,9501
@@ -48,10 +48,10 @@ langfun/core/eval/patching.py,sha256=R0s2eAd1m97exQt06dmUL0V_MBG0W2Hxg7fhNB7cXW0
 langfun/core/eval/patching_test.py,sha256=8kCd54Egjju22FMgtJuxEsrXkW8ifs-UUBHtrCG1L6w,4775
 langfun/core/eval/scoring.py,sha256=1J7IATo-8FXUR0SBqk9icztHiM0lWkBFcWUo-vUURgQ,6376
 langfun/core/eval/scoring_test.py,sha256=O8olHbrUEg60gMxwOkWzKBJZpZoUlmVnBANX5Se2SXM,4546
-langfun/core/llms/__init__.py,sha256=C-NrcgFqf3_EP_dN8oADdckQ-rfPKZhsjeSf86kJpLk,3642
+langfun/core/llms/__init__.py,sha256=h_kam-0fjWISAQ90KZ_ydBhwADVCzrhLPXmAki3GfU0,4175
 langfun/core/llms/anthropic.py,sha256=7W9YdPN3SlAFhAIQlihMkrpo7tTY_4NvD0KIlCrqcsk,8505
 langfun/core/llms/anthropic_test.py,sha256=TMM30myyEhwF99Le4RvJEXOn8RYl0q1FRkt9Q9nl1jk,5540
-langfun/core/llms/fake.py,sha256=_smsN_CsYbeWrtjpegEPwdAPV9mwaIuH_4oZGeXQwQI,2896
+langfun/core/llms/fake.py,sha256=Dd7-6ka9pFf3fcWZyczamjOqQ91MOI-m7We3Oc9Ffmo,2927
 langfun/core/llms/fake_test.py,sha256=ipKfdOcuqVcJ8lDXVpnBVb9HHG0hAVkFkMoHpWjC2cI,7212
 langfun/core/llms/google_genai.py,sha256=nDI_Adur_K458l6EWoiiAhzjfnjRSqfTiikdu7iLPyU,8808
 langfun/core/llms/google_genai_test.py,sha256=_UcGTfl16-aDUlEWFC2W2F8y9jPUs53RBYA6MOCpGXw,7525
@@ -59,10 +59,10 @@ langfun/core/llms/groq.py,sha256=NaGItVL_pkOpqPpI4bPGU27xLFRoaeizZ49v2s-4ERs,784
 langfun/core/llms/groq_test.py,sha256=M6GtlrsOvDun_j-sR8cPh4W_moHWZNSTiThu3kuwbbc,5281
 langfun/core/llms/llama_cpp.py,sha256=Y_KkMUf3Xfac49koMUtUslKl3h-HWp3-ntq7Jaa3bdo,2385
 langfun/core/llms/llama_cpp_test.py,sha256=ZxC6defGd_HX9SFRU9U4cJiQnBKundbOrchbXuC1Z2M,1683
-langfun/core/llms/openai.py,sha256=u2lqYcKFjFxLfWYD0KLT3YThqcoo66rWs3n0bcuSYBs,13286
+langfun/core/llms/openai.py,sha256=IN46gIqfY6aEEfxCPNmyH1hrep3oWBhJDwVFilfqNkM,13657
 langfun/core/llms/openai_test.py,sha256=asSA1sVy_7hnXioD_2HTxtSDpVTKBUO_EjZuyHpwbn0,14854
-langfun/core/llms/vertexai.py,sha256=O2Lp-F4KJzvQSCjPV--sa6nMS9-GsLj2eiqA-1qGhWQ,9661
-langfun/core/llms/vertexai_test.py,sha256=LBk4luL_N13ZejZebBzQ3tkfjxFhk7uBS4JjEpojJAo,7836
+langfun/core/llms/vertexai.py,sha256=DBH7WzrTL5oIyKWoZTuIR8B_gtgRi7hlS9GteSY0r4E,9317
+langfun/core/llms/vertexai_test.py,sha256=1u813e2esr8HVbOpLTOhIgzaX-GqCRTBouJ1doRn80Q,7642
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=cFfYvOIUae842pncqCAsRvqXCk2AnAsRYVx0mcIoAeY,3338
 langfun/core/llms/cache/in_memory.py,sha256=YfFyJEhLs73cUiB0ZfhMxYpdE8Iuxxw-dvMFwGHTSHw,4742
@@ -94,7 +94,7 @@ langfun/core/structured/schema.py,sha256=Zy9y6Vq9DrFwcuP5o5VL_PvMCmzavF-nuDqyviB
 langfun/core/structured/schema_generation.py,sha256=U3nRQsqmMZg_qIVDh2fiY3K4JLfsAL1LcKzIFP1iXFg,5316
 langfun/core/structured/schema_generation_test.py,sha256=RM9s71kMNg2jTePwInkiW9fK1ACN37eyPeF8OII-0zw,2950
 langfun/core/structured/schema_test.py,sha256=NgQK1zGSliZVx_Af6gDBTqQxXRHvmAvGARv4dUs8IbI,23078
-langfun/core/structured/scoring.py,sha256=5DsMNrWKf98ZYCEkxA4-HvA62nMSNBs9DC5m8dYL7Cs,2442
+langfun/core/structured/scoring.py,sha256=QyT1S8FkLtKICfUbh4AXoKK3YJ_rgejyk6TI2OtOa68,2751
 langfun/core/structured/scoring_test.py,sha256=39_dw6p_FkoqeUccO67yIqos-MccAWezoozS21i8mi0,1732
 langfun/core/templates/__init__.py,sha256=bO0eMsVJbi7sxEB2YlInKRQ2EVP-RyyKUwcD-8msuN4,927
 langfun/core/templates/completion.py,sha256=mUqZHOEV3ag6-A08XghpeEltcrBvCDxXP004eDDfeag,1931
@@ -105,8 +105,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
 langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
 langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
 langfun/core/templates/selfplay_test.py,sha256=DYVrkk7uNKCqJGEHH31HssU2BPuMItU1vJLzfcXIlYg,2156
-langfun-0.0.2.dev20240514.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
-langfun-0.0.2.dev20240514.dist-info/METADATA,sha256=DOQe0LBigfl1Lk0WWtih781_jjlcP2BJUboWFwxDbek,3452
-langfun-0.0.2.dev20240514.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-langfun-0.0.2.dev20240514.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
-langfun-0.0.2.dev20240514.dist-info/RECORD,,
+langfun-0.0.2.dev20240518.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.0.2.dev20240518.dist-info/METADATA,sha256=wHT3BdfN3BJ6tJDhfy7-LFrf74CUA-hE6SIt6N1ERe4,3452
+langfun-0.0.2.dev20240518.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+langfun-0.0.2.dev20240518.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.0.2.dev20240518.dist-info/RECORD,,