langfun 0.0.2.dev20240407__py3-none-any.whl → 0.0.2.dev20240412__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langfun/core/eval/base.py CHANGED
@@ -1551,8 +1551,33 @@ class Summary(pg.Object):
   def _repr_html_(self) -> str:
     return self.html()
 
+  def json(
+      self,
+  ) -> dict[
+      str,            # Task name
+      list[pg.Dict],  # List of pg.Dict with `experiment` and `metrics`.
+  ]:
+    """Returns the JSON representation of the summary."""
+    task_results = {}
+    for task in sorted(self.tasks(), key=lambda cls: cls.__name__):
+      results = []
+      for entry in self.select(task=task).evaluations:
+        results.append(
+            pg.Dict(
+                experiment=entry,
+                metrics=entry.result.metrics if entry.result else None,
+            )
+        )
+      task_results[task.__name__] = results
+    return task_results
+
   def save(self, file: str, pivot_field: str | None = None) -> None:
     pg.save(self.html(pivot_field), file, file_format='txt')
+    if file.endswith('.html'):
+      json_file = file.replace('.html', '.json')
+    else:
+      json_file = os.path.join(file, '.json')
+    pg.save(self.json(), json_file)
 
   @classmethod
   def from_dirs(
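With this change, `Summary.save()` writes a JSON summary next to the HTML one. A minimal sketch of consuming that file, assuming a hypothetical output path; `pg.load(..., force_dict=True)` mirrors the test added later in this diff:

import pyglove as pg

# Hypothetical path: Summary.save('run/summary.html') would also write 'run/summary.json'.
summary = pg.load('run/summary.json', force_dict=True)
for task_name, entries in summary.items():
  for entry in entries:
    # Each entry carries `experiment` and `metrics` (None when the experiment has no result).
    print(task_name, entry['metrics'])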
langfun/core/eval/base_test.py CHANGED
@@ -101,7 +101,7 @@ class EvaluationTest(unittest.TestCase):
     self.assertEqual(s.dir, os.path.join(s.root_dir, s.id))
     self.assertEqual(s.hash, s.clone().hash)
     # Test persistent hash.
-    self.assertEqual(s.hash, 'abc7c29a')
+    self.assertEqual(s.hash, '436dc80c')
     self.assertEqual(
         s.hash, s.clone(override={'max_workers': 2, 'lm.timeout': 20}).hash
     )
@@ -209,7 +209,7 @@ class EvaluationTest(unittest.TestCase):
         s.result,
         dict(
             experiment_setup=dict(
-                id='Evaluation@17915dc6',
+                id='Evaluation@f1aa5126',
                 dir=s.dir,
                 model='StaticSequence',
                 prompt_template='{{example.question}}',
@@ -228,13 +228,23 @@ class EvaluationTest(unittest.TestCase):
         os.path.exists(os.path.join(s.dir, base.Evaluation.RESULT_JSON)))
     self.assertTrue(
         os.path.exists(os.path.join(s.dir, base.Evaluation.CACHE_JSON)))
-    self.assertTrue(
-        os.path.exists(os.path.join(s.root_dir, base.Evaluation.SUMMARY_HTML))
-    )
     self.assertTrue(
         os.path.exists(os.path.join(s.dir, base.Evaluation.INDEX_HTML)))
     self.assertTrue(
         os.path.exists(os.path.join(s.dir, base.Evaluation.FAILURES_HTML)))
+    self.assertTrue(
+        os.path.exists(os.path.join(s.root_dir, base.Evaluation.SUMMARY_HTML))
+    )
+    # Check summary JSON.
+    summary_json = os.path.join(
+        s.root_dir, base.Evaluation.SUMMARY_HTML.replace('.html', '.json')
+    )
+    self.assertTrue(os.path.exists(summary_json))
+    summary = pg.load(summary_json, force_dict=True)
+    self.assertIn('Evaluation', summary)
+    self.assertEqual(len(summary['Evaluation']), 1)
+    self.assertIsNotNone(summary['Evaluation'][0].experiment)
+    self.assertIsNotNone(summary['Evaluation'][0].metrics)
 
   def test_run_wihtout_save(self):
     lm = fake.StaticSequence([
@@ -321,7 +331,7 @@ class EvaluationTest(unittest.TestCase):
         s.children[0].dir, os.path.join(s.root_dir, s.children[0].id)
     )
     # Test persistent hash.
-    self.assertEqual(s.hash, 'ca7f722b')
+    self.assertEqual(s.hash, 'b66a4e88')
 
     summary = s.run(verbose=True)
     self.assertEqual(len(summary.evaluations), 2)
@@ -448,7 +458,7 @@ class SuiteTest(unittest.TestCase):
         lm=lm
     )
     # Test for persistent hash.
-    self.assertEqual(s.hash, '7285e52b')
+    self.assertEqual(s.hash, 'bbfdc7a8')
     s.run()
     expected = {
         s.children[0].id: dict(
langfun/core/eval/matching_test.py CHANGED
@@ -103,7 +103,7 @@ class MatchingTest(unittest.TestCase):
         s.result,
         dict(
             experiment_setup=dict(
-                id='MyTask@3d87f97f',
+                id='MyTask@acd56a61',
                 dir=s.dir,
                 model='StaticSequence',
                 prompt_template='{{example.question}}',
langfun/core/eval/scoring_test.py CHANGED
@@ -81,7 +81,7 @@ class ScoringTest(unittest.TestCase):
         s.result,
         dict(
            experiment_setup=dict(
-                id='ConstraintFollowing@9e51bb9e',
+                id='ConstraintFollowing@a44d8b89',
                 dir=s.dir,
                 model='StaticSequence',
                 prompt_template='{{example}}',
langfun/core/langfunc_test.py CHANGED
@@ -92,8 +92,8 @@ class LangFuncCallTest(unittest.TestCase):
     self.assertEqual(
         repr(l),
         "LangFunc(template_str='Hello', clean=True,"
-        ' lm=ExcitedEchoer(sampling_options=LMSamplingOptions(temperature=0.0,'
-        ' max_tokens=1024, n=1, top_k=40, top_p=None, stop=None,'
+        ' lm=ExcitedEchoer(sampling_options=LMSamplingOptions(temperature=None,'
+        ' max_tokens=None, n=1, top_k=40, top_p=None, stop=None,'
        ' random_seed=None, logprobs=False, top_logprobs=None), cache=None,'
        ' max_concurrency=None, timeout=120.0, max_attempts=5,'
        ' retry_interval=(5, 60), exponential_backoff=True, debug=False))',
langfun/core/language_model.py CHANGED
@@ -63,14 +63,24 @@ class LMSamplingOptions(component.Component):
   """Language model sampling options."""
 
   temperature: Annotated[
-      float,
+      float | None,
       (
           'Model temperature, which is usually between 0 and 1.0. '
-          'OpenAI models have temperature range from 0.0 to 2.0.'
+          'OpenAI models have temperature range from 0.0 to 2.0. '
+          'If None (default), honor the model\'s default behavior. '
       )
-  ] = 0.0
-  max_tokens: Annotated[int, 'Per example max tokens to generate.'] = 1024
+  ] = None
+
+  max_tokens: Annotated[
+      int | None,
+      (
+          'Per example max tokens to generate. '
+          'If None, use the model default.'
+      )
+  ] = None
+
   n: Annotated[int | None, 'Max number of samples to return.'] = 1
+
   top_k: Annotated[
       int | None,
       (
@@ -78,6 +88,7 @@ class LMSamplingOptions(component.Component):
           'Not applicable to OpenAI models.'
       )
   ] = 40
+
   top_p: Annotated[
       float | None,
       (
@@ -86,6 +97,7 @@ class LMSamplingOptions(component.Component):
           '`top_p` but not both.'
       ),
   ] = None
+
   stop: Annotated[
       list[str] | None,
       (
@@ -95,9 +107,11 @@ class LMSamplingOptions(component.Component):
           '`Model:` is reached.'
       ),
   ] = None
+
   random_seed: Annotated[
       int | None, 'A fixed random seed used during model inference.'
   ] = None
+
   logprobs: Annotated[
       bool,
       (
@@ -106,6 +120,7 @@ class LMSamplingOptions(component.Component):
           'in the content of message.'
      ),
   ] = False
+
   top_logprobs: Annotated[
       int | None,
       (
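Net effect of the `LMSamplingOptions` changes above: `temperature` and `max_tokens` now default to `None`, deferring to each model's own defaults, which also changes the default cache key. A minimal sketch based on the defaults and the cache-key layout asserted in the updated tests (the import alias `lm_lib` follows the test files in this diff):

from langfun.core import language_model as lm_lib

options = lm_lib.LMSamplingOptions()
assert options.temperature is None and options.max_tokens is None
# Cache key layout per the updated tests:
# (temperature, max_tokens, n, top_k, top_p, stop).
assert options.cache_key() == (None, None, 1, 40, None, None)

# Explicit overrides still take effect and produce a distinct cache key.
with options.override(temperature=1.0, max_tokens=256):
  assert options.cache_key() == (1.0, 256, 1, 40, None, None)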
langfun/core/language_model_test.py CHANGED
@@ -40,7 +40,7 @@ class MockModel(lm_lib.LanguageModel):
       return [
           lm_lib.LMSamplingResult([lm_lib.LMSample(  # pylint: disable=g-complex-comprehension
              response=prompt.text * self.sampling_options.top_k,
-              score=self.sampling_options.temperature)])
+              score=self.sampling_options.temperature or -1.0)])
           for prompt in prompts
       ]
       context.attempt += 1
@@ -73,13 +73,13 @@ class LMSamplingOptionsTest(unittest.TestCase):
   def test_cache_key(self):
     options = lm_lib.LMSamplingOptions()
     key1 = options.cache_key()
-    self.assertEqual(key1, (0.0, 1024, 1, 40, None, None))
+    self.assertEqual(key1, (None, None, 1, 40, None, None))
     with options.override(temperature=1.0, max_tokens=256):
       key2 = options.cache_key()
       self.assertEqual(key2, (1.0, 256, 1, 40, None, None))
 
     # Make sure key1 does not change upon override.
-    self.assertEqual(key1, (0.0, 1024, 1, 40, None, None))
+    self.assertEqual(key1, (None, None, 1, 40, None, None))
 
 
 class LanguageModelTest(unittest.TestCase):
@@ -100,8 +100,8 @@ class LanguageModelTest(unittest.TestCase):
     self.assertEqual(
         lm.sample(prompts=['foo', 'bar']),
         [
-            lm_lib.LMSamplingResult([lm_lib.LMSample('foo', score=0.0)]),
-            lm_lib.LMSamplingResult([lm_lib.LMSample('bar', score=0.0)]),
+            lm_lib.LMSamplingResult([lm_lib.LMSample('foo', score=-1.0)]),
+            lm_lib.LMSamplingResult([lm_lib.LMSample('bar', score=-1.0)]),
         ],
     )
     # Test override sampling_options.
@@ -143,7 +143,7 @@ class LanguageModelTest(unittest.TestCase):
     lm = MockModel(sampling_options=lm_lib.LMSamplingOptions(top_k=1))
     response = lm(prompt='foo')
     self.assertEqual(response.text, 'foo')
-    self.assertEqual(response.score, 0.0)
+    self.assertEqual(response.score, -1.0)
 
     # Test override sampling_options.
     self.assertEqual(
@@ -159,9 +159,9 @@ class LanguageModelTest(unittest.TestCase):
         lm.sample(prompts=['foo', 'bar']),
         [
             lm_lib.LMSamplingResult([lm_lib.LMSample(
-                message_lib.AIMessage('foo', cache_seed=0), score=0.0)]),
+                message_lib.AIMessage('foo', cache_seed=0), score=-1.0)]),
             lm_lib.LMSamplingResult([lm_lib.LMSample(
-                message_lib.AIMessage('bar', cache_seed=0), score=0.0)]),
+                message_lib.AIMessage('bar', cache_seed=0), score=-1.0)]),
         ])
     self.assertEqual(cache.stats.num_queries, 2)
     self.assertEqual(cache.stats.num_hits, 0)
langfun/core/llms/cache/in_memory_test.py CHANGED
@@ -44,19 +44,19 @@ class InMemoryLMCacheTest(unittest.TestCase):
     self.assertEqual(
         list(cache.keys()),
         [
-            ('a', (0.0, 1024, 1, 40, None, None), 0),
-            ('a', (0.0, 1024, 1, 40, None, None), 1),
-            ('b', (0.0, 1024, 1, 40, None, None), 0),
-            ('c', (0.0, 1024, 1, 40, None, None), 0),
+            ('a', (None, None, 1, 40, None, None), 0),
+            ('a', (None, None, 1, 40, None, None), 1),
+            ('b', (None, None, 1, 40, None, None), 0),
+            ('c', (None, None, 1, 40, None, None), 0),
         ],
     )
     self.assertEqual(
         list(cache.keys('StaticSequence')),
         [
-            ('a', (0.0, 1024, 1, 40, None, None), 0),
-            ('a', (0.0, 1024, 1, 40, None, None), 1),
-            ('b', (0.0, 1024, 1, 40, None, None), 0),
-            ('c', (0.0, 1024, 1, 40, None, None), 0),
+            ('a', (None, None, 1, 40, None, None), 0),
+            ('a', (None, None, 1, 40, None, None), 1),
+            ('b', (None, None, 1, 40, None, None), 0),
+            ('c', (None, None, 1, 40, None, None), 0),
         ],
     )
 
@@ -90,19 +90,19 @@ class InMemoryLMCacheTest(unittest.TestCase):
         list(cache.items()),
         [
             (
-                ('a', (0.0, 1024, 1, 40, None, None), 0),
+                ('a', (None, None, 1, 40, None, None), 0),
                 cache_entry('1'),
             ),
             (
-                ('a', (0.0, 1024, 1, 40, None, None), 1),
+                ('a', (None, None, 1, 40, None, None), 1),
                 cache_entry('2', 1),
             ),
             (
-                ('b', (0.0, 1024, 1, 40, None, None), 0),
+                ('b', (None, None, 1, 40, None, None), 0),
                 cache_entry('3'),
             ),
             (
-                ('c', (0.0, 1024, 1, 40, None, None), 0),
+                ('c', (None, None, 1, 40, None, None), 0),
                 cache_entry('4'),
             ),
         ],
@@ -111,19 +111,19 @@ class InMemoryLMCacheTest(unittest.TestCase):
         list(cache.items('StaticSequence')),
         [
             (
-                ('a', (0.0, 1024, 1, 40, None, None), 0),
+                ('a', (None, None, 1, 40, None, None), 0),
                 cache_entry('1'),
             ),
             (
-                ('a', (0.0, 1024, 1, 40, None, None), 1),
+                ('a', (None, None, 1, 40, None, None), 1),
                 cache_entry('2', 1),
             ),
             (
-                ('b', (0.0, 1024, 1, 40, None, None), 0),
+                ('b', (None, None, 1, 40, None, None), 0),
                 cache_entry('3'),
             ),
             (
-                ('c', (0.0, 1024, 1, 40, None, None), 0),
+                ('c', (None, None, 1, 40, None, None), 0),
                 cache_entry('4'),
             ),
         ],
@@ -161,15 +161,15 @@ class InMemoryLMCacheTest(unittest.TestCase):
     self.assertEqual(
         list(cache.keys()),
         [
-            ('a', (0.0, 1024, 1, 40, None, None), 0),
-            ('a', (1.0, 1024, 1, 40, None, None), 0),
+            ('a', (None, None, 1, 40, None, None), 0),
+            ('a', (1.0, None, 1, 40, None, None), 0),
         ],
     )
 
   def test_different_model(self):
     cache = in_memory.InMemory()
-    lm1 = fake.StaticSequence(['1', '2', '3'], cache=cache)
-    lm2 = fake.Echo(cache=cache)
+    lm1 = fake.StaticSequence(['1', '2', '3'], cache=cache, temperature=0.0)
+    lm2 = fake.Echo(cache=cache, temperature=0.0)
 
     self.assertEqual(lm1('a'), '1')
     self.assertEqual(lm2('a'), 'a')
@@ -180,15 +180,15 @@ class InMemoryLMCacheTest(unittest.TestCase):
     self.assertEqual(
         list(cache.keys('StaticSequence')),
         [
-            ('a', (0.0, 1024, 1, 40, None, None), 0),
-            ('b', (0.0, 1024, 1, 40, None, None), 0),
+            ('a', (0.0, None, 1, 40, None, None), 0),
+            ('b', (0.0, None, 1, 40, None, None), 0),
         ],
     )
     self.assertEqual(
         list(cache.keys('Echo')),
         [
-            ('a', (0.0, 1024, 1, 40, None, None), 0),
-            ('b', (0.0, 1024, 1, 40, None, None), 0),
+            ('a', (0.0, None, 1, 40, None, None), 0),
+            ('b', (0.0, None, 1, 40, None, None), 0),
         ],
     )
     self.assertEqual(len(cache), 4)
langfun/core/llms/google_genai_test.py CHANGED
@@ -152,10 +152,15 @@ class GenAITest(unittest.TestCase):
     )
 
   def test_model_hub(self):
+    orig_get_model = genai.get_model
+    genai.get_model = mock_get_model
+
     model = google_genai._GOOGLE_GENAI_MODEL_HUB.get('gemini-pro')
     self.assertIsNotNone(model)
     self.assertIs(google_genai._GOOGLE_GENAI_MODEL_HUB.get('gemini-pro'), model)
 
+    genai.get_model = orig_get_model
+
   def test_api_key_check(self):
     with self.assertRaisesRegex(ValueError, 'Please specify `api_key`'):
       _ = google_genai.GeminiPro()._api_initialized
@@ -167,7 +172,7 @@ class GenAITest(unittest.TestCase):
 
   def test_call(self):
     with mock.patch(
-        'google.generativeai.generative_models.GenerativeModel.generate_content'
+        'google.generativeai.GenerativeModel.generate_content',
     ) as mock_generate:
       orig_get_model = genai.get_model
       genai.get_model = mock_get_model
@@ -176,7 +181,7 @@ class GenAITest(unittest.TestCase):
       lm = google_genai.GeminiPro(api_key='test_key')
       self.maxDiff = None
       self.assertEqual(
-          lm('hello', temperature=2.0, top_k=20).text,
+          lm('hello', temperature=2.0, top_k=20, max_tokens=1024).text,
          (
              'This is a response to hello with n=1, temperature=2.0, '
              'top_p=None, top_k=20, max_tokens=1024, stop=None.'
@@ -197,7 +202,7 @@ class GenAITest(unittest.TestCase):
          (
              "hello to models/text-bison-001 with {'temperature': 2.0, "
              "'top_k': 20, 'top_p': None, 'candidate_count': 1, "
-              "'max_output_tokens': 1024, 'stop_sequences': None}"
+              "'max_output_tokens': None, 'stop_sequences': None}"
          ),
       )
       genai.get_model = orig_get_model
langfun/core/llms/llama_cpp.py CHANGED
@@ -51,10 +51,12 @@ class LlamaCppRemote(lf.LanguageModel):
       data = {
           "prompt": prompt.text,
          "n_predict": self.sampling_options.max_tokens,
-          "temperature": self.sampling_options.temperature,
          "top_k": self.sampling_options.top_k or 50,
          "top_p": self.sampling_options.top_p or 0.95,
       }
+      if self.sampling_options.temperature is not None:
+        data["temperature"] = self.sampling_options.temperature
+
       response = requests.post(
           f"{self.url}/completion",
           json=data,
langfun/core/llms/openai.py CHANGED
@@ -163,8 +163,6 @@ class OpenAI(lf.LanguageModel):
     # NOTE(daiyip): options.top_k is not applicable.
     args = dict(
         n=options.n,
-        temperature=options.temperature,
-        max_tokens=options.max_tokens,
         stream=False,
         timeout=self.timeout,
         logprobs=options.logprobs,
@@ -173,6 +171,10 @@ class OpenAI(lf.LanguageModel):
     # Completion and ChatCompletion uses different parameter name for model.
     args['model' if self.is_chat_model else 'engine'] = self.model
 
+    if options.temperature is not None:
+      args['temperature'] = options.temperature
+    if options.max_tokens is not None:
+      args['max_tokens'] = options.max_tokens
     if options.top_p is not None:
       args['top_p'] = options.top_p
     if options.stop:
@@ -220,6 +222,10 @@ class OpenAI(lf.LanguageModel):
         retry_on_errors=(
             openai_error.ServiceUnavailableError,
             openai_error.RateLimitError,
+            # Handling transient OpenAI server error (code 500). Check out
+            # https://platform.openai.com/docs/guides/error-codes/error-codes
+            (openai_error.APIError,
+             '.*The server had an error processing your request'),
         ),
     )[0]
 
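The new retry entry pairs an exception class with a message regex, so only transient 500-style APIErrors with that message are retried. A minimal, generic sketch of how such a filter could be evaluated (illustrative only; `should_retry` is a hypothetical helper, not langfun's implementation):

import re

def should_retry(error, retry_on_errors) -> bool:
  # Each spec is either an exception class or an (exception class, message
  # regex) pair, mirroring the tuple form added above.
  for spec in retry_on_errors:
    if isinstance(spec, tuple):
      error_cls, pattern = spec
      if isinstance(error, error_cls) and re.match(pattern, str(error)):
        return True
    elif isinstance(error, spec):
      return True
  return False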
langfun/core/llms/openai_test.py CHANGED
@@ -121,7 +121,6 @@ class OpenaiTest(unittest.TestCase):
         top_logprobs=None,
         n=1,
         temperature=1.0,
-        max_tokens=1024,
         stream=False,
         timeout=120.0,
         stop=['\n'],
{langfun-0.0.2.dev20240407.dist-info → langfun-0.0.2.dev20240412.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langfun
-Version: 0.0.2.dev20240407
+Version: 0.0.2.dev20240412
 Summary: Langfun: Language as Functions.
 Home-page: https://github.com/google/langfun
 Author: Langfun Authors
{langfun-0.0.2.dev20240407.dist-info → langfun-0.0.2.dev20240412.dist-info}/RECORD CHANGED
@@ -7,9 +7,9 @@ langfun/core/concurrent_test.py,sha256=mwFMZhDUdppnDr7vDSTwcbMHwrdsIoKJwRYNtl4ZW
 langfun/core/console.py,sha256=bk5rNPNm9rMGW5YT2HixxU04p2umnoabn5SDz6Dqe88,2317
 langfun/core/console_test.py,sha256=5SYJdxpJGLgdSSQqqMPoA1X6jpsLD8rgcyk-EgI65oE,1077
 langfun/core/langfunc.py,sha256=WXdTc3QsmGD_n80KD9dFRr5MHpGZ9E_y_Rhtk4t9-3w,11852
-langfun/core/langfunc_test.py,sha256=dFNJoEXExIkrAJ9_PSWh_iRQoR4Gmp2VOZ_ve61DSHM,8339
-langfun/core/language_model.py,sha256=jPuFfjnRCnbT8po-CBPgmXoa09Yfk5_21snCXURqaKU,17011
-langfun/core/language_model_test.py,sha256=q7pNdirVWfkQXPA3taCGnyLB2NNs1KqX4JjjnoJvFOQ,11365
+langfun/core/langfunc_test.py,sha256=rRxz2OOka5qagTSS1IcJ1Ij3mjjWawPFe1n9zYtGST8,8340
+langfun/core/language_model.py,sha256=D3aU7ep1MFnyMWYCfvbA3ZK9DgP_wk0PogXo1Kmvk4Q,17185
+langfun/core/language_model_test.py,sha256=bTyQVsH5JAxEzzzuq8VO8bVa9kiAMeiahzrxLxnOuQs,11380
 langfun/core/memory.py,sha256=f-asN1F7Vehgdn_fK84v73GrEUOxRtaW934keutTKjk,2416
 langfun/core/message.py,sha256=QhvV9t5qaryPcruyxxcXi3gm9QDInkSldwTtK6sVJ3c,15734
 langfun/core/message_test.py,sha256=Z23pUM5vPnDrYkIIibe2KL73D5HKur_awI0ut_EQFQA,9501
@@ -40,25 +40,25 @@ langfun/core/coding/python/parsing_test.py,sha256=9vAWF484kWIm6JZq8NFiMgKUDhXV-d
 langfun/core/coding/python/permissions.py,sha256=1QWGHvzL8MM0Ok_auQ9tURqZHtdOfJaDpBzZ29GUE-c,2544
 langfun/core/coding/python/permissions_test.py,sha256=w5EDb8QxpxgJyZkojyzVWQvDfg366zn99-g__6TbPQ0,2699
 langfun/core/eval/__init__.py,sha256=iDA2OcJ3kR6ixZizXIY3N9LsjkaVrfTbSClTiSP8ekY,1291
-langfun/core/eval/base.py,sha256=YpAPtWLeh3OpJuuPk7913xkB2OTCaGls1uWMohDA8SI,54551
-langfun/core/eval/base_test.py,sha256=8MOum0DWMEm2-NpwmFgcqmlqEmuWYF5MesrCXTySylg,21083
+langfun/core/eval/base.py,sha256=Op-DO-YV8sL8mQvCfbzLfDDL6bDMuTtNYeyp5_QCBsQ,55328
+langfun/core/eval/base_test.py,sha256=mjdQ3ukxc7BhsVJkFJvqtz9EVhSR0OGL9j1zf_AfXR4,21540
 langfun/core/eval/matching.py,sha256=g2yuBb4FeOlAlB10hqdWvaIg4QVQlJbiViRDcD2Y8go,9567
-langfun/core/eval/matching_test.py,sha256=jFrNOaHteNo7wxCwc6w_mGylM0VHwezAcvfaZANKKmA,4898
+langfun/core/eval/matching_test.py,sha256=FFHYD7IDuKe5RMjkx74ksukiwUhO5a_SS340JaIPMws,4898
 langfun/core/eval/scoring.py,sha256=mshqbV_WM0zcp15TSR32ACMBDymlsbf6YH06PPx1Tw0,6139
-langfun/core/eval/scoring_test.py,sha256=3SWvRmrFn1ZrSE9mhA9ApcPg6e9HVXQ58xhui1HPQmI,4024
+langfun/core/eval/scoring_test.py,sha256=YH1cIxBWtfdKcAV9Fh10vLkV5J-gxk8b6nxW4Z2u5pk,4024
 langfun/core/llms/__init__.py,sha256=gROJ8AjMq_ebXFcEfsyzYGCS6NsGfzf9d43nLu_TIdw,2504
 langfun/core/llms/fake.py,sha256=dVzOrW27RZ1p3DdQoRCRZs_vfoQcTcNrlWxia7oqmvw,2499
 langfun/core/llms/fake_test.py,sha256=Qk_Yoi4Z7P9o6f8Q_BZkaSlvxH89ZVsDxnVIbSBRBXk,3555
 langfun/core/llms/google_genai.py,sha256=n8zyJwh9UCTgb6-8LyvmjVNFGZQ4-zfzZ0ulkhHAnR8,8624
-langfun/core/llms/google_genai_test.py,sha256=MPU4eLd9CDQhjUeaNO_2VFirg0ZJOwNaMtgm1X-hICc,7412
-langfun/core/llms/llama_cpp.py,sha256=sJ9TOismqwGJ7QhgdYknWTEkqrbeZpWYc_nClOh36NU,2320
+langfun/core/llms/google_genai_test.py,sha256=_UcGTfl16-aDUlEWFC2W2F8y9jPUs53RBYA6MOCpGXw,7525
+langfun/core/llms/llama_cpp.py,sha256=Y_KkMUf3Xfac49koMUtUslKl3h-HWp3-ntq7Jaa3bdo,2385
 langfun/core/llms/llama_cpp_test.py,sha256=ZxC6defGd_HX9SFRU9U4cJiQnBKundbOrchbXuC1Z2M,1683
-langfun/core/llms/openai.py,sha256=BV8NWjB1b6A1X4Kff8Pub5AECodsngZnXqeBvRIHFM0,11331
-langfun/core/llms/openai_test.py,sha256=yfw7A-4Zo9u1cIkAMk39evE-tO7z6isNYTXiSnJXDQw,7599
+langfun/core/llms/openai.py,sha256=uOJDflucpKZv3TPZwaeDSp9QMs2oDFuzh5Jm5j4dlm4,11680
+langfun/core/llms/openai_test.py,sha256=ulzp5uzEmEvnqZ21D0FP6eaiH1xMQ59FaLHoqA0lTgc,7570
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=cFfYvOIUae842pncqCAsRvqXCk2AnAsRYVx0mcIoAeY,3338
 langfun/core/llms/cache/in_memory.py,sha256=YfFyJEhLs73cUiB0ZfhMxYpdE8Iuxxw-dvMFwGHTSHw,4742
-langfun/core/llms/cache/in_memory_test.py,sha256=WYLg_SlUdkUxIdBYnbksMqwVLFuzcNLsPTEJSQavtr0,8459
+langfun/core/llms/cache/in_memory_test.py,sha256=guHHjislh1Mj3-GBARICMh-qq5gh4fwZQ7SI5kQEAeQ,8510
 langfun/core/memories/__init__.py,sha256=HpghfZ-w1NQqzJXBx8Lz0daRhB2rcy2r9Xm491SBhC4,773
 langfun/core/memories/conversation_history.py,sha256=c9amD8hCxGFiZuVAzkP0dOMWSp8L90uvwkOejjuBqO0,1835
 langfun/core/memories/conversation_history_test.py,sha256=AaW8aNoFjxNusanwJDV0r3384Mg0eAweGmPx5DIkM0Y,2052
@@ -95,8 +95,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
 langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
 langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
 langfun/core/templates/selfplay_test.py,sha256=IB5rWbjK_9CTkqEo1BclQPzFAKcIiusJckH8J19HFgI,2096
-langfun-0.0.2.dev20240407.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
-langfun-0.0.2.dev20240407.dist-info/METADATA,sha256=AstNjq_o1Rpd5C6aHkrQ78z_Fe38CMBXUlnwz4ArvwY,3405
-langfun-0.0.2.dev20240407.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-langfun-0.0.2.dev20240407.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
-langfun-0.0.2.dev20240407.dist-info/RECORD,,
+langfun-0.0.2.dev20240412.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.0.2.dev20240412.dist-info/METADATA,sha256=9k3LWLg191e7teg-qoLBiTu9Oct_y_KH9IoLp7hjJXg,3405
+langfun-0.0.2.dev20240412.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+langfun-0.0.2.dev20240412.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.0.2.dev20240412.dist-info/RECORD,,