langfun 0.1.2.dev202502230803__py3-none-any.whl → 0.1.2.dev202502250804__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in that registry.
@@ -795,6 +795,84 @@ class Session(pg.Object, pg.views.html.HtmlTreeView.Extension):
        **kwargs
    )
 
+   def query_prompt(
+       self,
+       prompt: Union[str, lf.Template, Any],
+       schema: Union[
+           lf_structured.Schema, Type[Any], list[Type[Any]], dict[str, Any], None
+       ] = None,
+       **kwargs,
+   ) -> Any:
+     """Calls `lf.query_prompt` and associates it with the current invocation.
+
+     The following two code snippets are equivalent:
+
+     Code 1:
+     ```
+     session.query_prompt(...)
+     ```
+
+     Code 2:
+     ```
+     with session.track_queries() as queries:
+       output = lf.query_prompt(...)
+     ```
+     The former is preferred when `lf.query_prompt` is called directly by the
+     action. If `lf.query_prompt` is called by a function that does not have
+     access to the session, the latter should be used.
+
+     Args:
+       prompt: The prompt to query.
+       schema: The schema to use for the query.
+       **kwargs: Additional keyword arguments to pass to `lf.query_prompt`.
+
+     Returns:
+       The result of the query.
+     """
+     with self.track_queries():
+       return lf_structured.query_prompt(prompt, schema=schema, **kwargs)
+
+   def query_output(
+       self,
+       response: Union[str, lf.Template, Any],
+       schema: Union[
+           lf_structured.Schema, Type[Any], list[Type[Any]], dict[str, Any], None
+       ] = None,
+       **kwargs,
+   ) -> Any:
+     """Calls `lf.query_output` and associates it with the current invocation.
+
+     The following two code snippets are equivalent:
+
+     Code 1:
+     ```
+     session.query_output(...)
+     ```
+
+     Code 2:
+     ```
+     with session.track_queries() as queries:
+       output = lf.query_output(...)
+     ```
+     The former is preferred when `lf.query_output` is called directly by the
+     action. If `lf.query_output` is called by a function that does not have
+     access to the session, the latter should be used.
+
+     Args:
+       response: The response to query.
+       schema: The schema to use for the query.
+       **kwargs: Additional keyword arguments to pass to `lf.query_output`.
+
+     Returns:
+       The result of the query.
+     """
+     with self.track_queries():
+       return lf_structured.query_output(response, schema=schema, **kwargs)
+
    def _log(self, level: lf.logging.LogLevel, message: str, **kwargs):
      self._current_action.current_phase.append(
          lf.logging.LogEntry(
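For orientation, here is a minimal usage sketch of the two equivalent styles described in the docstrings above. The `run` and `build_prompt` helpers, the question text, and the use of `int` as a schema are illustrative assumptions, not part of the package.

```python
import langfun as lf

def build_prompt(question: str):
  # Hypothetical helper that has no access to the session, so its query can
  # only be tracked by the caller's `track_queries()` scope (style 2).
  return lf.query_prompt(question, int)

def run(session, question: str):
  # Style 1: the session associates the query with the current invocation.
  prompt_a = session.query_prompt(question, int)

  # Style 2: equivalent bookkeeping for code that cannot see the session.
  with session.track_queries() as queries:
    prompt_b = build_prompt(question)
  return prompt_a, prompt_b, queries
```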
@@ -101,7 +101,7 @@ class EvaluationTest(unittest.TestCase):
      self.assertEqual(s.dir, os.path.join(s.root_dir, s.id))
      self.assertEqual(s.hash, s.clone().hash)
      # Test persistent hash.
-     self.assertEqual(s.hash, 'ae86c703')
+     self.assertEqual(s.hash, '31a1c20a')
      self.assertEqual(
          s.hash, s.clone(override={'max_workers': 2, 'lm.timeout': 20}).hash
      )
@@ -211,7 +211,7 @@ class EvaluationTest(unittest.TestCase):
          s.result,
          dict(
              experiment_setup=dict(
-                 id='Evaluation@0fade07d',
+                 id='Evaluation@e06b5967',
                  dir=s.dir,
                  model='StaticSequence',
                  prompt_template='{{example.question}}',
@@ -376,7 +376,7 @@ class EvaluationTest(unittest.TestCase):
          s.children[0].dir, os.path.join(s.root_dir, s.children[0].id)
      )
      # Test persistent hash.
-     self.assertEqual(s.hash, 'b66a4e88')
+     self.assertEqual(s.hash, '9f4bc85b')
 
      summary = s.run(verbose=True)
      self.assertEqual(len(summary.evaluations), 2)
@@ -526,10 +526,10 @@ class SuiteTest(unittest.TestCase):
          lm=lm
      )
      # Test for persistent hash.
-     self.assertEqual(s.hash, '26e6cc25')
+     self.assertEqual(s.hash, '2910c323')
      s.run()
      expected = {
-         'Evaluation@0fade07d': dict(
+         'Evaluation@e06b5967': dict(
              experiment_setup=dict(
                  id=s.children[0].id,
                  dir=s.children[0].dir,
@@ -555,7 +555,7 @@ class SuiteTest(unittest.TestCase):
              ),
              usage=s.children[0].result.usage,
          ),
-         'Evaluation@ae86c703': dict(
+         'Evaluation@31a1c20a': dict(
              experiment_setup=dict(
                  id=s.children[1].children[0].id,
                  dir=s.children[1].children[0].dir,
@@ -103,7 +103,7 @@ class MatchingTest(unittest.TestCase):
          s.result,
          dict(
              experiment_setup=dict(
-                 id='MyTask@739a174b',
+                 id='MyTask@a98a284d',
                  dir=s.dir,
                  model='StaticSequence',
                  prompt_template='{{example.question}}',
@@ -81,7 +81,7 @@ class ScoringTest(unittest.TestCase):
          s.result,
          dict(
              experiment_setup=dict(
-                 id='ConstraintFollowing@5c88a5eb',
+                 id='ConstraintFollowing@90671d5e',
                  dir=s.dir,
                  model='StaticSequence',
                  prompt_template='{{example}}',
@@ -106,10 +106,10 @@ class LangFuncCallTest(unittest.TestCase):
          "LangFunc(template_str='Hello', clean=True,"
          ' lm=ExcitedEchoer(sampling_options=LMSamplingOptions(temperature=None,'
          ' max_tokens=None, n=1, top_k=40, top_p=None, stop=None,'
-         ' random_seed=None, logprobs=False, top_logprobs=None), cache=None,'
-         ' max_concurrency=None, timeout=120.0, max_attempts=5,'
-         ' retry_interval=(5, 60), exponential_backoff=True,'
-         ' max_retry_interval=300, debug=False))',
+         ' random_seed=None, logprobs=False, top_logprobs=None,'
+         ' max_thinking_tokens=None), cache=None, max_concurrency=None,'
+         ' timeout=120.0, max_attempts=5, retry_interval=(5, 60),'
+         ' exponential_backoff=True, max_retry_interval=300, debug=False))',
      )
 
      l = LangFunc('Hello')
@@ -560,6 +560,10 @@ class LMSamplingOptions(component.Component):
        ),
    ] = None
 
+   max_thinking_tokens: Annotated[
+       int | None, 'Number of max thinking tokens.'
+   ] = None
+
    def cache_key(self) -> tuple[Any, ...]:
      """Returns a tuple of current values as cache key."""
      return (
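For illustration, a minimal sketch of setting the new sampling option. The budget value is arbitrary, and per the Anthropic handler changes below, only backends that understand `max_thinking_tokens` act on it; the minimum accepted budget there is 1,024 tokens.

```python
import langfun as lf

# Sketch: carry an extended-thinking budget alongside the usual sampling
# options. Backends that do not recognize `max_thinking_tokens` ignore it.
options = lf.LMSamplingOptions(
    max_tokens=8192,           # illustrative output budget
    max_thinking_tokens=4096,  # assumed thinking budget; >= 1,024 for Anthropic
)
```

The options object can then be attached to a model via its `sampling_options` field, following langfun's usual pattern.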
@@ -127,6 +127,8 @@ from langfun.core.llms.openai import Gpt35
 
  # Anthropic models.
 
+ from langfun.core.llms.anthropic import Claude37
+ from langfun.core.llms.anthropic import Claude37Sonnet_20250219
  from langfun.core.llms.anthropic import Claude35Sonnet
  from langfun.core.llms.anthropic import Claude35Sonnet_20241022
  from langfun.core.llms.anthropic import Claude35Haiku
@@ -139,6 +141,7 @@ from langfun.core.llms.anthropic import Claude3Haiku
  from langfun.core.llms.anthropic import Claude3Haiku_20240307
 
  from langfun.core.llms.vertexai import VertexAIAnthropic
+ from langfun.core.llms.vertexai import VertexAIClaude37Sonnet_20250219
  from langfun.core.llms.vertexai import VertexAIClaude35Sonnet_20241022
  from langfun.core.llms.vertexai import VertexAIClaude35Haiku_20241022
  from langfun.core.llms.vertexai import VertexAIClaude3Opus_20240229
@@ -88,6 +88,32 @@ SUPPORTED_MODELS = [
              max_output_tokens_per_minute=80_000,
          ),
      ),
+     AnthropicModelInfo(
+         model_id='claude-3-7-sonnet-20250219',
+         provider='Anthropic',
+         in_service=True,
+         description='Claude 3.7 Sonnet model (2/19/2025).',
+         release_date=datetime.datetime(2025, 2, 19),
+         input_modalities=(
+             AnthropicModelInfo.INPUT_IMAGE_TYPES
+             + AnthropicModelInfo.INPUT_DOC_TYPES
+         ),
+         context_length=lf.ModelInfo.ContextLength(
+             max_input_tokens=200_000,
+             max_output_tokens=128_000,
+         ),
+         pricing=lf.ModelInfo.Pricing(
+             cost_per_1m_cached_input_tokens=0.3,
+             cost_per_1m_input_tokens=3,
+             cost_per_1m_output_tokens=15,
+         ),
+         rate_limits=AnthropicModelInfo.RateLimits(
+             # Tier 4 rate limits
+             max_requests_per_minute=4000,
+             max_input_tokens_per_minute=400_000,
+             max_output_tokens_per_minute=80_000,
+         ),
+     ),
      AnthropicModelInfo(
          model_id='claude-3-5-sonnet-20241022',
          provider='Anthropic',
@@ -139,9 +165,34 @@ SUPPORTED_MODELS = [
              cost_per_1m_output_tokens=15,
          ),
          rate_limits=AnthropicModelInfo.RateLimits(
-             # Tier 4 rate limits
-             max_requests_per_minute=4000,
-             max_input_tokens_per_minute=400_000,
+             max_requests_per_minute=100,
+             max_input_tokens_per_minute=1_000_000,
+             max_output_tokens_per_minute=80_000,
+         ),
+     ),
+     AnthropicModelInfo(
+         model_id='claude-3-7-sonnet@20250219',
+         alias_for='claude-3-7-sonnet-20250219',
+         provider='VertexAI',
+         in_service=True,
+         description='Claude 3.7 Sonnet model served on VertexAI (02/19/2025).',
+         release_date=datetime.datetime(2025, 2, 19),
+         input_modalities=(
+             AnthropicModelInfo.INPUT_IMAGE_TYPES
+             + AnthropicModelInfo.INPUT_DOC_TYPES
+         ),
+         context_length=lf.ModelInfo.ContextLength(
+             max_input_tokens=200_000,
+             max_output_tokens=128_000,
+         ),
+         pricing=lf.ModelInfo.Pricing(
+             cost_per_1m_cached_input_tokens=0.3,
+             cost_per_1m_input_tokens=3,
+             cost_per_1m_output_tokens=15,
+         ),
+         rate_limits=AnthropicModelInfo.RateLimits(
+             max_requests_per_minute=100,
+             max_input_tokens_per_minute=1_000_000,
              max_output_tokens_per_minute=80_000,
          ),
      ),
@@ -457,8 +508,7 @@ class Anthropic(rest.REST):
          'x-api-key': self._api_key,
          'anthropic-version': self.api_version,
          'content-type': 'application/json',
-         # TODO(yifenglu): Remove beta flag once the feature is fully supported.
-         'anthropic-beta': 'pdfs-2024-09-25',
+         'anthropic-beta': 'output-128k-2025-02-19',
      }
 
    @functools.cached_property
@@ -506,6 +556,17 @@ class Anthropic(rest.REST):
        args['top_k'] = options.top_k
      if options.top_p is not None:
        args['top_p'] = options.top_p
+     if options.max_thinking_tokens is not None:
+       args['thinking'] = {
+           'type': 'enabled',
+           # Minimum budget is 1,024 tokens.
+           'budget_tokens': options.max_thinking_tokens,
+       }
+       # Thinking isn’t compatible with temperature, top_p, or top_k.
+       # https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#important-considerations-when-using-extended-thinking
+       args.pop('temperature', None)
+       args.pop('top_k', None)
+       args.pop('top_p', None)
      return args
 
    def _content_from_message(self, prompt: lf.Message) -> list[dict[str, Any]]:
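As a rough illustration of what the branch above emits, here is a hypothetical set of request arguments when a 2,048-token thinking budget is configured. The model id and `max_tokens` value are placeholders, not taken from the package.

```python
# Sketch of the sampling args produced by the handler above when
# `max_thinking_tokens=2048` is set: a `thinking` block is present and
# temperature/top_k/top_p have been popped, since they are incompatible
# with extended thinking.
args = {
    'model': 'claude-3-7-sonnet-20250219',  # placeholder model id
    'max_tokens': 8192,                     # placeholder output budget
    'thinking': {
        'type': 'enabled',
        'budget_tokens': 2048,              # Anthropic minimum is 1,024
    },
    # No 'temperature', 'top_k', or 'top_p' keys remain.
}
```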
@@ -559,9 +620,27 @@ class Anthropic(rest.REST):
    def _message_from_content(self, content: list[dict[str, Any]]) -> lf.Message:
      """Converts Anthropic's content protocol to message."""
      # Refer: https://docs.anthropic.com/claude/reference/messages-examples
-     return lf.AIMessage.from_chunks(
+     # Thinking: https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#implementing-extended-thinking # pylint: disable=line-too-long
+     response = lf.AIMessage.from_chunks(
          [x['text'] for x in content if x['type'] == 'text']
      )
+     thinking = lf.AIMessage.from_chunks(
+         [x['thinking'] for x in content if x['type'] == 'thinking']
+     )
+     # Thinking is added into the metadata.thinking field.
+     response.set('thinking', thinking)
+     return response
+
+
+ class Claude37(Anthropic):
+   """Base class for Claude 3.7 models."""
+
+
+ # pylint: disable=invalid-name
+ class Claude37Sonnet_20250219(Claude37):
+   """Claude 3.7 Sonnet model (latest)."""
+
+   model = 'claude-3-7-sonnet-20250219'
 
 
  class Claude35(Anthropic):
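A hypothetical end-to-end sketch of reading the thinking content that `_message_from_content` attaches to the response. The prompt, budget, and API-key handling are assumptions; `response.get('thinking')` simply mirrors the `response.set('thinking', ...)` call above.

```python
import langfun as lf

# Assumed setup: Claude 3.7 Sonnet with an extended-thinking budget.
lm = lf.llms.Claude37Sonnet_20250219(
    api_key='<your-anthropic-api-key>',
    sampling_options=lf.LMSamplingOptions(max_thinking_tokens=2048),
)

response = lm('Is 391 prime? Answer yes or no.')
print(response.text)             # Final answer assembled from 'text' chunks.
print(response.get('thinking'))  # Thinking chunks stored in message metadata.
```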
@@ -302,6 +302,12 @@ class VertexAIAnthropic(VertexAI, anthropic.Anthropic):
  # pylint: disable=invalid-name
 
 
+ class VertexAIClaude37Sonnet_20250219(VertexAIAnthropic):
+   """Anthropic's Claude 3.7 model on VertexAI."""
+   model = 'claude-3-7-sonnet@20250219'
+   location = 'us-east5'
+
+
  class VertexAIClaude35Sonnet_20241022(VertexAIAnthropic):
    """Anthropic's Claude 3.5 Sonnet model on VertexAI."""
    model = 'claude-3-5-sonnet-v2@20241022'
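A brief, hypothetical instantiation sketch for the new VertexAI variant. The GCP project id is a placeholder, and the usual application-default-credentials setup is assumed; the class itself pins `location` to `us-east5`.

```python
import langfun as lf

# Assumed setup: Claude 3.7 Sonnet served through Vertex AI.
lm = lf.llms.VertexAIClaude37Sonnet_20250219(project='my-gcp-project')
response = lm('Say hello in one short sentence.')
print(response.text)
```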
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: langfun
- Version: 0.1.2.dev202502230803
+ Version: 0.1.2.dev202502250804
  Summary: Langfun: Language as Functions.
  Home-page: https://github.com/google/langfun
  Author: Langfun Authors
@@ -7,8 +7,8 @@ langfun/core/concurrent_test.py,sha256=rc5T-2giWgtbwNuN6gmei7Uwo66HsJeeRtXZCpya_
  langfun/core/console.py,sha256=V_mOiFi9oGh8gLsUeR56pdFDkuvYOpvQt7DY1KUTWTA,2535
  langfun/core/console_test.py,sha256=pBOcuNMJdVELywvroptfcRtJMsegMm3wSlHAL2TdxVk,1679
  langfun/core/langfunc.py,sha256=G50YgoVZ0y1GFw2ev41MlOqr6qa8YakbvNC0h_E0PiA,11140
- langfun/core/langfunc_test.py,sha256=fKIAqcSNI_7M6nwoZW77HEam8Oa6vcWhsCNgVJanzb4,8822
- langfun/core/language_model.py,sha256=tTEpQeu3jw1vr0iW42mTHPztvG66puSRgYDYuWQyVSU,45688
+ langfun/core/langfunc_test.py,sha256=kK486D5R3ifI6jN8ycTfnFwwghHAoLCj-g5F67izMZM,8848
+ langfun/core/language_model.py,sha256=ogcEKbZHP2TySOmUxL-sKGvlNTTHt0b5haslw6UvS6g,45785
  langfun/core/language_model_test.py,sha256=iA5uo7rIj2jAtCYzMzhyNg1fWqE2Onn60bOO58q72C0,36454
  langfun/core/logging.py,sha256=W3mLEMXdo210Q5OX3a1ZTc4nU-xMy73-IfNKnsA-RFo,8051
  langfun/core/logging_test.py,sha256=N7-YvSXC8zvnr2SNwWHOykn1CFmqvIuTLDgn41Ku9JU,6642
@@ -26,7 +26,7 @@ langfun/core/subscription_test.py,sha256=Y4ZdbZEwm83YNZBxHff0QR4QUa4rdaNXA3_jfIc
  langfun/core/template.py,sha256=jNhYSrbLIn9kZOa03w5QZbyjgfnzJzE_ZrrMvvWY4t4,24929
  langfun/core/template_test.py,sha256=g7x4mgNIAXEEj-4W1D5whGfl5YikLEQoylKPzaeDomk,17069
  langfun/core/agentic/__init__.py,sha256=ndoDX0sAYsa3eVdXuu6nB-a-BH5TaK3urW6zAaFiyVs,1110
- langfun/core/agentic/action.py,sha256=yW5-2NRHIrQmmQEYmL83aIdSwaRfUez9mqCbME_aBWQ,25391
+ langfun/core/agentic/action.py,sha256=wG_vDH4Jlx0Hyj0g1gs9u0jIDwHUkQn4RiPkcoGG2Ks,27535
  langfun/core/agentic/action_eval.py,sha256=ZtjTh34S7XPIUqandQ0YwAtzw-S7ofuZ7rRXnRbUMdQ,4424
  langfun/core/agentic/action_eval_test.py,sha256=tRUkWmOE9p0rpNOq19xAY2oDEnYsEEykjg6sUpAwJk0,2832
  langfun/core/agentic/action_test.py,sha256=Gu7P5XQvzqbKawn2jjyTpWaARzzhzO04KkC1TuBnUnw,4612
@@ -44,13 +44,13 @@ langfun/core/coding/python/sandboxing.py,sha256=yeEdydMkfHk3Hj3-5ykeROpYyLbRfZ4B
  langfun/core/coding/python/sandboxing_test.py,sha256=H_0_pd-_uS-ci5yYhmDTR6-hyzosAFkExziAHndfdDo,2023
  langfun/core/eval/__init__.py,sha256=OEXr1ZRuvLuhJJfuQ1ZWQ-SvYzjyrtiAAEogYaB7E6o,1933
  langfun/core/eval/base.py,sha256=XXerMVkK4wREo7K1_aCyay6vDjw3mfs389XThAdzv50,75768
- langfun/core/eval/base_test.py,sha256=-LsIV9DXlDal0EnOlaWpibJvfef0NbxtZAm0OH_abAE,27189
+ langfun/core/eval/base_test.py,sha256=UJBsfsXNAfZpSSI6oEF7_VxPp13SzRRLRfuCnU7a4JM,27189
  langfun/core/eval/matching.py,sha256=AVKkGoc-BaHEzgSBamaAk3194TgqckDe_dinpS6LrXI,9323
- langfun/core/eval/matching_test.py,sha256=QCoYEuf4b_1bkHqUCuRzKMbXHrV3AB2FCOBivo1stC4,5249
+ langfun/core/eval/matching_test.py,sha256=rdawe6q3pWfKyW6Qk67b3LBN_zYTa7OfDYjl9mJj5P8,5249
  langfun/core/eval/patching.py,sha256=R0s2eAd1m97exQt06dmUL0V_MBG0W2Hxg7fhNB7cXW0,3866
  langfun/core/eval/patching_test.py,sha256=8kCd54Egjju22FMgtJuxEsrXkW8ifs-UUBHtrCG1L6w,4775
  langfun/core/eval/scoring.py,sha256=_DvnlgI1SdRVaOojao_AkV3pnenfCPOqyhvlg-Sw-5M,6322
- langfun/core/eval/scoring_test.py,sha256=O8olHbrUEg60gMxwOkWzKBJZpZoUlmVnBANX5Se2SXM,4546
+ langfun/core/eval/scoring_test.py,sha256=adQEeuDux11dt9lkJIzLYNmqYSi9h-Mi2Cr0lUUTx9I,4546
  langfun/core/eval/v2/__init__.py,sha256=qoa6zKdFXOFyCX6vay6OdgPf1eUhYGoHYAxe35qECGk,1628
  langfun/core/eval/v2/checkpointing.py,sha256=u-MrnwQbm0T-BDcn9pPXs_FeKPzMYm1-pVos0DiTqgM,11769
  langfun/core/eval/v2/checkpointing_test.py,sha256=R-R8SFzworuYnMmGGcvE9f4oiYkb8HMoZ0m4euF3pus,8466
@@ -73,8 +73,8 @@ langfun/core/eval/v2/reporting.py,sha256=7rL9LLmGYnQ5HIjqRqsOMkUlBl4BmFPEL6Vlofq
  langfun/core/eval/v2/reporting_test.py,sha256=UmYSAQvD3AIXsSyWQ-WD2uLtEISYpmBeoKY5u5Qwc8E,5696
  langfun/core/eval/v2/runners.py,sha256=DKEmSlGXjOXKWFdBhTpLy7tMsBHZHd1Brl3hWIngsSQ,15931
  langfun/core/eval/v2/runners_test.py,sha256=A37fKK2MvAVTiShsg_laluJzJ9AuAQn52k7HPbfD0Ks,11666
- langfun/core/llms/__init__.py,sha256=gph3HngayM-WExoaoKM-S6Ke7_MRtvEojKoaf9LusJ0,7740
- langfun/core/llms/anthropic.py,sha256=nov4_EoqDL0_DAW93Eb5p-fHE-AfvzA8Q7PtYfcQdMg,20548
+ langfun/core/llms/__init__.py,sha256=OnRL-OdOjSioEiNV7-QSxFlaX18VeH_XOyJVo7aEc0U,7924
+ langfun/core/llms/anthropic.py,sha256=MszirOzbpJg2xo65SJpKfJB33HTkGt0vhJUt8Bzt510,23475
  langfun/core/llms/anthropic_test.py,sha256=SSK7OTx3gMYE1NMAi_PqQqeNsCkZAcVJvl_OCEOhyzk,7145
  langfun/core/llms/compositional.py,sha256=csW_FLlgL-tpeyCOTVvfUQkMa_zCN5Y2I-YbSNuK27U,2872
  langfun/core/llms/compositional_test.py,sha256=4eTnOer-DncRKGaIJW2ZQQMLnt5r2R0UIx_DYOvGAQo,2027
@@ -96,7 +96,7 @@ langfun/core/llms/openai_compatible_test.py,sha256=I5WWL3lRo-WXnSoUKLkIEjXfwjoiH
  langfun/core/llms/openai_test.py,sha256=gwuO6aoa296iM2welWV9ua4KF8gEVGsEPakgbtkWkFQ,2687
  langfun/core/llms/rest.py,sha256=xdR4ar4y7YkeZTs_BHUyNOdhqoghztMcqyz1f9kTXH8,4054
  langfun/core/llms/rest_test.py,sha256=zWGiI08f9gXsoQPJS9TlX1zD2uQLrJUB-1VpAJXRHfs,3475
- langfun/core/llms/vertexai.py,sha256=UBOt58FHlzwM1EwYBt2ADk7K2AFg7HMdI2aEwQZLAc4,17473
+ langfun/core/llms/vertexai.py,sha256=Cf_QeFybIF-dALxdtwy7-ElepSv1ryd6im5-NijwUGE,17646
  langfun/core/llms/vertexai_test.py,sha256=dOprP_uLNmXHYxMoX_hMPMsjKR-e_B5nKHjhlMCQoOQ,4252
  langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
  langfun/core/llms/cache/base.py,sha256=rt3zwmyw0y9jsSGW-ZbV1vAfLxQ7_3AVk0l2EySlse4,3918
@@ -148,8 +148,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
  langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
  langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
  langfun/core/templates/selfplay_test.py,sha256=Ot__1P1M8oJfoTp-M9-PQ6HUXqZKyMwvZ5f7yQ3yfyM,2326
- langfun-0.1.2.dev202502230803.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
- langfun-0.1.2.dev202502230803.dist-info/METADATA,sha256=MlT9MLl5GBBJ1I8GRfR9921WL3KpVjxKeQ6Rjx8X_T8,8172
- langfun-0.1.2.dev202502230803.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- langfun-0.1.2.dev202502230803.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
- langfun-0.1.2.dev202502230803.dist-info/RECORD,,
+ langfun-0.1.2.dev202502250804.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+ langfun-0.1.2.dev202502250804.dist-info/METADATA,sha256=A61h0BEmyul-0mBhbeE_dJ9KqhwE3uxw_ofYEVvIAao,8172
+ langfun-0.1.2.dev202502250804.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+ langfun-0.1.2.dev202502250804.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+ langfun-0.1.2.dev202502250804.dist-info/RECORD,,