langfun 0.1.2.dev202504080804__py3-none-any.whl → 0.1.2.dev202504100804__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langfun/core/llms/__init__.py +1 -0
- langfun/core/llms/anthropic.py +11 -6
- langfun/core/llms/anthropic_test.py +25 -3
- langfun/core/llms/gemini.py +10 -2
- langfun/core/llms/gemini_test.py +22 -1
- langfun/core/llms/openai_compatible.py +2 -1
- langfun/core/llms/openai_compatible_test.py +1 -1
- langfun/core/llms/vertexai.py +5 -0
- langfun/core/structured/querying.py +11 -0
- langfun/core/structured/querying_test.py +21 -1
- {langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/METADATA +1 -1
- {langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/RECORD +15 -15
- {langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/WHEEL +0 -0
- {langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/licenses/LICENSE +0 -0
- {langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/top_level.txt +0 -0
langfun/core/llms/__init__.py
CHANGED
@@ -74,6 +74,7 @@ from langfun.core.llms.vertexai import VertexAIGemini2ProExp_20250205
 from langfun.core.llms.vertexai import VertexAIGemini2FlashThinkingExp_20250121
 from langfun.core.llms.vertexai import VertexAIGeminiExp_20241206
 from langfun.core.llms.vertexai import VertexAIGemini25ProExp_20250325
+from langfun.core.llms.vertexai import VertexAIGemini25ProPreview_20250325
 
 # For backward compatibility.
 GeminiPro1_5 = Gemini15Pro
langfun/core/llms/anthropic.py
CHANGED
@@ -509,13 +509,18 @@ class Anthropic(rest.REST):
           raise ValueError(f'Unsupported modality: {chunk!r}.')
       return chunk
 
-
-
-
-
-
-
+    messages = []
+    if system_message := prompt.get('system_message'):
+      assert isinstance(system_message, lf.SystemMessage), type(system_message)
+      messages.append(
+          system_message.as_format(
+              'anthropic', chunk_preprocessor=modality_check
+          )
+      )
+    messages.append(
+        prompt.as_format('anthropic', chunk_preprocessor=modality_check)
     )
+    request.update(messages=messages)
     return request
 
   def _request_args(self, options: lf.LMSamplingOptions) -> dict[str, Any]:
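With this change, a system message attached to the prompt as metadata is formatted and prepended to the Anthropic request's `messages` list. A minimal caller-side sketch, mirroring the new test below (the API key is a placeholder):

import langfun.core as lf
from langfun.core.llms import anthropic

lm = anthropic.Claude3Haiku(api_key='my-api-key')  # placeholder key
# `system_message` rides along as message metadata; Anthropic.request()
# now emits it ahead of the user turn.
response = lm(lf.UserMessage('hello', system_message=lf.SystemMessage('system')))
print(response.text)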
langfun/core/llms/anthropic_test.py
CHANGED
@@ -35,7 +35,8 @@ def mock_requests_post(url: str, json: dict[str, Any], **kwargs):
       'content': [{
           'type': 'text',
           'text': (
-
+              '\n'.join(c['content'][0]['text'] for c in json['messages']) +
+              f' with temperature={json.get("temperature")}, '
               f'top_k={json.get("top_k")}, '
               f'top_p={json.get("top_p")}, '
               f'max_tokens={json.get("max_tokens")}, '
@@ -140,11 +141,11 @@ class AnthropicTest(unittest.TestCase):
       mock_request.side_effect = mock_requests_post
 
       lm = anthropic.Claude3Haiku(api_key='fake key')
-      self.assertRegex(lm('
+      self.assertRegex(lm('hello').text, 'hello.*')
 
       os.environ['ANTHROPIC_API_KEY'] = 'abc'
       lm = anthropic.Claude3Haiku()
-      self.assertRegex(lm('
+      self.assertRegex(lm('hello').text, 'hello.*')
       del os.environ['ANTHROPIC_API_KEY']
 
   def test_call(self):
@@ -165,6 +166,27 @@ class AnthropicTest(unittest.TestCase):
     self.assertIsNotNone(response.usage.total_tokens, 3)
     self.assertGreater(response.usage.estimated_cost, 0)
 
+  def test_call_with_system_message(self):
+    with mock.patch('requests.Session.post') as mock_request:
+      mock_request.side_effect = mock_requests_post
+      lm = anthropic.Claude3Haiku(api_key='fake_key')
+      response = lm(
+          lf.UserMessage(
+              'hello', system_message=lf.SystemMessage('system')
+          ),
+          temperature=0.0,
+          top_k=0.1,
+          top_p=0.2,
+          stop=['\n'],
+      )
+      self.assertEqual(
+          response.text,
+          (
+              'system\nhello with temperature=0.0, top_k=0.1, top_p=0.2, '
+              "max_tokens=4096, stop=['\\n']."
+          ),
+      )
+
   def test_mm_call(self):
     with mock.patch('requests.Session.post') as mock_mm_request:
       mock_mm_request.side_effect = mock_mm_requests_post
langfun/core/llms/gemini.py
CHANGED
@@ -582,9 +582,17 @@ class Gemini(rest.REST):
           raise lf.ModalityError(f'Unsupported modality: {chunk!r}') from e
       return chunk
 
-
+    contents = []
+    if system_message := prompt.get('system_message'):
+      assert isinstance(system_message, lf.SystemMessage), type(system_message)
+      contents.append(
+          system_message.as_format(
+              'gemini', chunk_preprocessor=modality_conversion)
+      )
+    contents.append(
         prompt.as_format('gemini', chunk_preprocessor=modality_conversion)
-
+    )
+    request['contents'] = contents
     return request
 
   def _generation_config(
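Judging from the updated mock in gemini_test.py below, each entry of `contents` carries its text under `parts[0].text`, so a prompt with a system message now yields two entries instead of one. A hedged sketch of the payload shape (only the fields the mock inspects are shown; anything else is omitted):

# Approximate shape of request['contents'] for
# lf.UserMessage('hello', system_message=lf.SystemMessage('system')):
contents = [
    {'parts': [{'text': 'system'}]},  # formatted system message, first
    {'parts': [{'text': 'hello'}]},   # formatted user prompt, second
]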
langfun/core/llms/gemini_test.py
CHANGED
@@ -38,7 +38,7 @@ example_image = (
 def mock_requests_post(url: str, json: dict[str, Any], **kwargs):
   del url, kwargs
   c = pg.Dict(json['generationConfig'])
-  content =
+  content = '\n'.join(c['parts'][0]['text'] for c in json['contents'])
   response = requests.Response()
   response.status_code = 200
   response._content = pg.to_json_str({
@@ -178,6 +178,27 @@ class GeminiTest(unittest.TestCase):
       self.assertEqual(r.metadata.usage.prompt_tokens, 3)
       self.assertEqual(r.metadata.usage.completion_tokens, 4)
 
+  def test_call_model_with_system_message(self):
+    with mock.patch('requests.Session.post') as mock_generate:
+      mock_generate.side_effect = mock_requests_post
+
+      lm = gemini.Gemini('gemini-1.5-pro', api_endpoint='')
+      r = lm(
+          lf.UserMessage('hello', system_message=lf.SystemMessage('system')),
+          temperature=2.0,
+          top_p=1.0,
+          top_k=20,
+          max_tokens=1024,
+          stop='\n',
+      )
+      self.assertEqual(
+          r.text,
+          (
+              'This is a response to system\nhello with temperature=2.0, '
+              'top_p=1.0, top_k=20, max_tokens=1024, stop=\n.'
+          ),
+      )
+
 
 if __name__ == '__main__':
   unittest.main()
langfun/core/llms/openai_compatible.py
CHANGED
@@ -113,8 +113,9 @@ class OpenAICompatible(rest.REST):
     # Users could use `metadata_system_message` to pass system message.
     system_message = prompt.metadata.get('system_message')
     if system_message:
+      assert isinstance(system_message, lf.SystemMessage), type(system_message)
       messages.append(
-
+          system_message.as_format(
              'openai', chunk_preprocessor=modality_check
          )
      )
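For OpenAI-compatible backends the system message was already read from message metadata; the new assertion only tightens the contract so that the metadata value must be an lf.SystemMessage. A minimal sketch, assuming an OpenAI-compatible endpoint (the api_endpoint and model values are placeholders, not part of this diff):

import langfun.core as lf
from langfun.core.llms import openai_compatible

lm = openai_compatible.OpenAICompatible(
    api_endpoint='https://my-server/v1/chat/completions',  # placeholder
    model='my-model',                                      # placeholder
)
# Emitted as an OpenAI-style system entry ahead of the user turn.
response = lm(lf.UserMessage('hello', system_message=lf.SystemMessage('system')))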
langfun/core/llms/vertexai.py
CHANGED
@@ -166,6 +166,11 @@ class VertexAIGemini(VertexAI, gemini.Gemini):
 #
 
 
+class VertexAIGemini25ProPreview_20250325(VertexAIGemini):  # pylint: disable=invalid-name
+  """Gemini 2.5 Pro model launched on 03/25/2025."""
+  model = 'gemini-2.5-pro-preview-03-25'
+
+
 class VertexAIGemini25ProExp_20250325(VertexAIGemini):  # pylint: disable=invalid-name
   """Gemini 2.5 Pro model launched on 03/25/2025."""
   model = 'gemini-2.5-pro-exp-03-25'
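Together with the new export in langfun/core/llms/__init__.py above, the preview model is usable like any other Vertex AI Gemini class. A minimal sketch (the project and location values are placeholders):

from langfun.core import llms

# New in this release: the 03/25/2025 preview build of Gemini 2.5 Pro.
lm = llms.VertexAIGemini25ProPreview_20250325(
    project='my-gcp-project',  # placeholder
    location='us-central1',    # placeholder
)
print(lm('hello').text)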
langfun/core/structured/querying.py
CHANGED
@@ -109,6 +109,7 @@ def query(
     *,
     lm: lf.LanguageModel | list[lf.LanguageModel] | None = None,
     num_samples: int | list[int] = 1,
+    system_message: str | lf.Template | None = None,
     examples: list[mapping.MappingExample] | None = None,
     cache_seed: int | None = 0,
     response_postprocess: Callable[[str], str] | None = None,
@@ -244,6 +245,8 @@ def query(
       If `None`, the LM from `lf.context` will be used.
     num_samples: Number of samples to generate. If a list is provided, its
       length must match the number of models in `lm`.
+    system_message: System instructions to guide the model output. If None,
+      no system message will be used.
     examples: Few-shot examples to guide the model output. Defaults to `None`.
     cache_seed: Seed for caching the query. Queries with the same
       `(lm, prompt, cache_seed)` will use cached responses. If `None`,
@@ -282,6 +285,7 @@ def query(
         schema,
         default=default,
         lm=lm,
+        system_message=system_message,
         examples=examples,
         # Usually num_examples should not be large, so we multiple the user
         # provided cache seed by 100 to avoid collision.
@@ -326,6 +330,13 @@ def query(
   if isinstance(prompt, pg.Symbolic) and prompt.sym_partial and schema is None:
     schema = prompt.__class__
 
+  # Attach system message as input template metadata, which will be passed
+  # through to the rendered message metadata under key `system_message`.
+  if system_message is not None:
+    kwargs['metadata_system_message'] = lf.Template.from_value(
+        system_message
+    ).render(message_cls=lf.SystemMessage)
+
   # Normalize query input.
   if isinstance(prompt, (lf.Message, str)):
     # Query with structured output.
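End to end, `system_message` is now a first-class argument of lf.query. A minimal sketch mirroring the new test below (the fake LM simply returns a canned response):

import langfun as lf
from langfun.core.llms import fake

result = lf.query(
    'What is {{x}} + {{y}}?',
    int,
    system_message='You are a helpful assistant.',
    lm=fake.StaticResponse('3'),
    x=1,
    y=2,
)
print(result)  # 3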
langfun/core/structured/querying_test.py
CHANGED
@@ -45,14 +45,16 @@ class QueryTest(unittest.TestCase):
       prompt,
       schema,
       examples: list[mapping.MappingExample] | None = None,
+      system_message: str | None = None,
       *,
       expected_snippet: str,
       exact_match: bool = False,
       expected_modalities: int = 0,
+      exepcted_system_message: str | None = None,
       **kwargs,
   ):
     m = querying.query(
-        prompt, schema=schema, examples=examples,
+        prompt, schema=schema, system_message=system_message, examples=examples,
         **kwargs, returns_message=True
     )
     self.assertIsNotNone(m.lm_input)
@@ -64,6 +66,11 @@ class QueryTest(unittest.TestCase):
         len([c for c in m.lm_input.chunk() if isinstance(c, lf.Modality)]),
         expected_modalities,
     )
+    if system_message is not None:
+      self.assertEqual(
+          m.lm_input.system_message.text,
+          exepcted_system_message,
+      )
 
   def test_call(self):
     lm = fake.StaticSequence(['1'])
@@ -114,6 +121,19 @@ class QueryTest(unittest.TestCase):
         Activity(description='hello'),
     )
 
+  def test_render_with_system_message(self):
+    lm = fake.StaticResponse('1')
+    self.assert_render(
+        'What is {{x}} + {{y}}?',
+        schema=None,
+        system_message='You are a helpful assistant.',
+        x=1,
+        y=2,
+        lm=lm.clone(),
+        expected_snippet='What is 1 + 2?',
+        exepcted_system_message='You are a helpful assistant.',
+    )
+
   def test_str_to_structure_render(self):
     lm = fake.StaticResponse('1')
     self.assert_render(
{langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/RECORD
CHANGED
@@ -81,9 +81,9 @@ langfun/core/eval/v2/reporting.py,sha256=yUIPCAMnp7InIzpv1DDWrcLO-75iiOUTpscj7sm
 langfun/core/eval/v2/reporting_test.py,sha256=hcPJJaMtPulqERvHYTpId83WXdqDKnnexmULtK7WKwk,5686
 langfun/core/eval/v2/runners.py,sha256=De4d5QQ-Tpw0nPDODQexDPy0ti-FEgzHBvfH78zqdtg,15945
 langfun/core/eval/v2/runners_test.py,sha256=A37fKK2MvAVTiShsg_laluJzJ9AuAQn52k7HPbfD0Ks,11666
-langfun/core/llms/__init__.py,sha256=
-langfun/core/llms/anthropic.py,sha256=
-langfun/core/llms/anthropic_test.py,sha256=
+langfun/core/llms/__init__.py,sha256=bwVMDRc4ypUY8KntfAccjVbqFxUzSGmvB6kHTcXmkLk,8255
+langfun/core/llms/anthropic.py,sha256=dxzg8JBP2GMmmREBh-zQg9R1huxib1pvcU3i1XhB6Pw,21953
+langfun/core/llms/anthropic_test.py,sha256=aZkIcscX6FV2fdFqy2opBF_06vDPGDX6MhRqI9z7vRo,7864
 langfun/core/llms/azure_openai.py,sha256=-KkSLaR54MlsIqz_XIwv0TnsBnvNTAxnjA2Q2O2u5KM,2733
 langfun/core/llms/azure_openai_test.py,sha256=lkMZkQdJBV97fTM4C4z8qNfvr6spgiN5G4hvVUIVr0M,1735
 langfun/core/llms/compositional.py,sha256=csW_FLlgL-tpeyCOTVvfUQkMa_zCN5Y2I-YbSNuK27U,2872
@@ -92,8 +92,8 @@ langfun/core/llms/deepseek.py,sha256=jvTxdXPr-vH6HNakn_Ootx1heDg8Fen2FUkUW36bpCs
 langfun/core/llms/deepseek_test.py,sha256=DvROWPlDuow5E1lfoSkhyGt_ELA19JoQoDsTnRgDtTg,1847
 langfun/core/llms/fake.py,sha256=xmgCkk9y0I4x0IT32SZ9_OT27aLadXH8PRiYNo5VTd4,3265
 langfun/core/llms/fake_test.py,sha256=2h13qkwEz_JR0mtUDPxdAhQo7MueXaFSwsD2DIRDW9g,7653
-langfun/core/llms/gemini.py,sha256=
-langfun/core/llms/gemini_test.py,sha256=
+langfun/core/llms/gemini.py,sha256=lkXCfc1LTo6SFQCA_7IH2tMooL-UrvrW6B5v4iQWKWQ,22953
+langfun/core/llms/gemini_test.py,sha256=lv0jCGwsmbAngiWER5NGc3PLrLif5yz3dLNl4yFRrxI,6201
 langfun/core/llms/google_genai.py,sha256=IANJlhTEvBNAgmK8Rsc_6xguQmwgE76GayESqae4NMk,4906
 langfun/core/llms/google_genai_test.py,sha256=NKNtpebArQ9ZR7Qsnhd2prFIpMjleojy6o6VMXkJ1zY,1502
 langfun/core/llms/groq.py,sha256=S9V10kFo3cgX89qPgt_umq-SpRnxEDLTt_hJmpERfbo,12066
@@ -101,12 +101,12 @@ langfun/core/llms/groq_test.py,sha256=P4EgexCqsh4K2x11w0UL_vz-YYNaPdQU0WsDAdnTRQ
 langfun/core/llms/llama_cpp.py,sha256=Z7P3gc4xeIjc2bX0Ey1y5EUYJVMnMa2Q67PZ9iye9sE,1409
 langfun/core/llms/llama_cpp_test.py,sha256=wfTO7nmUwL65U2kK9P9fcMt92JjNDuVia4G1E7znf_4,1086
 langfun/core/llms/openai.py,sha256=zDi-wkV-r3vUZYoTFvU1gaNNVQVQytVuZ4CvTGLsRL8,39576
-langfun/core/llms/openai_compatible.py,sha256=
-langfun/core/llms/openai_compatible_test.py,sha256=
+langfun/core/llms/openai_compatible.py,sha256=CGc8--B3uPmn_TpIBqo6QZLqZkm7tQ9x0S148S7KJrw,5280
+langfun/core/llms/openai_compatible_test.py,sha256=2KNPxA8GG_H--j0oJhyFrnzi4lqEhH8Poz8HszsqXsQ,17042
 langfun/core/llms/openai_test.py,sha256=gwuO6aoa296iM2welWV9ua4KF8gEVGsEPakgbtkWkFQ,2687
 langfun/core/llms/rest.py,sha256=ucMKHXlmg6pYSIMhQSktLmTSGMSIiqO8fp1r_GiEhaU,4333
 langfun/core/llms/rest_test.py,sha256=_zM7nV8DEVyoXNiQOnuwJ917mWjki0614H88rNmDboE,5020
-langfun/core/llms/vertexai.py,sha256=
+langfun/core/llms/vertexai.py,sha256=N8cX27vVBdiLr2ImpqH_GVihLEauWSlNvBBD9nhDsd8,18012
 langfun/core/llms/vertexai_test.py,sha256=dOprP_uLNmXHYxMoX_hMPMsjKR-e_B5nKHjhlMCQoOQ,4252
 langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
 langfun/core/llms/cache/base.py,sha256=rt3zwmyw0y9jsSGW-ZbV1vAfLxQ7_3AVk0l2EySlse4,3918
@@ -137,8 +137,8 @@ langfun/core/structured/mapping.py,sha256=of-EeBq0RgmkiUaSk2rVEDVCzgn_wXU8tRke7N
 langfun/core/structured/mapping_test.py,sha256=OntYvfDitAf0tAnzQty3YS90vyEn6FY1Mi93r_ViEk8,9594
 langfun/core/structured/parsing.py,sha256=MGvI7ypXlwfzr5XB8_TFU9Ei0_5reYqkWkv64eAy0EA,12015
 langfun/core/structured/parsing_test.py,sha256=kNPrhpdPY3iWhUld0TFYU-Zgn44wC0d6YuQ9XdVbQ8o,22346
-langfun/core/structured/querying.py,sha256=
-langfun/core/structured/querying_test.py,sha256=
+langfun/core/structured/querying.py,sha256=K4e_hx5Np1-KPsmN-A-6uTD7PRPEuM9ggROj_SBaWe0,24431
+langfun/core/structured/querying_test.py,sha256=5gNYus8Vf6lxMM7rxQGIUR_lzQw4MY7eikQgA5NNA4U,34156
 langfun/core/structured/schema.py,sha256=_iqhHEGDQsHk0AsybWnK44sOspTWkKJjci781PWD7x0,27988
 langfun/core/structured/schema_generation.py,sha256=3AcuKvv3VOtKY5zMVqODrxfOuDxzoZtGeBxHlOWDOWw,5308
 langfun/core/structured/schema_generation_test.py,sha256=RM9s71kMNg2jTePwInkiW9fK1ACN37eyPeF8OII-0zw,2950
@@ -156,8 +156,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
 langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
 langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
 langfun/core/templates/selfplay_test.py,sha256=Ot__1P1M8oJfoTp-M9-PQ6HUXqZKyMwvZ5f7yQ3yfyM,2326
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
-langfun-0.1.2.
+langfun-0.1.2.dev202504100804.dist-info/licenses/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+langfun-0.1.2.dev202504100804.dist-info/METADATA,sha256=UAg98lo428dnsbhT-fVt4sgTG-Ek6CLtRGfIzTszK30,7692
+langfun-0.1.2.dev202504100804.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+langfun-0.1.2.dev202504100804.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
+langfun-0.1.2.dev202504100804.dist-info/RECORD,,
{langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/WHEEL
RENAMED
File without changes
{langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/licenses/LICENSE
RENAMED
File without changes
{langfun-0.1.2.dev202504080804.dist-info → langfun-0.1.2.dev202504100804.dist-info}/top_level.txt
RENAMED
File without changes