langfun 0.1.2.dev202501170804__py3-none-any.whl → 0.1.2.dev202501180803__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,11 +27,15 @@ from langfun.core.llms.fake import StaticSequence
27
27
  # Compositional models.
28
28
  from langfun.core.llms.compositional import RandomChoice
29
29
 
30
- # REST-based models.
30
+ # Base models by request/response protocol.
31
31
  from langfun.core.llms.rest import REST
32
+ from langfun.core.llms.openai_compatible import OpenAICompatible
33
+ from langfun.core.llms.gemini import Gemini
34
+ from langfun.core.llms.anthropic import Anthropic
32
35
 
33
- # VertexAI-based models.
36
+ # Base models by serving platforms.
34
37
  from langfun.core.llms.vertexai import VertexAI
38
+ from langfun.core.llms.groq import Groq
35
39
 
36
40
  # Gemini models.
37
41
  from langfun.core.llms.google_genai import GenAI
@@ -60,9 +64,6 @@ from langfun.core.llms.vertexai import VertexAIGeminiFlash1_5_002
60
64
  from langfun.core.llms.vertexai import VertexAIGeminiFlash1_5_001
61
65
  from langfun.core.llms.vertexai import VertexAIGeminiPro1
62
66
 
63
- # Base for OpenAI-compatible models.
64
- from langfun.core.llms.openai_compatible import OpenAICompatible
65
-
66
67
  # OpenAI models.
67
68
  from langfun.core.llms.openai import OpenAI
68
69
 
@@ -119,7 +120,6 @@ from langfun.core.llms.openai import Gpt3Ada
119
120
 
120
121
  # Anthropic models.
121
122
 
122
- from langfun.core.llms.anthropic import Anthropic
123
123
  from langfun.core.llms.anthropic import Claude35Sonnet
124
124
  from langfun.core.llms.anthropic import Claude35Sonnet20241022
125
125
  from langfun.core.llms.anthropic import Claude35Sonnet20240620
@@ -135,7 +135,17 @@ from langfun.core.llms.vertexai import VertexAIClaude3_Opus_20240229
135
135
 
136
136
  # Misc open source models.
137
137
 
138
- from langfun.core.llms.groq import Groq
138
+ # Gemma models.
139
+ from langfun.core.llms.groq import GroqGemma2_9B_IT
140
+ from langfun.core.llms.groq import GroqGemma_7B_IT
141
+
142
+ # Llama models.
143
+ from langfun.core.llms.vertexai import VertexAILlama
144
+ from langfun.core.llms.vertexai import VertexAILlama3_2_90B
145
+ from langfun.core.llms.vertexai import VertexAILlama3_1_405B
146
+ from langfun.core.llms.vertexai import VertexAILlama3_1_70B
147
+ from langfun.core.llms.vertexai import VertexAILlama3_1_8B
148
+
139
149
  from langfun.core.llms.groq import GroqLlama3_2_3B
140
150
  from langfun.core.llms.groq import GroqLlama3_2_1B
141
151
  from langfun.core.llms.groq import GroqLlama3_1_70B
@@ -143,18 +153,28 @@ from langfun.core.llms.groq import GroqLlama3_1_8B
143
153
  from langfun.core.llms.groq import GroqLlama3_70B
144
154
  from langfun.core.llms.groq import GroqLlama3_8B
145
155
  from langfun.core.llms.groq import GroqLlama2_70B
156
+
157
+ # Mistral models.
158
+ from langfun.core.llms.vertexai import VertexAIMistral
159
+ from langfun.core.llms.vertexai import VertexAIMistralLarge_20241121
160
+ from langfun.core.llms.vertexai import VertexAIMistralLarge_20240724
161
+ from langfun.core.llms.vertexai import VertexAIMistralNemo_20240724
162
+ from langfun.core.llms.vertexai import VertexAICodestral_20250113
163
+ from langfun.core.llms.vertexai import VertexAICodestral_20240529
164
+
146
165
  from langfun.core.llms.groq import GroqMistral_8x7B
147
- from langfun.core.llms.groq import GroqGemma2_9B_IT
148
- from langfun.core.llms.groq import GroqGemma_7B_IT
166
+
167
+ # DeepSeek models.
168
+ from langfun.core.llms.deepseek import DeepSeek
169
+ from langfun.core.llms.deepseek import DeepSeekChat
170
+
171
+ # Whisper models.
149
172
  from langfun.core.llms.groq import GroqWhisper_Large_v3
150
173
  from langfun.core.llms.groq import GroqWhisper_Large_v3Turbo
151
174
 
152
175
  # LLaMA C++ models.
153
176
  from langfun.core.llms.llama_cpp import LlamaCppRemote
154
177
 
155
- # DeepSeek models.
156
- from langfun.core.llms.deepseek import DeepSeek
157
- from langfun.core.llms.deepseek import DeepSeekChat
158
178
 
159
179
  # Placeholder for Google-internal imports.
160
180
 
@@ -380,7 +380,7 @@ class Gemini(rest.REST):
380
380
  return (
381
381
  cost_per_1m_input_tokens * num_input_tokens
382
382
  + cost_per_1m_output_tokens * num_output_tokens
383
- ) / 1000_1000
383
+ ) / 1000_000
384
384
 
385
385
  @property
386
386
  def model_id(self) -> str:
@@ -20,6 +20,7 @@ from typing import Annotated, Any, Literal
20
20
  import langfun.core as lf
21
21
  from langfun.core.llms import anthropic
22
22
  from langfun.core.llms import gemini
23
+ from langfun.core.llms import openai_compatible
23
24
  from langfun.core.llms import rest
24
25
  import pyglove as pg
25
26
 
@@ -108,7 +109,7 @@ class VertexAI(rest.REST):
108
109
  credentials = self.credentials
109
110
  if credentials is None:
110
111
  # Use default credentials.
111
- credentials = google_auth.default(
112
+ credentials, _ = google_auth.default(
112
113
  scopes=['https://www.googleapis.com/auth/cloud-platform']
113
114
  )
114
115
  self._credentials = credentials
@@ -281,3 +282,280 @@ class VertexAIClaude3_5_Haiku_20241022(VertexAIAnthropic):
281
282
  model = 'claude-3-5-haiku@20241022'
282
283
 
283
284
  # pylint: enable=invalid-name
285
+
286
+ #
287
+ # Llama models on Vertex AI.
288
+ # pylint: disable=line-too-long
289
+ # Pricing: https://cloud.google.com/vertex-ai/generative-ai/pricing?_gl=1*ukuk6u*_ga*MjEzMjc4NjM2My4xNzMzODg4OTg3*_ga_WH2QY8WWF5*MTczNzEzNDU1Mi4xMjQuMS4xNzM3MTM0NzczLjU5LjAuMA..#meta-models
290
+ # pylint: enable=line-too-long
291
+
292
+ LLAMA_MODELS = {
293
+ 'llama-3.2-90b-vision-instruct-maas': pg.Dict(
294
+ latest_update='2024-09-25',
295
+ in_service=True,
296
+ rpm=0,
297
+ tpm=0,
298
+ # Free during preview.
299
+ cost_per_1m_input_tokens=None,
300
+ cost_per_1m_output_tokens=None,
301
+ ),
302
+ 'llama-3.1-405b-instruct-maas': pg.Dict(
303
+ latest_update='2024-09-25',
304
+ in_service=True,
305
+ rpm=0,
306
+ tpm=0,
307
+ # GA.
308
+ cost_per_1m_input_tokens=5,
309
+ cost_per_1m_output_tokens=16,
310
+ ),
311
+ 'llama-3.1-70b-instruct-maas': pg.Dict(
312
+ latest_update='2024-09-25',
313
+ in_service=True,
314
+ rpm=0,
315
+ tpm=0,
316
+ # Free during preview.
317
+ cost_per_1m_input_tokens=None,
318
+ cost_per_1m_output_tokens=None,
319
+ ),
320
+ 'llama-3.1-8b-instruct-maas': pg.Dict(
321
+ latest_update='2024-09-25',
322
+ in_service=True,
323
+ rpm=0,
324
+ tpm=0,
325
+ # Free during preview.
326
+ cost_per_1m_input_tokens=None,
327
+ cost_per_1m_output_tokens=None,
328
+ )
329
+ }
330
+
331
+
332
+ @pg.use_init_args(['model'])
333
+ @pg.members([('api_endpoint', pg.typing.Str().freeze(''))])
334
+ class VertexAILlama(VertexAI, openai_compatible.OpenAICompatible):
335
+ """Llama models on VertexAI."""
336
+
337
+ model: pg.typing.Annotated[
338
+ pg.typing.Enum(pg.MISSING_VALUE, list(LLAMA_MODELS.keys())),
339
+ 'Llama model ID.',
340
+ ]
341
+
342
+ locations: Annotated[
343
+ Literal['us-central1'],
344
+ (
345
+ 'GCP locations with Llama models hosted. '
346
+ 'See https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/llama#regions-quotas'
347
+ )
348
+ ] = 'us-central1'
349
+
350
+ @property
351
+ def api_endpoint(self) -> str:
352
+ assert self._api_initialized
353
+ return (
354
+ f'https://{self._location}-aiplatform.googleapis.com/v1beta1/projects/'
355
+ f'{self._project}/locations/{self._location}/endpoints/'
356
+ f'openapi/chat/completions'
357
+ )
358
+
359
+ def request(
360
+ self,
361
+ prompt: lf.Message,
362
+ sampling_options: lf.LMSamplingOptions
363
+ ):
364
+ request = super().request(prompt, sampling_options)
365
+ request['model'] = f'meta/{self.model}'
366
+ return request
367
+
368
+ @property
369
+ def max_concurrency(self) -> int:
370
+ rpm = LLAMA_MODELS[self.model].get('rpm', 0)
371
+ tpm = LLAMA_MODELS[self.model].get('tpm', 0)
372
+ return self.rate_to_max_concurrency(
373
+ requests_per_min=rpm, tokens_per_min=tpm
374
+ )
375
+
376
+ def estimate_cost(
377
+ self,
378
+ num_input_tokens: int,
379
+ num_output_tokens: int
380
+ ) -> float | None:
381
+ """Estimate the cost based on usage."""
382
+ cost_per_1m_input_tokens = LLAMA_MODELS[self.model].get(
383
+ 'cost_per_1m_input_tokens', None
384
+ )
385
+ cost_per_1m_output_tokens = LLAMA_MODELS[self.model].get(
386
+ 'cost_per_1m_output_tokens', None
387
+ )
388
+ if cost_per_1m_output_tokens is None or cost_per_1m_input_tokens is None:
389
+ return None
390
+ return (
391
+ cost_per_1m_input_tokens * num_input_tokens
392
+ + cost_per_1m_output_tokens * num_output_tokens
393
+ ) / 1000_000
394
+
395
+
396
+ # pylint: disable=invalid-name
397
+ class VertexAILlama3_2_90B(VertexAILlama):
398
+ """Llama 3.2 90B vision instruct model on VertexAI."""
399
+
400
+ model = 'llama-3.2-90b-vision-instruct-maas'
401
+
402
+
403
+ class VertexAILlama3_1_405B(VertexAILlama):
404
+ """Llama 3.1 405B instruct model on VertexAI."""
405
+
406
+ model = 'llama-3.1-405b-instruct-maas'
407
+
408
+
409
+ class VertexAILlama3_1_70B(VertexAILlama):
410
+ """Llama 3.1 70B instruct model on VertexAI."""
411
+
412
+ model = 'llama-3.1-70b-instruct-maas'
413
+
414
+
415
+ class VertexAILlama3_1_8B(VertexAILlama):
416
+ """Llama 3.1 8B instruct model on VertexAI."""
417
+
418
+ model = 'llama-3.1-8b-instruct-maas'
419
+ # pylint: enable=invalid-name
420
+
421
+ #
422
+ # Mistral models on Vertex AI.
423
+ # pylint: disable=line-too-long
424
+ # Pricing: https://cloud.google.com/vertex-ai/generative-ai/pricing?_gl=1*ukuk6u*_ga*MjEzMjc4NjM2My4xNzMzODg4OTg3*_ga_WH2QY8WWF5*MTczNzEzNDU1Mi4xMjQuMS4xNzM3MTM0NzczLjU5LjAuMA..#mistral-models
425
+ # pylint: enable=line-too-long
426
+
427
+
428
+ MISTRAL_MODELS = {
429
+ 'mistral-large-2411': pg.Dict(
430
+ latest_update='2024-11-21',
431
+ in_service=True,
432
+ rpm=0,
433
+ tpm=0,
434
+ # GA.
435
+ cost_per_1m_input_tokens=2,
436
+ cost_per_1m_output_tokens=6,
437
+ ),
438
+ 'mistral-large@2407': pg.Dict(
439
+ latest_update='2024-07-24',
440
+ in_service=True,
441
+ rpm=0,
442
+ tpm=0,
443
+ # GA.
444
+ cost_per_1m_input_tokens=2,
445
+ cost_per_1m_output_tokens=6,
446
+ ),
447
+ 'mistral-nemo@2407': pg.Dict(
448
+ latest_update='2024-07-24',
449
+ in_service=True,
450
+ rpm=0,
451
+ tpm=0,
452
+ # GA.
453
+ cost_per_1m_input_tokens=0.15,
454
+ cost_per_1m_output_tokens=0.15,
455
+ ),
456
+ 'codestral-2501': pg.Dict(
457
+ latest_update='2025-01-13',
458
+ in_service=True,
459
+ rpm=0,
460
+ tpm=0,
461
+ # GA.
462
+ cost_per_1m_input_tokens=0.3,
463
+ cost_per_1m_output_tokens=0.9,
464
+ ),
465
+ 'codestral@2405': pg.Dict(
466
+ latest_update='2024-05-29',
467
+ in_service=True,
468
+ rpm=0,
469
+ tpm=0,
470
+ # GA.
471
+ cost_per_1m_input_tokens=0.2,
472
+ cost_per_1m_output_tokens=0.6,
473
+ ),
474
+ }
475
+
476
+
477
+ @pg.use_init_args(['model'])
478
+ @pg.members([('api_endpoint', pg.typing.Str().freeze(''))])
479
+ class VertexAIMistral(VertexAI, openai_compatible.OpenAICompatible):
480
+ """Mistral AI models on VertexAI."""
481
+
482
+ model: pg.typing.Annotated[
483
+ pg.typing.Enum(pg.MISSING_VALUE, list(MISTRAL_MODELS.keys())),
484
+ 'Mistral model ID.',
485
+ ]
486
+
487
+ locations: Annotated[
488
+ Literal['us-central1', 'europe-west4'],
489
+ (
490
+ 'GCP locations with Mistral models hosted. '
491
+ 'See https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/mistral#regions-quotas'
492
+ )
493
+ ] = 'us-central1'
494
+
495
+ @property
496
+ def api_endpoint(self) -> str:
497
+ assert self._api_initialized
498
+ return (
499
+ f'https://{self._location}-aiplatform.googleapis.com/v1/projects/'
500
+ f'{self._project}/locations/{self._location}/publishers/mistralai/'
501
+ f'models/{self.model}:rawPredict'
502
+ )
503
+
504
+ @property
505
+ def max_concurrency(self) -> int:
506
+ rpm = MISTRAL_MODELS[self.model].get('rpm', 0)
507
+ tpm = MISTRAL_MODELS[self.model].get('tpm', 0)
508
+ return self.rate_to_max_concurrency(
509
+ requests_per_min=rpm, tokens_per_min=tpm
510
+ )
511
+
512
+ def estimate_cost(
513
+ self,
514
+ num_input_tokens: int,
515
+ num_output_tokens: int
516
+ ) -> float | None:
517
+ """Estimate the cost based on usage."""
518
+ cost_per_1m_input_tokens = MISTRAL_MODELS[self.model].get(
519
+ 'cost_per_1m_input_tokens', None
520
+ )
521
+ cost_per_1m_output_tokens = MISTRAL_MODELS[self.model].get(
522
+ 'cost_per_1m_output_tokens', None
523
+ )
524
+ if cost_per_1m_output_tokens is None or cost_per_1m_input_tokens is None:
525
+ return None
526
+ return (
527
+ cost_per_1m_input_tokens * num_input_tokens
528
+ + cost_per_1m_output_tokens * num_output_tokens
529
+ ) / 1000_000
530
+
531
+
532
+ # pylint: disable=invalid-name
533
+ class VertexAIMistralLarge_20241121(VertexAIMistral):
534
+ """Mistral Large model on VertexAI released on 2024/11/21."""
535
+
536
+ model = 'mistral-large-2411'
537
+
538
+
539
+ class VertexAIMistralLarge_20240724(VertexAIMistral):
540
+ """Mistral Large model on VertexAI released on 2024/07/24."""
541
+
542
+ model = 'mistral-large@2407'
543
+
544
+
545
+ class VertexAIMistralNemo_20240724(VertexAIMistral):
546
+ """Mistral Nemo model on VertexAI released on 2024/07/24."""
547
+
548
+ model = 'mistral-nemo@2407'
549
+
550
+
551
+ class VertexAICodestral_20250113(VertexAIMistral):
552
+ """Codestral model on VertexAI released on 2025/01/13."""
553
+
554
+ model = 'codestral-2501'
555
+
556
+
557
+ class VertexAICodestral_20240529(VertexAIMistral):
558
+ """Codestral model on VertexAI released on 2024/05/29."""
559
+
560
+ model = 'codestral@2405'
561
+ # pylint: enable=invalid-name
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: langfun
3
- Version: 0.1.2.dev202501170804
3
+ Version: 0.1.2.dev202501180803
4
4
  Summary: Langfun: Language as Functions.
5
5
  Home-page: https://github.com/google/langfun
6
6
  Author: Langfun Authors
@@ -71,7 +71,7 @@ langfun/core/eval/v2/reporting.py,sha256=QOp5jX761Esvi5w_UIRLDqPY_XRO6ru02-DOrdq
71
71
  langfun/core/eval/v2/reporting_test.py,sha256=UmYSAQvD3AIXsSyWQ-WD2uLtEISYpmBeoKY5u5Qwc8E,5696
72
72
  langfun/core/eval/v2/runners.py,sha256=DKEmSlGXjOXKWFdBhTpLy7tMsBHZHd1Brl3hWIngsSQ,15931
73
73
  langfun/core/eval/v2/runners_test.py,sha256=A37fKK2MvAVTiShsg_laluJzJ9AuAQn52k7HPbfD0Ks,11666
74
- langfun/core/llms/__init__.py,sha256=wA6t_E3peTYTjsW6uOHnOs9wjQ_Tj1WYlhVVLk2Sjcg,6867
74
+ langfun/core/llms/__init__.py,sha256=50mJagAgkIhMwhOyHxGq_O5st4HhpnE-okeYzc7GU6c,7667
75
75
  langfun/core/llms/anthropic.py,sha256=z_DWDpR1VKNzv6wq-9CXLzWdqCDXRKuVFacJNpgBqAs,10826
76
76
  langfun/core/llms/anthropic_test.py,sha256=zZ2eSP8hhVv-RDSWxT7wX-NS5DfGfQmCjS9P0pusAHM,6556
77
77
  langfun/core/llms/compositional.py,sha256=csW_FLlgL-tpeyCOTVvfUQkMa_zCN5Y2I-YbSNuK27U,2872
@@ -80,7 +80,7 @@ langfun/core/llms/deepseek.py,sha256=Y7DlLUWrukbPVyBMesppd-m75Q-PxD0b3KnMKaoY_8I
80
80
  langfun/core/llms/deepseek_test.py,sha256=dS72i52bwMpCN4dJDvpJI59AnNChpwxS5eYYFrhGh90,1843
81
81
  langfun/core/llms/fake.py,sha256=gCHBYBLvBCsC78HI1hpoqXCS-p1FMTgY1P1qh_sGBPk,3070
82
82
  langfun/core/llms/fake_test.py,sha256=2h13qkwEz_JR0mtUDPxdAhQo7MueXaFSwsD2DIRDW9g,7653
83
- langfun/core/llms/gemini.py,sha256=tfM4vrt0WnvnrxRhWXZWh7Gp8dYYfMnSbi9uOstkSak,17399
83
+ langfun/core/llms/gemini.py,sha256=itwTCmQHRjwSjt7_UzFfaat23gyRL-El4qmJrg-OGVA,17398
84
84
  langfun/core/llms/gemini_test.py,sha256=2ERhYWCJwnfDTQbCaZHFuB1TdWJFrOBS7yyCBInIdQk,6129
85
85
  langfun/core/llms/google_genai.py,sha256=85Vmx5QmsziON03PRsFQINSu5NF6pAAuFFhUdDteWGc,3662
86
86
  langfun/core/llms/google_genai_test.py,sha256=JZf_cbQ4GGGpwiQCLjFJn7V4jxBBqgZhIx91AzbGKVo,1250
@@ -94,7 +94,7 @@ langfun/core/llms/openai_compatible_test.py,sha256=0uFYhCiuHo2Wrlgj16-GRG6rW8P6E
94
94
  langfun/core/llms/openai_test.py,sha256=m85YjGCvWvV5ZYagjC0FqI0FcqyCEVCbUUs8Wm3iUrc,2475
95
95
  langfun/core/llms/rest.py,sha256=sWbYUV8S3SuOg9giq7xwD-xDRfaF7NP_ig7bI52-Rj4,3442
96
96
  langfun/core/llms/rest_test.py,sha256=zWGiI08f9gXsoQPJS9TlX1zD2uQLrJUB-1VpAJXRHfs,3475
97
- langfun/core/llms/vertexai.py,sha256=JV9iHsCM3Ee-4nE1ENNkTXIYGxjCHxrEeir175YpCM8,7869
97
+ langfun/core/llms/vertexai.py,sha256=SVvLTqQZ6Ha8wZh3azkh4g3O838CpNkuP3XlgIrLMKo,15751
98
98
  langfun/core/llms/vertexai_test.py,sha256=6eLQOyeL5iGZOIWb39sFcf1TgYD_6TBGYdMO4UIvhf4,3333
99
99
  langfun/core/llms/cache/__init__.py,sha256=QAo3InUMDM_YpteNnVCSejI4zOsnjSMWKJKzkb3VY64,993
100
100
  langfun/core/llms/cache/base.py,sha256=rt3zwmyw0y9jsSGW-ZbV1vAfLxQ7_3AVk0l2EySlse4,3918
@@ -146,8 +146,8 @@ langfun/core/templates/demonstration.py,sha256=vCrgYubdZM5Umqcgp8NUVGXgr4P_c-fik
146
146
  langfun/core/templates/demonstration_test.py,sha256=SafcDQ0WgI7pw05EmPI2S4v1t3ABKzup8jReCljHeK4,2162
147
147
  langfun/core/templates/selfplay.py,sha256=yhgrJbiYwq47TgzThmHrDQTF4nDrTI09CWGhuQPNv-s,2273
148
148
  langfun/core/templates/selfplay_test.py,sha256=Ot__1P1M8oJfoTp-M9-PQ6HUXqZKyMwvZ5f7yQ3yfyM,2326
149
- langfun-0.1.2.dev202501170804.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
150
- langfun-0.1.2.dev202501170804.dist-info/METADATA,sha256=X3MDNl6D6StuwltvUclYhE20uKNQ2x8lY3CkPggJyI4,8172
151
- langfun-0.1.2.dev202501170804.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
152
- langfun-0.1.2.dev202501170804.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
153
- langfun-0.1.2.dev202501170804.dist-info/RECORD,,
149
+ langfun-0.1.2.dev202501180803.dist-info/LICENSE,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
150
+ langfun-0.1.2.dev202501180803.dist-info/METADATA,sha256=W9jkpCCOZx-Tl8sNz3y1IdVZNG48qcjs21airG2TTI0,8172
151
+ langfun-0.1.2.dev202501180803.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
152
+ langfun-0.1.2.dev202501180803.dist-info/top_level.txt,sha256=RhlEkHxs1qtzmmtWSwYoLVJAc1YrbPtxQ52uh8Z9VvY,8
153
+ langfun-0.1.2.dev202501180803.dist-info/RECORD,,