lalamo 0.5.7__py3-none-any.whl → 0.5.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lalamo/__init__.py CHANGED
@@ -8,24 +8,24 @@ from lalamo.message_processor import (
     ToolSchema,
     UserMessage,
 )
-from lalamo.model_import import ModelSpec
-from lalamo.models import LanguageModel, Router
+from lalamo.model_import import ModelSpec, import_model
+from lalamo.models import ClassifierModel, LanguageModel
 from lalamo.speculator import (
     CollectTracesEvent,
     SpeculatorTrainingEvent,
 )
 
-__version__ = "0.5.7"
+__version__ = "0.5.8"
 
 __all__ = [
     "AssistantMessage",
+    "ClassifierModel",
     "CollectTracesEvent",
     "ContentBlock",
     "Image",
     "LanguageModel",
     "Message",
     "ModelSpec",
-    "Router",
     "SpeculatorTrainingEvent",
     "SystemMessage",
     "ToolSchema",
@@ -33,5 +33,6 @@ __all__ = [
     "collect_traces",
     "convert",
     "estimate_batchsize",
+    "import_model",
     "train",
 ]
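
The net effect on the package's public surface: `Router` and `RouterConfig` are gone, `ClassifierModel` (and, via `lalamo.models`, `ClassifierModelConfig`) take their place, and `import_model` is now re-exported from the package root. A minimal migration sketch, assuming the renamed classes are otherwise drop-in replacements for the old ones:

```python
# Migration sketch for the 0.5.7 -> 0.5.8 rename; apart from the new names,
# the behaviour of the renamed classes is assumed unchanged.

# 0.5.7
# from lalamo.models import Router, RouterConfig

# 0.5.8
from lalamo import ClassifierModel, import_model  # both exported at the package root
from lalamo.models import ClassifierModelConfig
```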
lalamo/main.py CHANGED
@@ -43,7 +43,7 @@ from lalamo.model_import.common import (
     InitializingModelEvent,
     StatusEvent,
 )
-from lalamo.models import LanguageModelConfig, RouterConfig
+from lalamo.models import ClassifierModelConfig, LanguageModelConfig
 from lalamo.modules import config_converter
 from lalamo.speculator.estimator import EstimateBatchsizeFromMemoryEvent, estimate_batchsize_from_memory
 from lalamo.speculator.inference import CollectTracesEvent, inference_collect_traces
@@ -149,7 +149,7 @@ def chat(
     messages.append(model.message_processor.parse_response(model_response_text))
 
 
-@app.command(help="Classify given message with a Router type of model.")
+@app.command(help="Classify given message with a Classifier type of model.")
 def classify(
     model_path: Annotated[
         Path,
@@ -165,7 +165,7 @@ def classify(
         transient=True,
     ) as progress:
         loading_task = progress.add_task("🚀 [cyan]Loading model...[/cyan]")
-        model = RouterConfig.load_model(model_path)
+        model = ClassifierModelConfig.load_model(model_path)
        progress.remove_task(loading_task)
        warmup_task = progress.add_task("🔥 Warming up...")
        model.classify_chat([UserMessage(content="warmup message")])
lalamo/model_import/common.py CHANGED
@@ -14,7 +14,7 @@ from jaxtyping import DTypeLike
 from tokenizers import Tokenizer
 
 from lalamo.message_processor import MessageProcessor, MessageProcessorConfig
-from lalamo.models import GenerationConfig, LanguageModel, LanguageModelConfig, Router, RouterConfig
+from lalamo.models import ClassifierModel, ClassifierModelConfig, GenerationConfig, LanguageModel, LanguageModelConfig
 from lalamo.modules import Classifier, Decoder, LalamoModule
 from lalamo.quantization import QuantizationMode
 
@@ -72,7 +72,8 @@ class ModelMetadata:
     repo: str
     use_cases: tuple[UseCase, ...]
     model_type: ModelType
-    model_config: LanguageModelConfig | RouterConfig
+    model_config: LanguageModelConfig | ClassifierModelConfig
+    grammar_start_tokens: tuple[str, ...]
 
 
 def download_file(
@@ -118,7 +119,7 @@ def download_config_file(
 
 
 class ImportResults(NamedTuple):
-    model: LanguageModel | Router
+    model: LanguageModel | ClassifierModel
     metadata: ModelMetadata
 
 
@@ -145,6 +146,8 @@ def import_message_processor(
            case FileSpec(_) as file_spec:
                chat_template_file = download_file(file_spec, model_spec.repo, output_dir)
                prompt_template = chat_template_file.read_text()
+           case str() as template_string:
+               prompt_template = template_string
            case None:
                raise ValueError("No chat template specified.")
    else:
@@ -263,14 +266,14 @@ def _import_language_model(
     return language_model, language_model_config
 
 
-def _import_router(
+def _import_classifier(
     model_spec: ModelSpec,
     *,
     context_length: int | None = None,
     precision: DTypeLike | None = None,
     accumulation_precision: DTypeLike = jnp.float32,
     progress_callback: Callable[[StatusEvent], None] | None = None,
-) -> tuple[Router, RouterConfig]:
+) -> tuple[ClassifierModel, ClassifierModelConfig]:
     foreign_classifier_config_file = download_config_file(model_spec)
     foreign_classifier_config = model_spec.config_type.from_json(foreign_classifier_config_file)
     assert isinstance(foreign_classifier_config, ForeignClassifierConfig)
@@ -293,12 +296,12 @@ def _import_router(
 
     message_processor = import_message_processor(model_spec)
 
-    router_config = RouterConfig(
+    classifier_model_config = ClassifierModelConfig(
         model_config=classifier.config,
         message_processor_config=message_processor.config,
     )
-    router_model = Router(router_config, classifier, message_processor)
-    return router_model, router_config
+    classifier_model = ClassifierModel(classifier_model_config, classifier, message_processor)
+    return classifier_model, classifier_model_config
 
 
 def import_model(
@@ -324,8 +327,8 @@ def import_model(
                accumulation_precision=accumulation_precision,
                progress_callback=progress_callback,
            )
-        case ModelType.ROUTER_MODEL:
-            model, config = _import_router(
+        case ModelType.CLASSIFIER_MODEL:
+            model, config = _import_classifier(
                model_spec,
                context_length=context_length,
                precision=precision,
@@ -344,5 +347,6 @@ def import_model(
        use_cases=model_spec.use_cases,
        model_type=model_spec.model_type,
        model_config=config,
+       grammar_start_tokens=model_spec.grammar_start_tokens,
    )
    return ImportResults(model, metadata)
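
Taken together, the hunks above route specs whose `model_type` is `CLASSIFIER_MODEL` through `_import_classifier` and add the `grammar_start_tokens` field to `ModelMetadata`. A rough usage sketch; the spec lookup and keyword defaults below are assumptions, and only the argument names visible in the hunks are taken from this diff:

```python
# Rough sketch only: import_model's exact public signature is not shown in this
# diff; the keyword names mirror those passed through to _import_classifier above.
import jax.numpy as jnp

from lalamo.model_import import import_model
from lalamo.model_import.model_specs import ALL_MODELS

# Pick the Mirai classifier spec declared in model_specs/mirai.py below.
spec = next(s for s in ALL_MODELS if s.repo == "trymirai/chat-moderation-router")

model, metadata = import_model(spec, precision=jnp.bfloat16)
# For CLASSIFIER_MODEL specs the returned model is a ClassifierModel;
# metadata.grammar_start_tokens is the new field and defaults to an empty tuple.
print(type(model).__name__, metadata.grammar_start_tokens)
```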
lalamo/model_import/model_specs/__init__.py CHANGED
@@ -5,7 +5,7 @@ from .gpt_oss import GPT_OSS_MODELS
 from .huggingface import HUGGINGFACE_MODELS
 from .llama import LLAMA_MODELS
 from .llamba import LLAMBA_MODELS
-from .mirai import MIRAI_ROUTER_MODELS
+from .mirai import MIRAI_CLASSIFIER_MODELS
 from .mistral import MISTRAL_MODELS
 
 # from .pleias import PLEIAS_MODELS
@@ -35,7 +35,7 @@ ALL_MODEL_LISTS = [
     POLARIS_MODELS,
     QWEN_MODELS,
     REKA_MODELS,
-    MIRAI_ROUTER_MODELS,
+    MIRAI_CLASSIFIER_MODELS,
 ]
 
 ALL_MODELS = [model for model_list in ALL_MODEL_LISTS for model in model_list]
lalamo/model_import/model_specs/common.py CHANGED
@@ -32,7 +32,7 @@ __all__ = [
 
 class ModelType(StrEnum):
     LANGUAGE_MODEL = "language_model"
-    ROUTER_MODEL = "router_model"
+    CLASSIFIER_MODEL = "classifier_model"
 
 
 def cast_if_float(array: Array, cast_to: DTypeLike) -> Array:
@@ -84,7 +84,7 @@ class ConfigMap:
     tokenizer: FileSpec = field(default=FileSpec("tokenizer.json"))
     tokenizer_config: FileSpec = field(default=FileSpec("tokenizer_config.json"))
     generation_config: FileSpec | None = field(default=FileSpec("generation_config.json"))
-    chat_template: FileSpec | JSONFieldSpec | None = None
+    chat_template: FileSpec | JSONFieldSpec | str | None = None
 
 
 def _is_foreign_config_type(t: object) -> bool:
@@ -114,12 +114,29 @@ def _unstructure_foreign_config_factory(t: object, c: cattrs.Converter) -> Calla
     return _hook
 
 
+def _structure_chat_template(value: object, _type: object) -> FileSpec | JSONFieldSpec | str | None:
+    if value is None:
+        return None
+    if isinstance(value, str):
+        return value
+    if isinstance(value, dict):
+        if "file_spec" in value and "field_name" in value:
+            return JSONFieldSpec(
+                file_spec=FileSpec(**value["file_spec"]),
+                field_name=value["field_name"],
+            )
+        if "filename" in value:
+            return FileSpec(**value)
+    raise ValueError(f"Invalid chat_template value: {value}")
+
+
 @dataclass(frozen=True)
 class ModelSpec:
     _converter: ClassVar[cattrs.Converter] = cattrs.Converter()
 
     _converter.register_structure_hook_factory(_is_foreign_config_type, _structure_foreign_config_factory)
     _converter.register_unstructure_hook_factory(_is_foreign_config_type, _unstructure_foreign_config_factory)
+    _converter.register_structure_hook(FileSpec | JSONFieldSpec | str | None, _structure_chat_template)
 
     vendor: str
     family: str
@@ -137,6 +154,7 @@ class ModelSpec:
     model_type: ModelType = ModelType.LANGUAGE_MODEL
     configs: ConfigMap = field(default=ConfigMap())
     use_cases: tuple[UseCase, ...] = tuple()
+    grammar_start_tokens: tuple[str, ...] = tuple()
 
     @classmethod
     def from_json(cls, json_data: dict) -> "ModelSpec":
@@ -162,6 +180,7 @@ def awq_model_spec(
        configs=model_spec.configs,
        weights_type=model_spec.weights_type,
        use_cases=model_spec.use_cases,
+       grammar_start_tokens=model_spec.grammar_start_tokens,
    )
 
 
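The `chat_template` field of `ConfigMap` now additionally accepts an inline template string, and the new `_structure_chat_template` hook teaches the `cattrs` converter to tell the accepted shapes apart when a `ModelSpec` is structured from JSON. A small sketch of the value shapes the field can now hold; the constructor calls are illustrative and the import path is inferred from the wheel layout in the RECORD below:

```python
# Illustrative only: the accepted chat_template shapes follow the branches of
# _structure_chat_template above; import path inferred from the wheel layout.
from lalamo.model_import.model_specs.common import ConfigMap, FileSpec

# A template file inside the model repo (unchanged behaviour).
from_file = ConfigMap(chat_template=FileSpec("chat_template.jinja"))

# New in 0.5.8: an inline Jinja string, passed straight through to the
# message processor as the prompt template.
inline = ConfigMap(chat_template="{{ messages[-1].content }}")

# None still means "no chat template specified".
without_template = ConfigMap(chat_template=None)
```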
lalamo/model_import/model_specs/mirai.py CHANGED
@@ -2,9 +2,9 @@ from lalamo.model_import.decoder_configs.huggingface import ModernBERTConfig
 
 from .common import ConfigMap, FileSpec, ModelSpec, ModelType
 
-__all__ = ["MIRAI_ROUTER_MODELS"]
+__all__ = ["MIRAI_CLASSIFIER_MODELS"]
 
-MIRAI_ROUTER_MODELS = [
+MIRAI_CLASSIFIER_MODELS = [
     ModelSpec(
         vendor="trymirai",
         family="ModernBERT",
@@ -14,7 +14,7 @@ MIRAI_ROUTER_MODELS = [
         repo="trymirai/chat-moderation-router",
         config_type=ModernBERTConfig,
         use_cases=tuple(),
-        model_type=ModelType("router_model"),
+        model_type=ModelType("classifier_model"),
         configs=ConfigMap(chat_template=FileSpec("chat_template.jinja")),
     ),
 ]
lalamo/models/__init__.py CHANGED
@@ -1,10 +1,10 @@
+from .classifier import ClassifierModel, ClassifierModelConfig
 from .language_model import GenerationConfig, LanguageModel, LanguageModelConfig
-from .router import Router, RouterConfig
 
 __all__ = [
+    "ClassifierModel",
+    "ClassifierModelConfig",
     "GenerationConfig",
     "LanguageModel",
     "LanguageModelConfig",
-    "Router",
-    "RouterConfig",
 ]
lalamo/models/router.py → lalamo/models/classifier.py RENAMED
@@ -13,29 +13,29 @@ from lalamo.modules import Classifier, ClassifierConfig, LalamoModule
 from .common import TextModel, TextModelConfig
 
 __all__ = [
-    "Router",
-    "RouterConfig",
+    "ClassifierModel",
+    "ClassifierModelConfig",
 ]
 
 
 @dataclass(frozen=True)
-class RouterConfig(TextModelConfig[ClassifierConfig]):
+class ClassifierModelConfig(TextModelConfig[ClassifierConfig]):
     def init(
         self,
         model: LalamoModule,
         message_processor: MessageProcessor,
-    ) -> "Router":
+    ) -> "ClassifierModel":
         assert isinstance(model, Classifier)
-        return Router(self, model, message_processor)
+        return ClassifierModel(self, model, message_processor)
 
     @classmethod
-    def load_model(cls, path: Path | str) -> "Router":
+    def load_model(cls, path: Path | str) -> "ClassifierModel":
         result = super().load_model(path)
-        assert isinstance(result, Router)
+        assert isinstance(result, ClassifierModel)
         return result
 
 
-class Router(TextModel[RouterConfig, Classifier]):
+class ClassifierModel(TextModel[ClassifierModelConfig, Classifier]):
     def label_output_logits(self, logits: Float[Array, "batch logits"]) -> dict[str, Float[Array, " batch"]]:
         output_labels = self.model.config.output_labels
         probabilities = jax.nn.sigmoid(logits)
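
The renamed class keeps the surface that the `classify` command in `lalamo/main.py` relies on: `ClassifierModelConfig.load_model` and `classify_chat`. A usage sketch along those lines; the model path is a placeholder and the exact return value of `classify_chat` is not shown in this diff:

```python
# Sketch mirroring the classify command above; the path is a placeholder and
# the shape of the classify_chat result is an assumption.
from lalamo.message_processor import UserMessage
from lalamo.models import ClassifierModelConfig

model = ClassifierModelConfig.load_model("path/to/exported-classifier")  # returns a ClassifierModel
result = model.classify_chat([UserMessage(content="Is this message allowed?")])
print(result)
```

Per the last hunk, `label_output_logits` turns raw classifier logits into per-label sigmoid probabilities keyed by the model's `output_labels`.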
lalamo-0.5.7.dist-info/METADATA → lalamo-0.5.8.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lalamo
-Version: 0.5.7
+Version: 0.5.8
 Summary: JAX library for optimization and export of models for use with the UZU inference engine.
 Requires-Python: <4,>=3.12
 Description-Content-Type: text/markdown
lalamo-0.5.7.dist-info/RECORD → lalamo-0.5.8.dist-info/RECORD RENAMED
@@ -1,6 +1,6 @@
-lalamo/__init__.py,sha256=ynqoSW4I6eQ92AR9jzr_8XMEiGpDGjMHHGUrjlTkmb0,762
+lalamo/__init__.py,sha256=ZJ5Cjq4OoGVrjba9zUYIYnFGRKZkCkhBLaakdt4D008,814
 lalamo/common.py,sha256=5NUFD26yQgOnEEk3LaQnce8n-VwJxILkEpFesHZhtQU,3820
-lalamo/main.py,sha256=Lqv-lU6hRSqbJeVOheZoKllK1LaPHTuR_8jNTPC7UZg,23956
+lalamo/main.py,sha256=GgUT7lT48-XQuAEH7qzsDKG8Lx9iBf-sYBIRhZL9q7E,23978
 lalamo/message_processor.py,sha256=bSUAQg7CemLTnBV4LtPxJBicAalruDCA-JXjkTYPZ8U,5797
 lalamo/quantization.py,sha256=8o6ryIZLzzDYQuvBTboPfaVVdfijAKGpTxOcg3GKVD8,2752
 lalamo/registry_abc.py,sha256=ENjXiD_wEH100fNjG-W5Em1L_EQ0Lf0pdRhRGvf3qZk,2197
@@ -11,7 +11,7 @@ lalamo/data/huggingface_message.py,sha256=-7lN9eIcETQzt1Pnx3d4d8p3_I7WYMNf4mp1P9
 lalamo/data/lalamo_completions.py,sha256=U_m3UNSJASUFz3rJq_taZOtL_U4B8Oj-ndkTF-JH-v4,1509
 lalamo/data/utils.py,sha256=B96gLaULyStKYuR8wjFdTpFc6YIDC8EEvGh1eiMe_Ec,338
 lalamo/model_import/__init__.py,sha256=Z8pS9rbKKx1QgUy7KZtHxiNWlZhII3mdovT9d37vAxg,168
-lalamo/model_import/common.py,sha256=dS8HbDCSReaffJ0WWdVpYjk-VGv_Ga8C3GN7z6bdxlY,11992
+lalamo/model_import/common.py,sha256=tdZsteRsxL6DVUFwHw_1eeNLckflOdAaIm7Wm9eJzxM,12311
 lalamo/model_import/huggingface_generation_config.py,sha256=mot6VQ6ezCtEhN6VjhnvaU-nR5P5T2BuBUgpFNnWJxU,1495
 lalamo/model_import/huggingface_tokenizer_config.py,sha256=xvwdmio7b9nhn2H3uMBVligiYj58JaCFCvHY3-8dBvM,2502
 lalamo/model_import/decoder_configs/__init__.py,sha256=1ZqMcEHvCJjMIZ9iNyY31XMXOaFxB-NbqIU01BtmcEk,641
@@ -33,24 +33,24 @@ lalamo/model_import/loaders/common.py,sha256=kkugV-bMQlN1zvGHoj3uc7z0FbXKoMtXEBT
 lalamo/model_import/loaders/executorch.py,sha256=t2Ey_mBMNC8bTSTdYWjuGXdPTRoohFlYrqtWyNkBU_8,9219
 lalamo/model_import/loaders/huggingface.py,sha256=ITA0Y_kCDFL4Tanuvd1NWUvV77WEn0VEzkcX5Whlwys,29835
 lalamo/model_import/loaders/utils.py,sha256=eiX3WKFRrAfBY-dugodscNInl5o5w3KmVcgma4atpGY,2456
-lalamo/model_import/model_specs/__init__.py,sha256=STy1Au_mwJYcl_zSei6FEp8mmGZg-Fia1tpUVAO9x9Y,1167
-lalamo/model_import/model_specs/common.py,sha256=2E6mtpUS613jsp_SDiROOvvWFB6T5U2tDkbIBXVWjmI,5780
+lalamo/model_import/model_specs/__init__.py,sha256=V7S5Uo3GVBUG7KD0czMtmWZcQ-FJgryTZlxC7Abn_c0,1175
+lalamo/model_import/model_specs/common.py,sha256=RVPlNWHG_5OvU1W3YcOpqYz59Dh8plDmd7z1xNrqmaY,6585
 lalamo/model_import/model_specs/deepseek.py,sha256=Umef93_ZBuq93yYsejIRNwj3udoln1gHfrv3SK5jyMo,417
 lalamo/model_import/model_specs/gemma.py,sha256=irWgylL-pc7y3Gn5DK3fjKoCT9kJWH3B7mTa-1Gmxqc,1306
 lalamo/model_import/model_specs/gpt_oss.py,sha256=PLo0QGrXKdX61ReTRdyOaP_EH3Dmj5lp3fpJjZRwRVA,542
 lalamo/model_import/model_specs/huggingface.py,sha256=eF8ItF5reFrFkjYxwiAJcFwUAlN6CpXfM-aQ8a92ItM,430
 lalamo/model_import/model_specs/llama.py,sha256=Ml-xvRGlXBT9NJhmEpwgNo6C84oBSMYgA1_PrCYGcAw,990
 lalamo/model_import/model_specs/llamba.py,sha256=Ic3sWTv34FLJ4fG6OR_Mc5goGJQR6fa5b2WbVXbn9FA,1471
-lalamo/model_import/model_specs/mirai.py,sha256=20sLTZZQ_kn8ULfID71xdnVhjW7OjcJDf96BvVnCBUQ,605
+lalamo/model_import/model_specs/mirai.py,sha256=eifYVV5-fABiLH6rr82_DiVFtDyqpW0vbvXCYsQQzto,617
 lalamo/model_import/model_specs/mistral.py,sha256=HAojorjOqsJn2DoMBzYRw8A70qCslhFEsE9AF5xumlg,1278
 lalamo/model_import/model_specs/pleias.py,sha256=5sRpZGYwLdsav6bLiW-459y1Cs9iJKgKkBIuGsOxtsQ,368
 lalamo/model_import/model_specs/polaris.py,sha256=Mw1-6bByjDmPIKlIUIV46CsmV5xUp_laI5Qquo5DmAQ,520
 lalamo/model_import/model_specs/qwen.py,sha256=qzLmTveATmnwNFQSFJlffcXw7syFnrCmKf9ggkkkw1Y,7050
 lalamo/model_import/model_specs/reka.py,sha256=dOUYbEMMvovQdzQuBO_DCsjGI39syhoKCvnxLkNEDCw,423
-lalamo/models/__init__.py,sha256=HfAYc4mteKu7BMyIP8aTFliLKGWJzLk1kPcdKurb8eo,243
+lalamo/models/__init__.py,sha256=Vn5PcvSqKppIchkSZwQVTn_GpRvOOzZVxo5PUeDl6N8,283
+lalamo/models/classifier.py,sha256=LvL54crCVi4HVSIXuoaSLB_5jtcx74GL7kgdy2Y16Zc,2094
 lalamo/models/common.py,sha256=PDteofGxjSBWYw_mPxbN1DTUba70aOURrAIjl13SSHc,2954
 lalamo/models/language_model.py,sha256=QPeVEyhutSze7fSNhvOvwSoYt24QMk-dtTJkos38amY,13465
-lalamo/models/router.py,sha256=7KZqHVhr2TA7Qh76KfwrvyfztfZnV-P-Ss11O8dzbRg,2013
 lalamo/modules/__init__.py,sha256=xWJ4OPAF4gKd0evYwXIK5kTnbH6nI55oLAePcoDDHQ0,3730
 lalamo/modules/activations.py,sha256=U3qTQtZawPAUcoqbkIJnmTYcaNiQuSPMLcBeJ398GhI,1022
 lalamo/modules/classifier.py,sha256=_jtJ3INEq1dJP5HpUmcDk9YYzpRYlQ04zvFGaWBV6Lg,12101
@@ -80,9 +80,9 @@ lalamo/speculator/estimator.py,sha256=4D8dPZCWsrpORb7y8pQ6VsiIg1Cblvvxe6gXCoYtcD
 lalamo/speculator/inference.py,sha256=5GntUgj0HQLeLn3HIHnVX8EEO0EBzmKeP5-_U7kdFAM,3670
 lalamo/speculator/ngram.py,sha256=95mdfAWhx4d5XOnOwhyhElnvcy6nlUjYhcbJzqDs414,5875
 lalamo/speculator/utils.py,sha256=0wZoMMIzzk0Q-3zq5H5f-JBplePNHxywndkrNtOJOyo,1697
-lalamo-0.5.7.dist-info/licenses/LICENSE,sha256=diHRfjSEJHD1nnEeMIfMRCjR3UERf8bT3eseD6b1ayA,1072
-lalamo-0.5.7.dist-info/METADATA,sha256=ZzSQiR7KYoAXkBKOToa2bi9tCPRvVXvhnN5y6AO7wyQ,3146
-lalamo-0.5.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lalamo-0.5.7.dist-info/entry_points.txt,sha256=qli7qTfnBk5WP10rOGXXEckHMtt-atJMDWd8jN89Uks,43
-lalamo-0.5.7.dist-info/top_level.txt,sha256=VHvWL5JN5XRG36NsN_MieJ7EwRihEOrEjyDaTdFJ-aI,7
-lalamo-0.5.7.dist-info/RECORD,,
+lalamo-0.5.8.dist-info/licenses/LICENSE,sha256=diHRfjSEJHD1nnEeMIfMRCjR3UERf8bT3eseD6b1ayA,1072
+lalamo-0.5.8.dist-info/METADATA,sha256=miYVR0hj7X-d1X09Bwaqf9-zKUqmljZ2qrhkV1rLICQ,3146
+lalamo-0.5.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lalamo-0.5.8.dist-info/entry_points.txt,sha256=qli7qTfnBk5WP10rOGXXEckHMtt-atJMDWd8jN89Uks,43
+lalamo-0.5.8.dist-info/top_level.txt,sha256=VHvWL5JN5XRG36NsN_MieJ7EwRihEOrEjyDaTdFJ-aI,7
+lalamo-0.5.8.dist-info/RECORD,,
File without changes