lalamo 0.5.5__py3-none-any.whl → 0.5.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lalamo/__init__.py CHANGED
@@ -15,7 +15,7 @@ from lalamo.speculator import (
     SpeculatorTrainingEvent,
 )

-__version__ = "0.5.5"
+__version__ = "0.5.7"

 __all__ = [
     "AssistantMessage",
lalamo/models/common.py CHANGED
@@ -75,7 +75,7 @@ class TextModel[ConfigT, ModelT: Decoder | Classifier](LalamoModule[ConfigT]):
         if messages is None:
             messages = [UserMessage("Tell me about London")]

-        token_ids = jnp.array(self.message_processor.tokenize_request(messages))[None:]
+        token_ids = jnp.array(self.message_processor.tokenize_request(messages))[None, :]
         _, num_tokens = token_ids.shape
         token_positions = jnp.arange(num_tokens)[None, :]
         return self.model(token_ids=token_ids, token_positions=token_positions, return_activation_trace=True)
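The indexing change above is a bug fix: indexing with [None:] is parsed as slice(None, None) and leaves the token array one-dimensional, while [None, :] inserts a leading batch axis, which the following "_, num_tokens = token_ids.shape" unpacking requires. A minimal illustration of the difference (not lalamo code; x is just a throwaway array):

import jax.numpy as jnp

x = jnp.arange(5)
print(x[None:].shape)    # (5,)  slice(None, None): the array is unchanged
print(x[None, :].shape)  # (1, 5) a new leading batch axis is added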
lalamo/speculator/estimator.py CHANGED
@@ -19,13 +19,14 @@ def estimate_memory_from_batchsize(
     memory_analysis = (
         jax.jit(
             functools.partial(
-                model.generate_tokens,
+                LanguageModel.generate_tokens,
                 max_output_length=max_output_length,
                 num_top_logits_to_return=num_logits_per_token,
             ),
             backend="cpu",  # cuda backend tries to allocate in .compile() and ooms
         )
         .lower(
+            model,
             prompt_token_ids=jax.ShapeDtypeStruct((batch_size, max_input_length), jnp.int32),
             prompt_lengths_without_padding=jax.ShapeDtypeStruct((batch_size,), jnp.int32),
         )
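The change above replaces the bound method model.generate_tokens, which functools.partial would capture in a closure so that the model's arrays become constants of the traced function, with the unbound LanguageModel.generate_tokens, passing model explicitly as the first argument to .lower(...). The model then flows through jax.jit as an ordinary traced argument alongside the jax.ShapeDtypeStruct placeholders. A minimal sketch of the same jit/lower pattern with a toy pytree; TinyModel and generate are hypothetical stand-ins, not lalamo APIs:

import functools

import jax
import jax.numpy as jnp


class TinyModel:
    # Toy stand-in for a model pytree; registered below so jit can trace its leaves.
    def __init__(self, weights):
        self.weights = weights

    def generate(self, token_ids, *, max_output_length=4):
        # Dummy "generation": project the inputs and keep the first few columns.
        return jnp.dot(token_ids.astype(jnp.float32), self.weights)[:, :max_output_length]


jax.tree_util.register_pytree_node(
    TinyModel,
    lambda m: ((m.weights,), None),
    lambda aux, leaves: TinyModel(*leaves),
)

model = TinyModel(jnp.ones((8, 8)))

# Unbound method plus an explicit model argument: the weights stay runtime inputs,
# while the prompt shape is described abstractly with ShapeDtypeStruct.
lowered = jax.jit(
    functools.partial(TinyModel.generate, max_output_length=4),
).lower(
    model,
    jax.ShapeDtypeStruct((2, 8), jnp.int32),
)
compiled = lowered.compile()
print(compiled(model, jnp.zeros((2, 8), jnp.int32)).shape)  # (2, 4)

Compared with closing over the bound method, this keeps the model's arrays as arguments of the compiled executable rather than constants captured at trace time.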
lalamo/speculator/inference.py CHANGED
@@ -30,12 +30,13 @@ def inference_collect_traces(
     generate_tokens_compiled = (
         jax.jit(
             functools.partial(
-                model.generate_tokens,
+                LanguageModel.generate_tokens,
                 max_output_length=max_output_length,
                 num_top_logits_to_return=num_top_logits_to_collect,
             ),
         )
         .lower(
+            model,
             prompt_token_ids=jax.ShapeDtypeStruct((batch_size, max_input_length), jnp.int32),
             prompt_lengths_without_padding=jax.ShapeDtypeStruct((batch_size,), jnp.int32),
         )
@@ -60,6 +61,7 @@ def inference_collect_traces(
     )

     generated = generate_tokens_compiled(
+        model,
         prompt_token_ids=padded,
         prompt_lengths_without_padding=length_without_padding,
     )
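Since the executable above was lowered against fixed jax.ShapeDtypeStruct shapes, the call site passes model as the first positional argument and supplies prompts padded to exactly (batch_size, max_input_length), with the true lengths given separately. A small illustrative helper for that padding step, assuming a pad id of 0; pad_prompts is a hypothetical name, not part of lalamo:

import jax.numpy as jnp


def pad_prompts(prompts, max_input_length, pad_id=0):
    # Pad variable-length token id lists into one fixed-shape int32 batch
    # and record each prompt's true length.
    batch = jnp.full((len(prompts), max_input_length), pad_id, dtype=jnp.int32)
    for i, ids in enumerate(prompts):
        batch = batch.at[i, : len(ids)].set(jnp.asarray(ids, dtype=jnp.int32))
    lengths = jnp.asarray([len(p) for p in prompts], dtype=jnp.int32)
    return batch, lengths


padded, length_without_padding = pad_prompts([[5, 7, 11], [2, 3]], max_input_length=8)
print(padded.shape, length_without_padding)  # (2, 8) [3 2]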
{lalamo-0.5.5.dist-info → lalamo-0.5.7.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lalamo
-Version: 0.5.5
+Version: 0.5.7
 Summary: JAX library for optimization and export of models for use with the UZU inference engine.
 Requires-Python: <4,>=3.12
 Description-Content-Type: text/markdown
{lalamo-0.5.5.dist-info → lalamo-0.5.7.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
-lalamo/__init__.py,sha256=tkT6ErAGkHuPqJ6O32MPLbltRBh__kfmPM-SpU7iVko,762
+lalamo/__init__.py,sha256=ynqoSW4I6eQ92AR9jzr_8XMEiGpDGjMHHGUrjlTkmb0,762
 lalamo/common.py,sha256=5NUFD26yQgOnEEk3LaQnce8n-VwJxILkEpFesHZhtQU,3820
 lalamo/main.py,sha256=Lqv-lU6hRSqbJeVOheZoKllK1LaPHTuR_8jNTPC7UZg,23956
 lalamo/message_processor.py,sha256=bSUAQg7CemLTnBV4LtPxJBicAalruDCA-JXjkTYPZ8U,5797
@@ -48,7 +48,7 @@ lalamo/model_import/model_specs/polaris.py,sha256=Mw1-6bByjDmPIKlIUIV46CsmV5xUp_
 lalamo/model_import/model_specs/qwen.py,sha256=qzLmTveATmnwNFQSFJlffcXw7syFnrCmKf9ggkkkw1Y,7050
 lalamo/model_import/model_specs/reka.py,sha256=dOUYbEMMvovQdzQuBO_DCsjGI39syhoKCvnxLkNEDCw,423
 lalamo/models/__init__.py,sha256=HfAYc4mteKu7BMyIP8aTFliLKGWJzLk1kPcdKurb8eo,243
-lalamo/models/common.py,sha256=b2woUDgHhCUEowo6tEjQb4J7iRVQvrptyRQTYMKrLis,2952
+lalamo/models/common.py,sha256=PDteofGxjSBWYw_mPxbN1DTUba70aOURrAIjl13SSHc,2954
 lalamo/models/language_model.py,sha256=QPeVEyhutSze7fSNhvOvwSoYt24QMk-dtTJkos38amY,13465
 lalamo/models/router.py,sha256=7KZqHVhr2TA7Qh76KfwrvyfztfZnV-P-Ss11O8dzbRg,2013
 lalamo/modules/__init__.py,sha256=xWJ4OPAF4gKd0evYwXIK5kTnbH6nI55oLAePcoDDHQ0,3730
@@ -76,13 +76,13 @@ lalamo/modules/token_mixers/state/kv_cache.py,sha256=QfnS3XgSmyDI9MBUbeLI4ABHLxi
 lalamo/modules/token_mixers/state/mamba_state.py,sha256=LHzJvNE6MkB7nrsZSNto6pxbnMJCl--JOoe9Fkcc9Mg,1642
 lalamo/speculator/__init__.py,sha256=9-tmZcbCom_lIGpJYn6xLlnEahFLFidpqmgkafmu--k,456
 lalamo/speculator/common.py,sha256=PudF_gkpe5_nQ-57sAC-foE1xCy_H2Axh5KwRoA86lo,587
-lalamo/speculator/estimator.py,sha256=SFF24TtxGVbb3hEziZ40A7aNdBd6_ZZJSX-CqmPhvA8,2503
-lalamo/speculator/inference.py,sha256=EzNLmdP-AOtIN8C39-fq-3ZnKX_yxOOwMufDEmi54zM,3624
+lalamo/speculator/estimator.py,sha256=4D8dPZCWsrpORb7y8pQ6VsiIg1Cblvvxe6gXCoYtcD4,2530
+lalamo/speculator/inference.py,sha256=5GntUgj0HQLeLn3HIHnVX8EEO0EBzmKeP5-_U7kdFAM,3670
 lalamo/speculator/ngram.py,sha256=95mdfAWhx4d5XOnOwhyhElnvcy6nlUjYhcbJzqDs414,5875
 lalamo/speculator/utils.py,sha256=0wZoMMIzzk0Q-3zq5H5f-JBplePNHxywndkrNtOJOyo,1697
-lalamo-0.5.5.dist-info/licenses/LICENSE,sha256=diHRfjSEJHD1nnEeMIfMRCjR3UERf8bT3eseD6b1ayA,1072
-lalamo-0.5.5.dist-info/METADATA,sha256=rv7bkPZ8-ZluxxZJHkGzT724Luws5wiMnGBAXNJbM58,3146
-lalamo-0.5.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lalamo-0.5.5.dist-info/entry_points.txt,sha256=qli7qTfnBk5WP10rOGXXEckHMtt-atJMDWd8jN89Uks,43
-lalamo-0.5.5.dist-info/top_level.txt,sha256=VHvWL5JN5XRG36NsN_MieJ7EwRihEOrEjyDaTdFJ-aI,7
-lalamo-0.5.5.dist-info/RECORD,,
+lalamo-0.5.7.dist-info/licenses/LICENSE,sha256=diHRfjSEJHD1nnEeMIfMRCjR3UERf8bT3eseD6b1ayA,1072
+lalamo-0.5.7.dist-info/METADATA,sha256=ZzSQiR7KYoAXkBKOToa2bi9tCPRvVXvhnN5y6AO7wyQ,3146
+lalamo-0.5.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lalamo-0.5.7.dist-info/entry_points.txt,sha256=qli7qTfnBk5WP10rOGXXEckHMtt-atJMDWd8jN89Uks,43
+lalamo-0.5.7.dist-info/top_level.txt,sha256=VHvWL5JN5XRG36NsN_MieJ7EwRihEOrEjyDaTdFJ-aI,7
+lalamo-0.5.7.dist-info/RECORD,,