optimum-rbln 0.2.1a1__py3-none-any.whl → 0.2.1a2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registries. It is provided for informational purposes only.
optimum/rbln/__init__.py CHANGED
@@ -71,6 +71,7 @@ _import_structure = {
         "RBLNRobertaForSequenceClassification",
         "RBLNRobertaForMaskedLM",
         "RBLNViTForImageClassification",
+        "RBLNBertForMaskedLM",
     ],
     "diffusers": [
         "RBLNStableDiffusionPipeline",
@@ -141,6 +142,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
         RBLNBartForConditionalGeneration,
         RBLNBartModel,
+        RBLNBertForMaskedLM,
         RBLNBertForQuestionAnswering,
         RBLNBertModel,
         RBLNCLIPTextModel,
optimum/rbln/__version__.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.2.1a1'
+__version__ = version = '0.2.1a2'
 __version_tuple__ = version_tuple = (0, 2, 1)
optimum/rbln/ops/attn.py CHANGED
@@ -152,16 +152,16 @@ def register_rbln_custom_attention_add_softmax():
         """
         return (
             q,
-            torch.empty(1, *kcache.shape[1:], device=kcache.device),
-            torch.empty(1, *vcache.shape[1:], device=vcache.device),
+            torch.empty(*kcache.shape, device=kcache.device),
+            torch.empty(*vcache.shape, device=vcache.device),
         )
 
     @register_fake("rbln_custom_ops::attn_decode_add_softmax")
     def attn_decode_add_softmax_abstract(q, k, v, m, kcache, vcache, seq, partition):
         return (
             q,
-            torch.empty(1, *kcache.shape[1:], device=kcache.device),
-            torch.empty(1, *vcache.shape[1:], device=vcache.device),
+            torch.empty(*kcache.shape, device=kcache.device),
+            torch.empty(*vcache.shape, device=vcache.device),
         )
 
     torch.library.define(
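Note: both edits above touch only the fake (meta) kernels PyTorch uses while tracing; they declare output shapes rather than compute values, and now advertise caches with the full batch dimension instead of a batch-1 slice. A minimal sketch of the pattern with recent PyTorch (torch >= 2.4), using a hypothetical op name rather than the real `rbln_custom_ops` schema:

```python
import torch

# Hypothetical op for illustration only; the real ops live under rbln_custom_ops.
torch.library.define(
    "demo_ops::cache_passthrough",
    "(Tensor q, Tensor kcache, Tensor vcache) -> (Tensor, Tensor, Tensor)",
)

@torch.library.register_fake("demo_ops::cache_passthrough")
def _(q, kcache, vcache):
    # A fake kernel never computes values; it only tells the tracer the
    # output shapes/dtypes. Returning the full kcache/vcache shape (not a
    # batch-1 slice) keeps the traced cache shapes batch-consistent.
    return (
        q,
        torch.empty(*kcache.shape, device=kcache.device),
        torch.empty(*vcache.shape, device=vcache.device),
    )
```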
optimum/rbln/transformers/__init__.py CHANGED
@@ -35,6 +35,7 @@ _import_structure = {
         "RBLNBartForConditionalGeneration",
         "RBLNBartModel",
         "RBLNBertModel",
+        "RBLNBertForMaskedLM",
         "RBLNBertForQuestionAnswering",
         "RBLNCLIPTextModel",
         "RBLNCLIPTextModelWithProjection",
@@ -92,6 +93,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
         RBLNBartForConditionalGeneration,
         RBLNBartModel,
+        RBLNBertForMaskedLM,
         RBLNBertForQuestionAnswering,
         RBLNBertModel,
         RBLNCLIPTextModel,
optimum/rbln/transformers/models/__init__.py CHANGED
@@ -33,7 +33,7 @@ _import_structure = {
         "RBLNAutoModelForVision2Seq",
     ],
     "bart": ["RBLNBartForConditionalGeneration", "RBLNBartModel"],
-    "bert": ["RBLNBertModel", "RBLNBertForQuestionAnswering"],
+    "bert": ["RBLNBertModel", "RBLNBertForQuestionAnswering", "RBLNBertForMaskedLM"],
     "clip": ["RBLNCLIPTextModel", "RBLNCLIPTextModelWithProjection", "RBLNCLIPVisionModel"],
     "dpt": ["RBLNDPTForDepthEstimation"],
     "exaone": ["RBLNExaoneForCausalLM"],
@@ -67,7 +67,7 @@ if TYPE_CHECKING:
         RBLNAutoModelForVision2Seq,
     )
     from .bart import RBLNBartForConditionalGeneration, RBLNBartModel
-    from .bert import RBLNBertForQuestionAnswering, RBLNBertModel
+    from .bert import RBLNBertForMaskedLM, RBLNBertForQuestionAnswering, RBLNBertModel
    from .clip import RBLNCLIPTextModel, RBLNCLIPTextModelWithProjection, RBLNCLIPVisionModel
     from .dpt import RBLNDPTForDepthEstimation
     from .exaone import RBLNExaoneForCausalLM
optimum/rbln/transformers/models/bert/__init__.py CHANGED
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .modeling_bert import RBLNBertForQuestionAnswering, RBLNBertModel
+from .modeling_bert import RBLNBertForMaskedLM, RBLNBertForQuestionAnswering, RBLNBertModel
optimum/rbln/transformers/models/bert/modeling_bert.py CHANGED
@@ -20,7 +20,7 @@ from transformers import PretrainedConfig
 
 from ....modeling import RBLNModel
 from ....modeling_config import RBLNCompileConfig, RBLNConfig
-from ...modeling_generic import RBLNModelForQuestionAnswering
+from ...modeling_generic import RBLNModelForMaskedLM, RBLNModelForQuestionAnswering
 
 
 logger = logging.getLogger(__name__)
@@ -100,5 +100,9 @@ class RBLNBertModel(RBLNModel):
         return rbln_config
 
 
+class RBLNBertForMaskedLM(RBLNModelForMaskedLM):
+    rbln_model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
+
+
 class RBLNBertForQuestionAnswering(RBLNModelForQuestionAnswering):
     rbln_model_input_names = ["input_ids", "attention_mask", "token_type_ids"]
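Note: with the re-exports above, the new head is usable like the existing BERT classes. A usage sketch, assuming the usual optimum-rbln `from_pretrained(..., export=True)` flow, a transformers-style masked-LM output, and an illustrative checkpoint and `rbln_max_seq_len`:

```python
from transformers import AutoTokenizer

from optimum.rbln import RBLNBertForMaskedLM

model_id = "bert-base-uncased"  # illustrative checkpoint
model = RBLNBertForMaskedLM.from_pretrained(
    model_id,
    export=True,           # compile from the original HF weights
    rbln_max_seq_len=128,  # assumed kwarg: RBLN graphs take statically shaped inputs
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Pad to the compiled length; input_ids/attention_mask/token_type_ids
# match rbln_model_input_names above.
inputs = tokenizer(
    "The capital of France is [MASK].",
    padding="max_length", max_length=128, return_tensors="pt",
)
logits = model(**inputs).logits
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.decode(logits[0, mask_pos].argmax()))
```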
optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py CHANGED
@@ -544,15 +544,19 @@ class DecoderOnlyAttention(nn.Module):
         super().__init__()
         self._original_mod = self_attn
         self.layer_idx = self_attn.layer_idx
-        self.num_heads = self._original_mod.num_heads
+        self.num_heads = getattr(self._original_mod, "num_heads", None) or getattr(
+            self._original_mod.config, "num_attention_heads"
+        )
         self.head_dim = self._original_mod.head_dim
         self._phase = "prefill"
         self.scale = torch.tensor(self.get_attn_scale())
 
         if hasattr(self._original_mod, "num_key_value_heads"):
             self.num_key_value_heads = self._original_mod.num_key_value_heads
+        elif hasattr(self._original_mod, "config") and hasattr(self._original_mod.config, "num_key_value_heads"):
+            self.num_key_value_heads = self._original_mod.config.num_key_value_heads
         else:
-            self.num_key_value_heads = self._original_mod.num_heads
+            self.num_key_value_heads = self.num_heads
 
         self.attention = self.get_attention()
         self.__post_init__()
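Note: this fallback tracks the transformers pin bump in this release (4.45.2 → 4.48.3, see METADATA below); newer transformers attention layers keep head counts on the layer config rather than as module attributes. A standalone sketch of the same resolution order, with a hypothetical helper name:

```python
import torch.nn as nn

def resolve_head_counts(attn: nn.Module) -> tuple[int, int]:
    # Hypothetical helper mirroring the logic above: prefer the module
    # attributes older transformers attention layers carried, then fall
    # back to the layer's config, then assume plain multi-head attention.
    num_heads = getattr(attn, "num_heads", None) or attn.config.num_attention_heads
    if hasattr(attn, "num_key_value_heads"):
        return num_heads, attn.num_key_value_heads
    if hasattr(attn, "config") and hasattr(attn.config, "num_key_value_heads"):
        return num_heads, attn.config.num_key_value_heads
    return num_heads, num_heads
```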
optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py CHANGED
@@ -420,7 +420,7 @@ class Seq2SeqSelfAttention(nn.Module):
         pass
 
     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int) -> torch.Tensor:
-        return tensor.view(bsz, 1, seq_len, 1, self.num_heads, self.head_dim).transpose(2, 4)
+        return tensor.view(bsz, seq_len, 1, self.num_heads, self.head_dim).transpose(1, 3)
 
     def projection(self, hidden_states) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
         """Projects input hidden states into query, key, and value representations.
@@ -450,38 +450,21 @@ class Seq2SeqSelfAttention(nn.Module):
         key_states = self._shape(key_states, -1, bsz)
         value_states = self._shape(value_states, -1, bsz)
 
-        all_key_states = []
-        all_value_states = []
-        all_attn_output = []
-        for b_idx in range(bsz):
-            query_state = query_states[b_idx]
-            key_state = key_states[b_idx]
-            value_state = value_states[b_idx]
-            attn_mask = attention_mask[b_idx].unsqueeze(0).unsqueeze(2)
-            past_key_state = past_key_value[0].view(bsz, self.num_heads, 1, -1, self.head_dim)
-            past_value_state = past_key_value[1].view(bsz, self.num_heads, 1, -1, self.head_dim)
-
-            attn_output, key_state, value_state = self.attn_decode(
-                query_state,
-                key_state,
-                value_state,
-                attn_mask,
-                past_key_state,
-                past_value_state,
-                cache_position[b_idx][0],
-                torch.tensor(1.0, dtype=torch.float32),  # scale
-            )
-
-            attn_output = attn_output.view(1, self.num_heads, -1, self.head_dim).transpose(1, 2)
-            attn_output = attn_output.reshape(1, -1, self.num_heads * self.head_dim)
-
-            all_key_states.append(key_state.squeeze(2))
-            all_value_states.append(value_state.squeeze(2))
-            all_attn_output.append(attn_output)
+        attn_output, key_states, value_states = self.attn_decode(
+            query_states,
+            key_states,
+            value_states,
+            attention_mask.unsqueeze(
+                2
+            ),  # Unsqueeze group axis since CustomKernel expects it for group query attention
+            past_key_value[0].view(bsz, self.num_heads, 1, -1, self.head_dim),
+            past_key_value[1].view(bsz, self.num_heads, 1, -1, self.head_dim),
+            cache_position.squeeze(1),
+            torch.tensor(1.0, dtype=torch.float32),  # scale
+        )
 
-        key_states = torch.cat(all_key_states, dim=0)
-        value_states = torch.cat(all_value_states, dim=0)
-        attn_output = torch.cat(all_attn_output, dim=0)
+        attn_output = attn_output.view(bsz, self.num_heads, -1, self.head_dim).transpose(1, 2)
+        attn_output = attn_output.reshape(bsz, -1, self.num_heads * self.head_dim)
 
         attn_output = self.out_proj(attn_output)
         present_key_value = (key_states, value_states)
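Note: the rewrite replaces the per-sample Python loop with one batched call to the custom decode kernel, so the traced graph no longer grows with batch size. The new `_shape` above produces the layout that call assumes; a standalone shape check with illustrative sizes:

```python
import torch

bsz, seq, heads, dim = 2, 5, 8, 64  # illustrative sizes
x = torch.randn(bsz, seq, heads * dim)

# New _shape: (bsz, seq, heads*dim) -> (bsz, heads, 1, seq, dim);
# the singleton axis is the group axis the custom kernel expects.
y = x.view(bsz, seq, 1, heads, dim).transpose(1, 3)
assert y.shape == (bsz, heads, 1, seq, dim)

# Inverse path applied to the kernel output before out_proj.
out = torch.randn(bsz, heads, seq, dim)
merged = out.transpose(1, 2).reshape(bsz, seq, heads * dim)
assert merged.shape == (bsz, seq, heads * dim)
```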
optimum/rbln/transformers/models/t5/t5_architecture.py CHANGED
@@ -147,6 +147,11 @@ class T5CrossAttention(nn.Module):
     def __init__(self, attn):
         super().__init__()
         self.attn = attn
+        self.q = attn.q
+        self.o = attn.o
+        self.n_heads = attn.n_heads
+        self.key_value_proj_dim = attn.key_value_proj_dim
+        self.inner_dim = attn.inner_dim
 
     def forward(
         self,
@@ -155,9 +160,27 @@ class T5CrossAttention(nn.Module):
         attention_mask: torch.Tensor = None,
         key_value_states: torch.Tensor = None,
     ):
-        return self.attn(
-            hidden_states=hidden_states,
-            past_key_value=past_key_value,
-            position_bias=attention_mask,
-            key_value_states=key_value_states,
-        )
+        batch_size = hidden_states.shape[0]
+
+        query_states = self.q(hidden_states)
+        query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
+
+        # reuse k, v cross-attentions
+        key_states = past_key_value[0]
+        value_states = past_key_value[1]
+
+        # compute scores, equivalent to torch.einsum("bnqd,bnkd->bnqk", query_states, key_states); compatible with onnx op > 9
+        scores = torch.matmul(query_states, key_states.transpose(3, 2))
+        scores += attention_mask
+
+        # (batch_size, n_heads, seq_length, key_length)
+        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
+        attn_output = torch.matmul(attn_weights, value_states)
+
+        attn_output = attn_output.transpose(1, 2).contiguous()
+        attn_output = attn_output.view(batch_size, -1, self.inner_dim)
+        attn_output = self.o(attn_output)
+
+        outputs = (attn_output, past_key_value)
+
+        return outputs
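Note: the forward now computes cross-attention inline against the cached encoder keys/values instead of delegating to the wrapped T5Attention, leaving only plain tensor ops in the traced graph. T5 applies no 1/sqrt(d) scaling here; it is absorbed into the trained query projection. A shape walk-through with illustrative sizes:

```python
import torch
import torch.nn as nn

bsz, q_len, kv_len, n_heads, d = 2, 3, 7, 8, 64  # illustrative sizes
q = torch.randn(bsz, n_heads, q_len, d)      # query after view/transpose
k = torch.randn(bsz, n_heads, kv_len, d)     # cached encoder keys
v = torch.randn(bsz, n_heads, kv_len, d)     # cached encoder values
bias = torch.zeros(bsz, 1, q_len, kv_len)    # additive attention mask

scores = torch.matmul(q, k.transpose(3, 2)) + bias           # (bsz, n_heads, q_len, kv_len)
weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
out = torch.matmul(weights, v).transpose(1, 2).contiguous()  # (bsz, q_len, n_heads, d)
out = out.view(bsz, q_len, n_heads * d)                      # ready for the o-projection
assert out.shape == (bsz, q_len, n_heads * d)
```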
optimum_rbln-0.2.1a1.dist-info/METADATA → optimum_rbln-0.2.1a2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: optimum-rbln
-Version: 0.2.1a1
+Version: 0.2.1a2
 Summary: Optimum RBLN is the interface between the Hugging Face Transformers and Diffusers libraries and RBLN accelerators. It provides a set of tools enabling easy model loading and inference on single and multiple rbln device settings for different downstream tasks.
 Project-URL: Homepage, https://rebellions.ai
 Project-URL: Documentation, https://docs.rbln.ai
@@ -28,7 +28,7 @@ Requires-Dist: packaging>=24.1
 Requires-Dist: torch<=2.5.1
 Requires-Dist: torchaudio<=2.5.1
 Requires-Dist: torchvision<=0.20.1
-Requires-Dist: transformers==4.45.2
+Requires-Dist: transformers==4.48.3
 Description-Content-Type: text/markdown
 
 
optimum_rbln-0.2.1a1.dist-info/RECORD → optimum_rbln-0.2.1a2.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
-optimum/rbln/__init__.py,sha256=zTum8bjtVKdT9dq56vyQZy1eG2UUhEbiaGyvtokVDok,6036
-optimum/rbln/__version__.py,sha256=Hh8jxA0Z8Gd3AqkgqwtySq_B5l73iIwQzKQ1Y-52wbw,413
+optimum/rbln/__init__.py,sha256=sLCjJu_MLZEKDOwHIlJP4u4GzGZx-1kqHTYGw5B4xDg,6096
+optimum/rbln/__version__.py,sha256=RUdGjwBqCynJjA53NyAfXHitjfqAoWaGNCXiO_9cNsw,413
 optimum/rbln/modeling.py,sha256=OQGLkzlE3vD3O-ZeE1Z0jK-QCqWy1V46pSCOdmehFTI,8267
 optimum/rbln/modeling_base.py,sha256=sU5Tr3SmhQZPsbKz5xo-FqMU1gC4Xd4m9xZVIx2NY7I,20359
 optimum/rbln/modeling_config.py,sha256=7104bxmrvKW4Q6XTruQayiIGl8GHDFmPkJ3cknMIInE,11335
@@ -34,26 +34,26 @@ optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_x
 optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py,sha256=3aB1Rw-OgKytQOHwOaShbEvq_XVHPOGvsGm8pstEmKU,930
 optimum/rbln/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py,sha256=MzVP1wscaO1sUIiBIPJqG6zuGyez9VUbA42-JSIm-mk,930
 optimum/rbln/ops/__init__.py,sha256=MbAHevg59fXQOFgrnsFFZ97s8-YrgvYCYML_sgKEEfM,816
-optimum/rbln/ops/attn.py,sha256=rB0xYhUxpb1o1JISMNJnPffr7qF8DwnuecOSMWPVGoA,9728
+optimum/rbln/ops/attn.py,sha256=QYvSMg4ps_PenHwpLVhuYRoOAFvHIo19nY0ZEdj4nTE,9700
 optimum/rbln/ops/flash_attn.py,sha256=Zn5nkouY3kk6MBivQpPjgGh4oepjpi8F3tnTrmrNfpg,2304
 optimum/rbln/ops/kv_cache_update.py,sha256=9W4WCO1Dtfy0u5i978JJRa7uLbqrfR2lHuoPynb07fw,3143
-optimum/rbln/transformers/__init__.py,sha256=8NHC8MpmWc8fteSBtFNJ729LON6FRHI2J7zifCAAXZ4,4107
+optimum/rbln/transformers/__init__.py,sha256=SdOjpa4Iufo6aOJPvjQwD_vz28dmmNV9AtF5Cz9ajLM,4167
 optimum/rbln/transformers/modeling_alias.py,sha256=yx7FnZQWAnrWzivaO5hI7T6i-fyLzt2tMIXG2oDNbPo,1657
 optimum/rbln/transformers/modeling_generic.py,sha256=SD7XjpjnCn-ejNAUWgkaaHV6Fv2Y6K-hbXEXXb9W_H4,18177
 optimum/rbln/transformers/modeling_rope_utils.py,sha256=3zwkhYUyTZhxCJUSmwCc88iiY1TppRWEY9ShwUqNB2k,14293
-optimum/rbln/transformers/models/__init__.py,sha256=GBCGLGdo_HMEkGhZSevOgt-M9KMETaKUqRmgEf3WpQE,3639
+optimum/rbln/transformers/models/__init__.py,sha256=wucrA1ybpDfNcrySwdVeK5PZEYl-3ONXJvGpHGTvteo,3683
 optimum/rbln/transformers/models/auto/__init__.py,sha256=GvGbb3ZpMv-h6euXeZ42jSizoOfrL2O1uvpAnfKxYEo,1034
 optimum/rbln/transformers/models/auto/auto_factory.py,sha256=IK9jFrJ3EEzYQa9_aKpcp2TO68M5YGkA-HcfBVpA2QU,7027
 optimum/rbln/transformers/models/auto/modeling_auto.py,sha256=Un9qoqdy3dO8JBza_bTJF_6_fRVNM9QisihSgTRFI-o,3933
 optimum/rbln/transformers/models/bart/__init__.py,sha256=32HPe0_GIO0hp9U464Iv6Jd7M-1nop9g8hA1UZMHhyw,674
 optimum/rbln/transformers/models/bart/bart_architecture.py,sha256=dTkgMpNkyh4vT_mZU5tQ5bvH_lRZfRjaJ1gIHvJkmgs,5479
 optimum/rbln/transformers/models/bart/modeling_bart.py,sha256=ADRbE-5N3xJ60AzzjJ4BZs_THmB71qs4XTr9iFqsEqE,5667
-optimum/rbln/transformers/models/bert/__init__.py,sha256=_pEfofte9Ss8y2dgCbwl3FCHr6X2LNSm9VMMMS1vVh0,670
-optimum/rbln/transformers/models/bert/modeling_bert.py,sha256=ikAcgsx5zIeMbmyUWusWTuX35q_gm8ZehEll-xOIJFc,4497
+optimum/rbln/transformers/models/bert/__init__.py,sha256=YVV7k_laU6yJBawZrgjIWjRmIF-Y4oQQHqyf8lsraQs,691
+optimum/rbln/transformers/models/bert/modeling_bert.py,sha256=flzCLWqCaCnQLfWCVSRmQke_CEAXzcG0DOoUM8EAqkg,4649
 optimum/rbln/transformers/models/clip/__init__.py,sha256=ssJqlEt318ti2QaEakGh_tO3Ap1VSPCVF-ymUuvjAJs,698
 optimum/rbln/transformers/models/clip/modeling_clip.py,sha256=LGytQ33WGV2qqXnj_1dMiPN63ytL1JlNQlc3aXaG1bA,5705
 optimum/rbln/transformers/models/decoderonly/__init__.py,sha256=pDogsdpJKKB5rqnVFrRjwfhUvOSV-jZ3oARMsqSvOOQ,665
-optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py,sha256=73sWpSDgm6_ztiGPsGEljJo-a7AisTAqFb7s-UaShVE,36292
+optimum/rbln/transformers/models/decoderonly/decoderonly_architecture.py,sha256=BjQHwoPZfM-KUQzxm4AU-PdmoMgLxnCG6kfSpGjUvrk,36578
 optimum/rbln/transformers/models/decoderonly/modeling_decoderonly.py,sha256=mAgRRMGVHvTUjJBDlmUOjNhSNjprKSD7tLeFknrx0Rw,25810
 optimum/rbln/transformers/models/dpt/__init__.py,sha256=gP1tkR3XMNlHq1GT87ugIVvb2o_1eAUg1JaniXjy1Lw,651
 optimum/rbln/transformers/models/dpt/modeling_dpt.py,sha256=HS_f7bL2CvbWP_7NGMzPYb0GdHRE2xHF0e1DkzlRdRE,3411
@@ -85,10 +85,10 @@ optimum/rbln/transformers/models/qwen2/modeling_qwen2.py,sha256=9-aFDvjMzPNUyGOz
 optimum/rbln/transformers/models/qwen2/qwen2_architecture.py,sha256=XlNAMYAcDLohnSAhIFGKOPuCB5XLgzYs5ABWdeQSaZs,720
 optimum/rbln/transformers/models/seq2seq/__init__.py,sha256=EmEMV4rOYqKyruX85d0fR73-b8N6BSD6CPcbpYdBuVk,651
 optimum/rbln/transformers/models/seq2seq/modeling_seq2seq.py,sha256=4GHlLf6xm7a8YegYVX-zhIqk4ouwHCzQuj8Z-jXSFJw,15407
-optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py,sha256=JTcLA-xnSs6KD8xUA6RY0QMWoXChnHvmhyKa_4039UY,19053
+optimum/rbln/transformers/models/seq2seq/seq2seq_architecture.py,sha256=15yoF-wyhcLcK-Z2MOUmyPlkOMNTVOJ013uBepqtpxA,18387
 optimum/rbln/transformers/models/t5/__init__.py,sha256=1skR1RmnG62WTAP3-F5P1x-V_ReFhMyirH3u56vWwvc,675
 optimum/rbln/transformers/models/t5/modeling_t5.py,sha256=MFs-3yYviV1QqSpsTB2GarTEs9wGH5AYofksLQLMBXg,8043
-optimum/rbln/transformers/models/t5/t5_architecture.py,sha256=_K_IROovNH7zZIj2E1datCLIWbRpAg181p03GDTTtXE,6209
+optimum/rbln/transformers/models/t5/t5_architecture.py,sha256=kkjErS42mW2jv5O_xL7BaKobvvqy7BGmYOowKyHakvI,7189
 optimum/rbln/transformers/models/wav2vec2/__init__.py,sha256=YpgA0K-vyg9veh0eL_jxauosbRpb_kpGKHvvQLBspKM,649
 optimum/rbln/transformers/models/wav2vec2/modeling_wav2vec2.py,sha256=Ws0tw2j9Mp8BREP6nI-Ann_U0rhkqofaQFCKoepDYRA,3837
 optimum/rbln/transformers/models/whisper/__init__.py,sha256=ktnNe5ri3ycCWZ_W_voFB9y9-vgGgxS1X9s8LBRZmWc,665
@@ -108,7 +108,7 @@ optimum/rbln/utils/model_utils.py,sha256=DfD_Z2qvZHqcddXqnzTM1AN8khanj3-DXK2lJvV
 optimum/rbln/utils/runtime_utils.py,sha256=5-DYniyP59nx-mrrbi7AqA77L85b4Cm5oLpaxidSyss,3699
 optimum/rbln/utils/save_utils.py,sha256=W5ON-90xLcz1suFlZwOhmYB5Mf9XSTu00xrfTfyL88U,3608
 optimum/rbln/utils/submodule.py,sha256=oZoGrItB8WqY4i-K9WJPlLlcLohc1YGB9OHB8_XZw3A,4071
-optimum_rbln-0.2.1a1.dist-info/METADATA,sha256=QqxzPcaWJ57Dtaws6oRFO_tCjZPjZkWQldxpWHXpg9w,5300
-optimum_rbln-0.2.1a1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-optimum_rbln-0.2.1a1.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
-optimum_rbln-0.2.1a1.dist-info/RECORD,,
+optimum_rbln-0.2.1a2.dist-info/METADATA,sha256=hwr_UclFUgkNbtwCanMy_5lF56FPX8WG0GGz3B0cJCc,5300
+optimum_rbln-0.2.1a2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+optimum_rbln-0.2.1a2.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+optimum_rbln-0.2.1a2.dist-info/RECORD,,