liger-kernel-nightly 0.5.2.dev20241216214323__py3-none-any.whl → 0.5.2.dev20241218221959__py3-none-any.whl

--- a/liger_kernel/chunked_loss/cpo_loss.py
+++ b/liger_kernel/chunked_loss/cpo_loss.py
@@ -47,6 +47,7 @@ class LigerFusedLinearCPOFunction(LigerFusedLinearPreferenceBase):
         alpha=1.0,
         compute_nll_loss=True,
         compiled=True,
+        is_encoder_decoder=False,
     ):
         return LigerFusedLinearPreferenceBase.forward(
             ctx,
@@ -60,12 +61,13 @@ class LigerFusedLinearCPOFunction(LigerFusedLinearPreferenceBase):
             beta=beta,
             compute_nll_loss=compute_nll_loss,
             compiled=compiled,
+            is_encoder_decoder=is_encoder_decoder,
         )
 
     @staticmethod
     def backward(ctx, *grad_output):
         grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
-        return *grads, None, None, None, None, None
+        return *grads, None, None, None, None, None, None
 
 
 class LigerFusedLinearCPOLoss(torch.nn.Module):
@@ -80,11 +82,16 @@ class LigerFusedLinearCPOLoss(torch.nn.Module):
         alpha: float = 1.0,
         compute_nll_loss: bool = True,
         compiled: bool = True,
+        is_encoder_decoder: bool = False,
     ):
         """
         Args:
             ignore_index (int): Index to ignore in the loss.
             beta (float): Weight for the odds ratio loss.
+            alpha (float): Weight for the NLL loss.
+            compute_nll_loss (bool): Whether to compute NLL loss.
+            compiled (bool): Whether to compile the loss function.
+            is_encoder_decoder (bool): Whether the model is an encoder-decoder model.
         """
         super().__init__()
         self.ignore_index = ignore_index
@@ -92,6 +99,7 @@ class LigerFusedLinearCPOLoss(torch.nn.Module):
         self.alpha = alpha
         self.compute_nll_loss = compute_nll_loss
         self.compiled = compiled
+        self.is_encoder_decoder = is_encoder_decoder
 
     def forward(self, lin_weight, _input, target, bias=None):
         return LigerFusedLinearCPOFunction.apply(
@@ -104,4 +112,5 @@ class LigerFusedLinearCPOLoss(torch.nn.Module):
             self.alpha,
             self.compute_nll_loss,
             self.compiled,
+            self.is_encoder_decoder,
         )
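
Note: the extra trailing `None` in each `backward` above tracks a `torch.autograd.Function` rule: `backward` must return exactly one gradient per `forward` input, and non-differentiable arguments such as the new `is_encoder_decoder` flag receive `None`. A minimal self-contained toy illustrating the rule (my own example, not library code):

import torch

class Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, factor, verbose=False):
        # Three forward inputs -> backward must return three values.
        ctx.factor = factor
        return x * factor

    @staticmethod
    def backward(ctx, grad_out):
        # Gradient for x; None for the non-tensor inputs factor and verbose.
        return grad_out * ctx.factor, None, None

x = torch.ones(3, requires_grad=True)
Scale.apply(x, 2.0, True).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])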

--- a/liger_kernel/chunked_loss/dpo_loss.py
+++ b/liger_kernel/chunked_loss/dpo_loss.py
@@ -67,6 +67,7 @@ class LigerFusedLinearDPOFunction(LigerFusedLinearPreferenceBase):
         compute_nll_loss=True,
         compiled=True,
         use_ref_model=True,
+        is_encoder_decoder=False,
     ):
         return LigerFusedLinearPreferenceBase.forward(
             ctx=ctx,
@@ -83,12 +84,13 @@ class LigerFusedLinearDPOFunction(LigerFusedLinearPreferenceBase):
             ref_input=ref_input,
             ref_weight=ref_weight,
             ref_bias=ref_bias,
+            is_encoder_decoder=is_encoder_decoder,
         )
 
     @staticmethod
     def backward(ctx, *grad_output):
         grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
-        return *grads, None, None, None, None, None, None, None, None
+        return *grads, None, None, None, None, None, None, None, None, None
 
 
 class LigerFusedLinearDPOLoss(torch.nn.Module):
@@ -103,6 +105,7 @@ class LigerFusedLinearDPOLoss(torch.nn.Module):
         compute_nll_loss: bool = True,
         compiled: bool = True,
         use_ref_model: bool = False,
+        is_encoder_decoder: bool = False,
     ):
         """
         Args:
@@ -111,6 +114,7 @@ class LigerFusedLinearDPOLoss(torch.nn.Module):
             compute_nll_loss (bool): Whether to compute the NLL loss.
             compiled (bool): Whether to use the torch compiled kernel.
             use_ref_model (bool): Whether to use a reference model for the DPO loss.
+            is_encoder_decoder (bool): Whether the model is an encoder-decoder model.
         """
         super().__init__()
         self.ignore_index = ignore_index
@@ -118,6 +122,7 @@ class LigerFusedLinearDPOLoss(torch.nn.Module):
         self.compute_nll_loss = compute_nll_loss
         self.compiled = compiled
         self.use_ref_model = use_ref_model
+        self.is_encoder_decoder = is_encoder_decoder
 
     def forward(
         self,
@@ -142,4 +147,5 @@ class LigerFusedLinearDPOLoss(torch.nn.Module):
             self.compute_nll_loss,
             self.compiled,
             self.use_ref_model,
+            self.is_encoder_decoder,
         )
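
For reference, a hedged usage sketch of the updated DPO module. The shapes are illustrative, the keyword names `ref_input` and `ref_weight` are an assumption inferred from the arguments threaded through this diff, and the call may return auxiliary statistics alongside the loss:

import torch
from liger_kernel.chunked_loss.dpo_loss import LigerFusedLinearDPOLoss

B, T, H, V = 2, 16, 64, 128  # preference pairs, seq len, hidden dim, vocab size
dpo = LigerFusedLinearDPOLoss(use_ref_model=True, is_encoder_decoder=False)

weight = torch.randn(V, H, requires_grad=True)  # policy lm_head weight
ref_weight = torch.randn(V, H)                  # frozen reference lm_head weight
_input = torch.randn(2 * B, T, H)               # policy hidden states: chosen rows first, then rejected
ref_input = torch.randn(2 * B, T, H)            # reference-model hidden states
target = torch.randint(0, V, (2 * B, T))

out = dpo(weight, _input, target, ref_input=ref_input, ref_weight=ref_weight)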

--- a/liger_kernel/chunked_loss/fused_linear_preference.py
+++ b/liger_kernel/chunked_loss/fused_linear_preference.py
@@ -26,6 +26,7 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         ignore_index=-100,
         alpha=1.0,
         beta=0.1,
+        is_encoder_decoder=False,
         compute_nll_loss=True,
         compiled=True,
         use_ref_model=False,
@@ -56,6 +57,7 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
             ignore_index (int): Index to ignore for loss computation.
             alpha (float): Weight for the NLL loss.
             beta (float): Weight for the preference loss.
+            is_encoder_decoder (bool): Whether the model is an encoder-decoder model.
             compute_nll_loss (bool): Whether to compute NLL loss.
             compiled (bool): Whether to use torch compile for chunk accumulation.
             use_ref_model (bool): Whether to use a reference model for the alignment loss.
@@ -94,6 +96,7 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
             use_ref_model=use_ref_model,
             ref_weight=ref_weight,
             ref_bias=ref_bias,
+            is_encoder_decoder=is_encoder_decoder,
             **loss_kwargs,
         )
 
@@ -282,33 +285,48 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         bias=None,
         ignore_index=-100,
         compute_nll_loss=True,
+        is_encoder_decoder=False,
     ):
-        len_chosen_chunk = target_chunk.shape[0] // 2
+        # Calculate logits and log probabilities
         logits_chunk = input_chunk @ weight.t()
         if bias is not None:
-            logits_chunk = logits_chunk + bias
+            logits_chunk += bias
         log_probs_chunk = F.log_softmax(logits_chunk.float(), dim=-1)
 
+        # Split chunk into chosen and rejected portions
+        len_chosen_chunk = target_chunk.shape[0] // 2
+
+        # Handle sequence shifting for non-encoder-decoder models
+        if not is_encoder_decoder:
+            logits_chunk = logits_chunk[:, :-1]
+            log_probs_chunk = log_probs_chunk[:, :-1]
+            target_chunk = target_chunk[:, 1:]
+
+        # Calculate NLL loss for chosen sequences
         chosen_nll_loss = 0.0
         if compute_nll_loss:
+            chosen_probs = log_probs_chunk[:len_chosen_chunk]
+            chosen_targets = target_chunk[:len_chosen_chunk]
            chosen_nll_loss = F.nll_loss(
-                log_probs_chunk[:len_chosen_chunk].view(-1, log_probs_chunk.shape[-1]),
-                target_chunk[:len_chosen_chunk].view(-1),
+                chosen_probs.reshape(-1, chosen_probs.shape[-1]),
+                chosen_targets.reshape(-1),
                 reduction="sum",
                 ignore_index=ignore_index,
             )
 
+        # Calculate per-token log probabilities
         loss_mask = target_chunk != ignore_index
         label_chunk = torch.where(loss_mask, target_chunk, 0)
-
         per_token_logps = log_probs_chunk.gather(-1, label_chunk.unsqueeze(-1)).squeeze(
             -1
         )
         average_log_prob = (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
 
-        chosen_logps = average_log_prob[:len_chosen_chunk]
-        rejected_logps = average_log_prob[len_chosen_chunk:]
-
+        # Split results for chosen and rejected
+        chosen_logps, rejected_logps = (
+            average_log_prob[:len_chosen_chunk],
+            average_log_prob[len_chosen_chunk:],
+        )
         chosen_logits = logits_chunk[:len_chosen_chunk]
         rejected_logits = logits_chunk[len_chosen_chunk:]
 
@@ -331,6 +349,7 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
         ignore_index=-100,
         alpha=1.0,
         beta=0.1,
+        is_encoder_decoder=False,
         compute_nll_loss=True,
         use_ref_model=False,
         ref_input_chunk=None,
@@ -350,6 +369,7 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
             ignore_index (int): Index to ignore for loss computation.
             alpha (float): Weight for the NLL loss.
             beta (float): Weight for the preference loss.
+            is_encoder_decoder (bool): Whether the model is an encoder-decoder model.
             compute_nll_loss (bool): Whether to compute NLL loss.
             use_ref_model (bool): Whether to use a reference model for the alignment loss.
             ref_weight (torch.Tensor): Reference weight tensor. Shape: (vocab_size, hidden_size).
@@ -369,33 +389,43 @@ class LigerFusedLinearPreferenceBase(torch.autograd.Function):
             bias=bias,
             ignore_index=ignore_index,
             compute_nll_loss=compute_nll_loss,
+            is_encoder_decoder=is_encoder_decoder,
         )
-        chosen_nll_loss = (
-            chosen_nll_loss
-            / (full_target[: full_target.shape[0] // 2] != ignore_index).sum()
-        )
-        chosen_logits_mean = chosen_logits.sum() / (
-            full_target.shape[0] // 2 * input_chunk.shape[1] * weight.shape[0]
-        )
-        rejected_logits_mean = rejected_logits.sum() / (
-            full_target.shape[0] // 2 * input_chunk.shape[1] * weight.shape[0]
-        )
+        if not is_encoder_decoder:
+            chosen_nll_loss = (
+                chosen_nll_loss
+                / (full_target[: full_target.shape[0] // 2, 1:] != ignore_index).sum()
+            )
+            chosen_logits_mean = chosen_logits.sum() / (
+                full_target.shape[0] // 2 * (input_chunk.shape[1] - 1) * weight.shape[0]
+            )
+            rejected_logits_mean = rejected_logits.sum() / (
+                full_target.shape[0] // 2 * (input_chunk.shape[1] - 1) * weight.shape[0]
+            )
+        else:
+            chosen_nll_loss = (
+                chosen_nll_loss
+                / (full_target[: full_target.shape[0] // 2] != ignore_index).sum()
+            )
+            chosen_logits_mean = chosen_logits.sum() / (
+                full_target.shape[0] // 2 * input_chunk.shape[1] * weight.shape[0]
+            )
+            rejected_logits_mean = rejected_logits.sum() / (
+                full_target.shape[0] // 2 * input_chunk.shape[1] * weight.shape[0]
+            )
 
         if use_ref_model:
             with torch.no_grad():
-                (
-                    ref_chosen_logps,
-                    ref_rejected_logps,
-                    ref_chosen_logits,
-                    ref_rejected_logits,
-                    ref_chosen_nll_loss,
-                ) = LigerFusedLinearPreferenceBase.chunk_forward(
-                    ref_input_chunk,
-                    ref_weight,
-                    target_chunk,
-                    ref_bias,
-                    ignore_index=ignore_index,
-                    compute_nll_loss=False,  # We don't need NLL loss for the reference model
+                (ref_chosen_logps, ref_rejected_logps, _, _, _) = (
+                    LigerFusedLinearPreferenceBase.chunk_forward(
+                        ref_input_chunk,
+                        ref_weight,
+                        target_chunk,
+                        ref_bias,
+                        ignore_index=ignore_index,
+                        compute_nll_loss=False,  # We don't need NLL loss for the reference model
+                        is_encoder_decoder=is_encoder_decoder,  # assume the ref model is the same family
+                    )
                 )
             loss_kwargs["ref_chosen_logps"] = ref_chosen_logps
             loss_kwargs["ref_rejected_logps"] = ref_rejected_logps

--- a/liger_kernel/chunked_loss/orpo_loss.py
+++ b/liger_kernel/chunked_loss/orpo_loss.py
@@ -57,6 +57,7 @@ class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
         beta=0.1,
         compute_nll_loss=True,
         compiled=True,
+        is_encoder_decoder=False,
     ):
         return LigerFusedLinearPreferenceBase.forward(
             ctx=ctx,
@@ -69,12 +70,13 @@ class LigerFusedLinearORPOFunction(LigerFusedLinearPreferenceBase):
             beta=beta,
             compute_nll_loss=compute_nll_loss,
             compiled=compiled,
+            is_encoder_decoder=is_encoder_decoder,
         )
 
     @staticmethod
     def backward(ctx, *grad_output):
         grads = LigerFusedLinearPreferenceBase.backward(ctx, grad_output)[:4]
-        return *grads, None, None, None, None
+        return *grads, None, None, None, None, None
 
 
 class LigerFusedLinearORPOLoss(torch.nn.Module):
@@ -88,17 +90,22 @@ class LigerFusedLinearORPOLoss(torch.nn.Module):
         beta: float = 0.1,
         compute_nll_loss: bool = True,
         compiled: bool = True,
+        is_encoder_decoder: bool = False,
     ):
         """
         Args:
             ignore_index (int): Index to ignore in the loss.
             beta (float): Weight for the odds ratio loss.
+            compute_nll_loss (bool): Whether to compute NLL loss.
+            compiled (bool): Whether to compile the loss function.
+            is_encoder_decoder (bool): Whether the model is an encoder-decoder model.
         """
         super().__init__()
         self.ignore_index = ignore_index
         self.beta = beta
         self.compute_nll_loss = compute_nll_loss
         self.compiled = compiled
+        self.is_encoder_decoder = is_encoder_decoder
 
     def forward(self, lin_weight, _input, target, bias=None):
         return LigerFusedLinearORPOFunction.apply(
@@ -110,4 +117,5 @@ class LigerFusedLinearORPOLoss(torch.nn.Module):
             self.beta,
             self.compute_nll_loss,
             self.compiled,
+            self.is_encoder_decoder,
         )
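
As with CPO, the module-level API stays `forward(lin_weight, _input, target, bias=None)` with the new flag stored at construction time. A hedged usage sketch (shapes are illustrative, and the call may return auxiliary outputs alongside the loss):

import torch
from liger_kernel.chunked_loss.orpo_loss import LigerFusedLinearORPOLoss

B, T, H, V = 2, 16, 64, 128
orpo = LigerFusedLinearORPOLoss(beta=0.1, is_encoder_decoder=False)

lin_weight = torch.randn(V, H, requires_grad=True)  # lm_head weight, shape (vocab, hidden)
_input = torch.randn(2 * B, T, H)                   # chosen rows first, then rejected
target = torch.randint(0, V, (2 * B, T))

out = orpo(lin_weight, _input, target)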

--- a/liger_kernel/transformers/model/mixtral.py
+++ b/liger_kernel/transformers/model/mixtral.py
@@ -38,7 +38,7 @@ def lce_forward_deprecated(
     cache_position: Optional[torch.LongTensor] = None,
 ) -> Union[Tuple, MoeCausalLMOutputWithPast]:
     r"""
-    Copy paste Mixtral's forward from transfomers v4.44.2 but replace torch cross entropy with liger fused linear cross entropy
+    Copy paste Mixtral's forward from transformers v4.44.2 but replace torch cross entropy with liger fused linear cross entropy
 
 
     Args:

--- a/liger_kernel/transformers/trainer/orpo_trainer.py
+++ b/liger_kernel/transformers/trainer/orpo_trainer.py
@@ -17,7 +17,7 @@ class _FSDPForwardRedirection:
     This is needed in cases where we call a submodule of a FSDP module. For instance, when we want to call only
     the `LlamaModel` part out of a FSDP-wrapped `LlamaForCausalLM` to get the hidden states without involving
     GPU-memory-heavy `lm_head` and cross entropy computation, doing this directly (i.e. `model.model.forward()`)
-    will not work because the first `nn.Emebedding` layer is not independently wrapped as a FSDP module (because of
+    will not work because the first `nn.Embedding` layer is not independently wrapped as a FSDP module (because of
     the transformer-based wrapping policy), and not calling it through FSDP root module forward will not all-gather
     its parameter, thus resulting in "RuntimeError: 'weight' must be 2-D" error. Similarly, if we want to call just
     the `lm_head` part of a model, we need this trick too to properly get its params all-gathered.

--- a/liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/METADATA
+++ b/liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: liger_kernel_nightly
-Version: 0.5.2.dev20241216214323
+Version: 0.5.2.dev20241218221959
 Summary: Efficient Triton kernels for LLM Training
 License: BSD 2-CLAUSE LICENSE
 Copyright 2024 LinkedIn Corporation

--- a/liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/RECORD
+++ b/liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/RECORD
@@ -3,12 +3,12 @@ liger_kernel/env_report.py,sha256=ok9PMXtO-8uLj_feCJI4h9hz2NtolZ2AG_OJTW5qmo4,18
 liger_kernel/utils.py,sha256=HJa-xVKOohDn6pLVIx-Fv0V9h0QAL3qZGQNRICI-OpI,249
 liger_kernel/chunked_loss/README.md,sha256=K6rucm6nqHpWCmxUOhBYcE3apwQxAy0TfRUippR7Icw,2243
 liger_kernel/chunked_loss/__init__.py,sha256=R2wCcz4Y0kTAve926DH3k182XKezpXeACMHj05g9Mm8,346
-liger_kernel/chunked_loss/cpo_loss.py,sha256=Qu1Ul2A12sp6CqIT-atPbHWFb_LLtINEA9mOpIRx_0g,3097
-liger_kernel/chunked_loss/dpo_loss.py,sha256=9S67SzKkLyoBmHGx8bkmthSNHlCT2ikBy9CCdb7wGj0,4381
+liger_kernel/chunked_loss/cpo_loss.py,sha256=jtA7jA92Gv2raLzJ2QScPqgyi-S04a6aKUMRROdR3-w,3591
+liger_kernel/chunked_loss/dpo_loss.py,sha256=tpBw6fAVq2mujo0_NS98L1NP--m1hYqi1qHGAyfg52g,4690
 liger_kernel/chunked_loss/functional.py,sha256=9Gr-YXIuEzEJkBUhDx3G2fuQayckLor7cC7svhmPML4,549
 liger_kernel/chunked_loss/fused_linear_distillation.py,sha256=2BH6DCPjsR2zS6zcwFPcIIZRhLF8SohjGdKsAJ_301o,10222
-liger_kernel/chunked_loss/fused_linear_preference.py,sha256=AsovMdfsOjgWVxtDhZ_rXqpahMsKTg8YueXnZcHt1XQ,16376
-liger_kernel/chunked_loss/orpo_loss.py,sha256=ZuKGjbkIYzV4UzvupNdq6vyxCp7-BztQkUt8ZnFvKos,3531
+liger_kernel/chunked_loss/fused_linear_preference.py,sha256=iHegoQ18amhXzMNLNyzntxmtz_6JSOgougHTN_rbwfY,17936
+liger_kernel/chunked_loss/orpo_loss.py,sha256=XkVnsJ6Qmn3lxvprXRiySl9Hbx6-UNzWDCFXu_pY6Uc,3973
 liger_kernel/chunked_loss/simpo_loss.py,sha256=Wa4LOlDG9PbJkOOkKg8hbKvnKgg7OTBz6-qIkwPK1yw,3275
 liger_kernel/ops/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 liger_kernel/ops/cross_entropy.py,sha256=oG5hfrlmnlF5lOoZRhHRglObxgH4B0KadjWMJj9EWPM,15860
@@ -49,18 +49,18 @@ liger_kernel/transformers/model/gemma.py,sha256=R4huxuR48gkLrdT8KqV7As2v9dZtEmcG
 liger_kernel/transformers/model/gemma2.py,sha256=zxQsxCRqkoxCES3GJPVI7soUuF3J5HZDlvJgaBos1zM,10836
 liger_kernel/transformers/model/llama.py,sha256=RinsgC_eR-YNvZd2SHPQxZ4eyR3uViaTFCM3SvI5nks,10426
 liger_kernel/transformers/model/mistral.py,sha256=XpL1rlWg_llvW3z_Hf_d8WQs7uQaH4ds7EZ2SxjQHsU,5144
-liger_kernel/transformers/model/mixtral.py,sha256=nyDS1dBpsOXYC2DuW59Hgu7ZrGftrHuWPfNqjcNPIxs,11503
+liger_kernel/transformers/model/mixtral.py,sha256=JlNS6DA6SJqeHDk7j2LZymPQ3wngrTIo3wUGFBqHuJs,11504
 liger_kernel/transformers/model/mllama.py,sha256=mesNCgj0Ea1O-fqRD4LVxDJ1CR2abY_zAzK_bfVzkiU,11222
 liger_kernel/transformers/model/phi3.py,sha256=xUZPlaPKwknLjHc3uUW3EPodm1h0vD3G7Qnhh51v-Io,10332
 liger_kernel/transformers/model/qwen2.py,sha256=EyhSSzQOskGjSnCsKMZpd1s5IAIlHd5PBO3q0MoCs00,9619
 liger_kernel/transformers/model/qwen2_vl.py,sha256=bIQe2bWiY--G84FhCD29Gdi64_qHP6vbcGsK6vKysQE,8547
 liger_kernel/transformers/trainer/__init__.py,sha256=c4OQVJmhNOloj0JYSEc0j_cQuBbzGWILfaowUR1hmRw,210
-liger_kernel/transformers/trainer/orpo_trainer.py,sha256=GCwwYjZbnu-X5TYKSv4hz4EPkZtH2o45X1xHv4p-Pik,7680
+liger_kernel/transformers/trainer/orpo_trainer.py,sha256=O2k2vdHl-O1S-U61aEmyUFu3QrEuNAipQa2oUBb3HAA,7679
 liger_kernel/triton/__init__.py,sha256=yfRe0zMb47QnqjecZWG7LnanfCTzeku7SgWRAwNVmzU,101
 liger_kernel/triton/monkey_patch.py,sha256=5BcGKTtdqeYchypBIBopGIWPx1-cFALz7sOKoEsqXJ0,1584
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/METADATA,sha256=ybbjoZ_TBOi601YdSxUk8hi6A9LLKq8SOUlE94VKkvs,21055
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
-liger_kernel_nightly-0.5.2.dev20241216214323.dist-info/RECORD,,
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/LICENSE,sha256=OhzLDHJ0to4a8sodVLELZiCFylZ1NAAYLs-HrjPy0ag,1312
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/METADATA,sha256=3Af4_e7ToJ34MQGPqIg94fXvRKApkHFb6dV7evsm494,21055
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/NOTICE,sha256=njwnoPZLh9AN8SJQzxvCGLHi-8X__AvWRze6joNXIY8,2066
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/top_level.txt,sha256=2eghu4hA3LnkM7ElW92tQ8zegWKgSbeo-k-aGe1YnvY,13
+liger_kernel_nightly-0.5.2.dev20241218221959.dist-info/RECORD,,