blksprs 2.1.1__py3-none-any.whl → 2.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- blksprs/__init__.py +1 -1
- blksprs/ops/conversion.py +1 -1
- blksprs/ops/flow.py +1 -1
- blksprs/ops/matmul.py +1 -1
- blksprs/ops/softmax.py +66 -41
- blksprs/utils/tools.py +5 -0
- {blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/METADATA +3 -3
- {blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/RECORD +10 -10
- {blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/WHEEL +0 -0
- {blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/top_level.txt +0 -0
blksprs/__init__.py
CHANGED
blksprs/ops/conversion.py
CHANGED
@@ -56,7 +56,7 @@ def to_sparse(x: Tensor, sparsity_layout: Tensor,
 def to_sparse_forward(x: Tensor, _: Tensor,
                       sparsity_lut: Tensor, sparsity_block_size: int, n_sparse_blocks: int) -> Tensor:
     with torch.no_grad():
-        output = torch.
+        output = torch.empty(size=(n_sparse_blocks, sparsity_block_size, sparsity_block_size),
                              dtype=x.dtype, device=x.device)
 
     x_b, x_r, x_c = x.size()
blksprs/ops/flow.py
CHANGED
@@ -14,7 +14,7 @@ def flow_pull_forward(x: Tensor, sparsity_layout_o: Tensor,
                       sparsity_lut: Tensor, sparsity_reverse_lut: Tensor,
                       sparsity_block_size: int, n_sparse_blocks: int) -> Tensor:
     with torch.no_grad():
-        output = torch.
+        output = torch.empty(size=(n_sparse_blocks, sparsity_block_size, sparsity_block_size),
                              dtype=x.dtype, device=x.device)
 
     x_b, x_r, x_c = x.size()
blksprs/ops/matmul.py
CHANGED
@@ -62,7 +62,7 @@ def matmul_forward(x: Tensor, y: Tensor,
                    _: Tensor, sparsity_lut_o: Tensor,
                    sparsity_block_size: int, n_sparse_blocks: int) -> Tensor:
     with torch.no_grad():
-        output = torch.
+        output = torch.empty(size=(n_sparse_blocks, sparsity_block_size, sparsity_block_size),
                              dtype=x.dtype, device=x.device)
 
     x_b, x_r, x_c = x.size()
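This hunk and the matching ones in conversion.py and flow.py above make the same change: each forward op now preallocates its block-sparse output as a single (n_sparse_blocks, sparsity_block_size, sparsity_block_size) tensor that the Triton kernels fill afterwards. A minimal sketch of that allocation pattern follows (not library code; it assumes every sparse block is subsequently written, and the removed allocator call is truncated in this diff, so the 2.1.1 variant is not shown):

```python
import torch

# Hypothetical standalone example of the shared allocation pattern.
n_sparse_blocks, sparsity_block_size = 6, 32
x = torch.randn(2, 64, 96)  # stand-in dense input; only dtype/device matter here

# One (block, row, col) buffer for all sparse blocks; its contents are written later.
output = torch.empty(size=(n_sparse_blocks, sparsity_block_size, sparsity_block_size),
                     dtype=x.dtype, device=x.device)
print(output.shape)  # torch.Size([6, 32, 32])
```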
blksprs/ops/softmax.py
CHANGED
@@ -9,7 +9,7 @@ from triton import language as tl
 
 from blksprs.ops.misc.row_wise import row_wise_sum, row_wise_max, row_wise_sub
 from blksprs.utils.blksprs_tensor import BlksprsTensor
-from blksprs.utils.tools import stride
+from blksprs.utils.tools import stride, ceil_pow2
 from blksprs.utils.autotuning import get_autotune_configs, prune_autotune_configs
 from blksprs.utils.validation import validate_contiguous, validate_dimensions, validate_device, \
     validate_sparsity, validate_sparsity_block_size, validate_dtype_float_32
@@ -66,7 +66,7 @@ def softmax_forward(x: Tensor, sparsity_layout: Tensor,
                     sparsity_lut: Tensor,
                     sparsity_reverse_lut_rws: Tensor,
                     sparsity_block_size: int) -> Tensor:
-    output = torch.
+    output = torch.empty_like(x)
 
     x_row_wise_max, sparsity_layout_rwm = row_wise_max(x, sparsity_layout, sparsity_block_size,
                                                        flag_slice_only=True)
@@ -114,7 +114,7 @@ def softmax_backward_wrapper(ctx, grad_output):
 def softmax_backward(grad_output: Tensor, o: Tensor, sparsity_lut: Tensor, sparsity_layout: Tensor,
                      sparsity_block_size: int) -> Tensor:
     with torch.no_grad():
-        grad_x = torch.
+        grad_x = torch.empty_like(o, dtype=torch.float)
 
     s, sparsity_layout_s = row_wise_sum(grad_output * o, sparsity_layout, sparsity_block_size, flag_slice_only=True)
 
@@ -349,15 +349,17 @@ def softmax_fused(x: BlksprsTensor, sparsity_layout: Tensor, sparsity_block_size
     lut = softmax_fused_build_lut(lut, sparsity_layout)
 
     return BlksprsTensor(softmax_fused_forward(x, sparsity_layout,
-                                               lut["
+                                               lut["sparsity_reverse_lut_sorted"],
+                                               lut["max_blocks_line"],
                                                sparsity_block_size))
 
 
 @triton_op("blksprs::softmax_fused_forward", mutates_args={})
 def softmax_fused_forward(x: Tensor, sparsity_layout: Tensor,
-
+                          sparsity_reverse_lut_sorted: Tensor,
+                          max_blocks_line: int,
                           sparsity_block_size: int) -> Tensor:
-    output = torch.
+    output = torch.empty_like(x)
 
     x_b, x_r, x_c = x.size()
     x_b_s, x_r_s, x_c_s = stride(x)
@@ -373,25 +375,31 @@ def softmax_fused_forward(x: Tensor, sparsity_layout: Tensor,
                    x_b, x_b_s, x_r_s, x_c_s,
                    output,
                    s_l_b, s_l_b_s, s_l_r_s, s_l_c, s_l_c_s,
-
+                   sparsity_reverse_lut_sorted,
+                   max_blocks_line,
                    sparsity_block_size))
 
     return output
 
 
 def softmax_fused_backward_wrapper(ctx, grad_output):
-    o, sparsity_layout,
+    o, sparsity_layout, sparsity_reverse_lut_sorted = ctx.saved_tensors
+    max_blocks_line = ctx.max_blocks_line
     sparsity_block_size = ctx.sparsity_block_size
 
-    return softmax_fused_backward(grad_output, o,
-                                  sparsity_block_size), None, None, None, None
+    return softmax_fused_backward(grad_output, o, sparsity_reverse_lut_sorted, sparsity_layout,
+                                  max_blocks_line, sparsity_block_size), None, None, None, None
 
 
 @triton_op("blksprs::softmax_fused_backward", mutates_args={})
-def softmax_fused_backward(grad_output: Tensor,
+def softmax_fused_backward(grad_output: Tensor,
+                           o: Tensor,
+                           sparsity_reverse_lut_sorted: Tensor,
+                           sparsity_layout: Tensor,
+                           max_blocks_line: int,
                            sparsity_block_size: int) -> Tensor:
     with torch.no_grad():
-        grad_x = torch.
+        grad_x = torch.empty_like(o)
 
     g_b, g_r, g_c = grad_output.size()
     g_b_s, g_r_s, g_c_s = stride(grad_output)
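The wrapper above, together with the setup-context hunk further down, follows the usual PyTorch custom-op convention: tensors such as the sorted reverse LUT travel through ctx.save_for_backward, plain Python values such as max_blocks_line are stashed as attributes on ctx, and the backward wrapper returns one gradient slot per forward input, with None for non-tensor arguments. A generic torch.autograd.Function sketch of that convention is shown below; it is illustrative only, since blksprs registers its backward through triton_op/setup_context rather than autograd.Function:

```python
import torch

# Generic illustration of the ctx pattern used above (not blksprs code).
class _ScaleByTwo(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, lut, max_blocks_line, sparsity_block_size):
        ctx.save_for_backward(lut)                    # tensors go here
        ctx.max_blocks_line = max_blocks_line         # plain ints become ctx attributes
        ctx.sparsity_block_size = sparsity_block_size
        return x * 2

    @staticmethod
    def backward(ctx, grad_output):
        (lut,) = ctx.saved_tensors
        _ = (ctx.max_blocks_line, ctx.sparsity_block_size)  # available to the kernel call
        # One slot per forward input; the LUT and the two ints get None.
        return grad_output * 2, None, None, None


x = torch.randn(3, requires_grad=True)
y = _ScaleByTwo.apply(x, torch.tensor([1, 0, -1]), 2, 32)
y.sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
```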
@@ -410,8 +418,9 @@ def softmax_fused_backward(grad_output: Tensor, o: Tensor, sparsity_reverse_lut:
                    o,
                    o_b, o_b_s, o_r_s, o_c_s,
                    s_l_b, s_l_b_s, s_l_r_s, s_l_c, s_l_c_s,
-
+                   sparsity_reverse_lut_sorted,
                    grad_x,
+                   max_blocks_line,
                    sparsity_block_size))
 
     return grad_x
@@ -428,8 +437,9 @@ def softmax_fused_backward(grad_output: Tensor, o: Tensor, sparsity_reverse_lut:
 def softmax_fused_kernel(x,
                          x_b, x_b_s, x_r_s, x_c_s,
                          o,
-                         s_l_b, s_l_b_s, s_l_r_s, s_l_c
-
+                         s_l_b, s_l_b_s, s_l_r_s, s_l_c, s_l_c_s,
+                         r_lut_s,
+                         mbs: tl.constexpr,
                          sparsity_block_size: tl.constexpr,
                          TRITON_BLOCK_SIZE: tl.constexpr) -> None:
     # Get triton block indices
@@ -440,21 +450,22 @@ def softmax_fused_kernel(x,
     # Load reverse sparsity indices of row
     blk_rev_idx = (pid_bat * s_l_b_s +
                    pid_row * s_l_r_s +
-                   (tl.arange(0,
-    blk_rev_msk = (blk_rev_idx >= 0 and blk_rev_idx < s_l_b * s_l_b_s)
-
+                   (tl.arange(0, mbs) * s_l_c_s))
+    blk_rev_msk = ((blk_rev_idx >= 0 and blk_rev_idx < s_l_b * s_l_b_s) and
+                   (tl.arange(0, mbs) < s_l_c))
+    blk_rev = tl.load(r_lut_s + blk_rev_idx, mask=blk_rev_msk, other=-1).to(tl.int32)
 
     if (not (tl.min(blk_rev) == -1 and
              tl.max(blk_rev) == -1)):
         # Extend sparsity indices to cover sparsity blocks
         blk_rev_ext = tl.expand_dims(blk_rev, -1)
-        blk_rev_ext = tl.broadcast_to(blk_rev_ext, (
-        blk_rev_ext = tl.reshape(blk_rev_ext, (
+        blk_rev_ext = tl.broadcast_to(blk_rev_ext, (mbs, sparsity_block_size))
+        blk_rev_ext = tl.reshape(blk_rev_ext, (mbs * sparsity_block_size))
 
         # Load line of x
         blk_x_idx = (blk_rev_ext * x_b_s +
                      pid_lin * x_r_s +
-                     (tl.arange(0,
+                     (tl.arange(0, mbs * sparsity_block_size) % sparsity_block_size) * x_c_s)
         blk_x_mask = ((blk_x_idx >= 0 and blk_x_idx < x_b * x_b_s)
                       and blk_rev_ext != -1)
         blk_x = tl.load(x + blk_x_idx, mask=blk_x_mask, other=float("-inf"))
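The rewritten index arithmetic above gathers one logical row from up to mbs sparse blocks: the sorted reverse-LUT entries are broadcast over sparsity_block_size columns, and the modulo term selects the column inside each block. A plain-PyTorch sketch of the same arithmetic follows (illustrative only; sizes, values, and the LUT row are made up):

```python
import torch

# Re-derive the kernel's row gather in plain PyTorch (not library code).
sparsity_block_size = 4
mbs = 2                                    # max_blocks_line, padded to a power of two
x = torch.arange(2 * 4 * 4, dtype=torch.float32).reshape(2, 4, 4)  # two sparse blocks
x_b_s, x_r_s, x_c_s = x.stride()

blk_rev = torch.tensor([1, 0])             # sorted reverse-LUT entries for one row
pid_lin = 2                                # row index inside the sparsity blocks

# Mirror blk_rev_ext: each block index repeated over its columns.
blk_rev_ext = blk_rev.unsqueeze(-1).expand(mbs, sparsity_block_size).reshape(-1)
col = torch.arange(mbs * sparsity_block_size) % sparsity_block_size
blk_x_idx = blk_rev_ext * x_b_s + pid_lin * x_r_s + col * x_c_s
blk_x_mask = (blk_x_idx >= 0) & (blk_x_idx < x.numel()) & (blk_rev_ext != -1)

# Out-of-layout positions load as -inf, matching the kernel's `other` argument.
row = torch.where(blk_x_mask, x.reshape(-1)[blk_x_idx.clamp(min=0)],
                  torch.tensor(float("-inf")))
print(row)  # the mbs * sparsity_block_size values the softmax reduces over
```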
@@ -478,9 +489,10 @@ def softmax_fused_kernel_grad(g,
                               g_b, g_b_s, g_r_s, g_c_s,
                               x,
                               x_b, x_b_s, x_r_s, x_c_s,
-                              s_l_b, s_l_b_s, s_l_r_s, s_l_c
-
+                              s_l_b, s_l_b_s, s_l_r_s, s_l_c, s_l_c_s,
+                              r_lut_s,
                               o,
+                              mbs: tl.constexpr,
                               sparsity_block_size: tl.constexpr,
                               TRITON_BLOCK_SIZE: tl.constexpr) -> None:
     # Get triton block indices
@@ -491,21 +503,22 @@ def softmax_fused_kernel_grad(g,
     # Load reverse sparsity indices of row
     blk_rev_idx = (pid_bat * s_l_b_s +
                    pid_row * s_l_r_s +
-                   (tl.arange(0,
-    blk_rev_msk = (blk_rev_idx >= 0 and blk_rev_idx < s_l_b * s_l_b_s)
-
+                   (tl.arange(0, mbs) * s_l_c_s))
+    blk_rev_msk = ((blk_rev_idx >= 0 and blk_rev_idx < s_l_b * s_l_b_s) and
+                   (tl.arange(0, mbs) < s_l_c))
+    blk_rev = tl.load(r_lut_s + blk_rev_idx, mask=blk_rev_msk, other=-1).to(tl.int32)
 
     if (not (tl.min(blk_rev) == -1 and
              tl.max(blk_rev) == -1)):
         # Extend sparsity indices to cover sparsity blocks
         blk_rev_ext = tl.expand_dims(blk_rev, -1)
-        blk_rev_ext = tl.broadcast_to(blk_rev_ext, (
-        blk_rev_ext = tl.reshape(blk_rev_ext, (
+        blk_rev_ext = tl.broadcast_to(blk_rev_ext, (mbs, sparsity_block_size))
+        blk_rev_ext = tl.reshape(blk_rev_ext, (mbs * sparsity_block_size))
 
         # Load line of g
         blk_g_idx = (blk_rev_ext * g_b_s +
                      pid_lin * g_r_s +
-                     (tl.arange(0,
+                     (tl.arange(0, mbs * sparsity_block_size) % sparsity_block_size) * g_c_s)
         blk_g_mask = ((blk_g_idx >= 0 and blk_g_idx < g_b * g_b_s)
                       and blk_rev_ext != -1)
         blk_g = tl.load(g + blk_g_idx, mask=blk_g_mask)
@@ -513,7 +526,7 @@ def softmax_fused_kernel_grad(g,
         # Load line of x
         blk_x_idx = (blk_rev_ext * x_b_s +
                      pid_lin * x_r_s +
-                     (tl.arange(0,
+                     (tl.arange(0, mbs * sparsity_block_size) % sparsity_block_size) * x_c_s)
         blk_x_mask = ((blk_x_idx >= 0 and blk_x_idx < x_b * x_b_s)
                       and blk_rev_ext != -1)
         blk_x = tl.load(x + blk_x_idx, mask=blk_x_mask)
@@ -521,6 +534,7 @@ def softmax_fused_kernel_grad(g,
         # Compute gradients
         blk_grad = blk_x * (blk_g - tl.sum(blk_x * blk_g))
 
+        # Store output
         tl.store(o + blk_x_idx, blk_grad, mask=blk_x_mask)
 
 
@@ -528,25 +542,36 @@ def softmax_fused_build_lut(lut: dict, sparsity_layout: Tensor):
     if lut is None:
         lut = dict()
 
-    if "
+    if "sparsity_reverse_lut_sorted" not in lut:
         sparsity_layout_flat = sparsity_layout.reshape(-1)
-
-
-
-
-
-
-
-
+        sparsity_reverse_lut_sorted = (((torch.cumsum(sparsity_layout_flat, dim=-1) - 1) *
+                                        (sparsity_layout_flat == 1) -
+                                        (1 * (sparsity_layout_flat == 0)))
+                                       .reshape(sparsity_layout.size())
+                                       .sort(descending=True, dim=-1)[0]
+                                       .reshape(-1).contiguous())
+        lut["sparsity_reverse_lut_sorted"] = sparsity_reverse_lut_sorted
+
+    if "max_blocks_line" not in lut:
+        sparsity_reverse_lut_sorted = lut["sparsity_reverse_lut_sorted"]
+        max_blocks_line = ((torch.reshape(sparsity_reverse_lut_sorted, (-1, sparsity_layout.size(-1)))
+                            != -1)
+                           .sum(dim=-1)
+                           .max()
+                           .item())
+        lut["max_blocks_line"] = ceil_pow2(max(max_blocks_line, 2))
+
+    validate_contiguous(sparsity_layout, lut["sparsity_reverse_lut_sorted"])
 
     return lut
 
 
 # noinspection PyUnusedLocal
 def softmax_fused_setup_context(ctx, inputs, output):
-    (_, sparsity_layout,
+    (_, sparsity_layout, sparsity_reverse_lut_sorted, max_blocks_line, sparsity_block_size) = inputs
 
-    ctx.save_for_backward(output, sparsity_layout,
+    ctx.save_for_backward(output, sparsity_layout, sparsity_reverse_lut_sorted)
+    ctx.max_blocks_line = max_blocks_line
     ctx.sparsity_block_size = sparsity_block_size
 
 
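softmax_fused_build_lut above derives two entries: a per-row, descending-sorted reverse lookup of sparse block indices (empty positions become -1 and are pushed to the end of each row), and max_blocks_line, the largest number of populated blocks in any row, clamped to at least 2 and rounded up to a power of two so it can serve as the mbs constexpr. A standalone sketch reproducing both computations on a tiny layout (the bit trick at the end stands in for ceil_pow2, whose body is not part of this diff):

```python
import torch

# Standalone sketch (not library code): reproduce the two LUT entries built
# above for a tiny 1 x 2 x 3 sparsity layout.
sparsity_layout = torch.tensor([[[1, 0, 1],
                                 [0, 0, 0]]])

sparsity_layout_flat = sparsity_layout.reshape(-1)
# Index of each populated block, -1 for empty positions, sorted so that the
# populated blocks of every row come first.
sparsity_reverse_lut_sorted = (((torch.cumsum(sparsity_layout_flat, dim=-1) - 1) *
                                (sparsity_layout_flat == 1) -
                                (1 * (sparsity_layout_flat == 0)))
                               .reshape(sparsity_layout.size())
                               .sort(descending=True, dim=-1)[0]
                               .reshape(-1).contiguous())
print(sparsity_reverse_lut_sorted)  # tensor([ 1,  0, -1, -1, -1, -1])

# Largest number of populated blocks in any row, clamped to >= 2 and rounded
# up to a power of two; the bit trick below stands in for ceil_pow2.
max_blocks_line = ((sparsity_reverse_lut_sorted.reshape(-1, sparsity_layout.size(-1)) != -1)
                   .sum(dim=-1).max().item())
print(1 << (max(max_blocks_line, 2) - 1).bit_length())  # 2, usable as the mbs constexpr
```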
blksprs/utils/tools.py
CHANGED
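blksprs/ops/softmax.py above now imports ceil_pow2 from this module, and the file grows by five lines. A plausible sketch of a power-of-two ceiling helper, offered as an assumption about those added lines rather than the package's actual code:

```python
def ceil_pow2(value: int) -> int:
    # Hypothetical stand-in for the helper added here: round a positive
    # integer up to the next power of two (e.g. 3 -> 4, 4 -> 4, 5 -> 8).
    return 1 << (value - 1).bit_length()
```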
{blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: blksprs
-Version: 2.1.
+Version: 2.1.3
 Summary: A lightweight library for operations on block-sparse matrices in PyTorch.
 Author-email: Felix Schön <schoen@kr.tuwien.ac.at>
 Project-URL: Homepage, https://github.com/FelixSchoen/blksprs
@@ -196,8 +196,8 @@ def test_readme():
 
 # Other available functions
 bs.ops.transpose(o_sparse, sparsity_layout_o, sparsity_block_size)
-bs.ops.softmax(o_sparse, sparsity_layout_o, sparsity_block_size)
-bs.ops.softmax_fused(o_sparse, sparsity_layout_o, sparsity_block_size) # Significantly faster version that requires that rows of matrix fit into memory
+bs.ops.softmax(o_sparse, sparsity_layout_o, sparsity_block_size, flag_fused=False)
+bs.ops.softmax_fused(o_sparse, sparsity_layout_o, sparsity_block_size) # Significantly faster version that requires that rows of matrix fit into memory (default if flag is not set)
 bs.ops.misc.row_wise_sum(o_sparse, sparsity_layout_o, sparsity_block_size)
 bs.ops.misc.row_wise_max(o_sparse, sparsity_layout_o, sparsity_block_size)
 
{blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
-blksprs/__init__.py,sha256=
+blksprs/__init__.py,sha256=NRxydw4i9jg7WeDuojfEePdtdbughV9AZsEcT9yywK4,1615
 blksprs/layouting/distribution_layout.py,sha256=ur1ty_2U-Hfj78hMWsLZvu7ZuGhzW3qGLKMc72DfTZM,5861
 blksprs/layouting/sparsity_layout.py,sha256=eXHmu2h7K5Q-YUpfOxocJoeP_5ZoQFZf_eHLxRZQbYU,11207
-blksprs/ops/conversion.py,sha256=
+blksprs/ops/conversion.py,sha256=nv5gXiyZkUtk1kCIlPr0Vpaj4G8G6dJdW7StlbV3nDw,21914
 blksprs/ops/distribution.py,sha256=0tPldv0ARzmCV1CU2jvfqpHBgOuHPrDFiCtqsLs7CZc,20789
-blksprs/ops/flow.py,sha256=
-blksprs/ops/matmul.py,sha256=
+blksprs/ops/flow.py,sha256=oUn_xDT74220-EmnBnB8bRNtbS1mjbxWpm76PFsK22o,8246
+blksprs/ops/matmul.py,sha256=ES9bpiCIRBxaynNIL5ftDP0c9LSArbj8YJqkPEzBaIU,11879
 blksprs/ops/partitioning.py,sha256=cfQmY9BZqGTvvJorIhtb-EyuGRJGPraWR-wTKdb47aI,9954
 blksprs/ops/repeat.py,sha256=TLYNxwPuT9y5K9xyM41WK5gnggAJF3lI61Q2K7zWjns,9035
-blksprs/ops/softmax.py,sha256=
+blksprs/ops/softmax.py,sha256=tfC_jaAKrA956rxGeb57klMuYRKTiyMCd5Zg5DIH3fc,23649
 blksprs/ops/transpose.py,sha256=U-VAyLRT6_NDv9qYSFzBqfVlDeIpTqAMEXkqto0VF6w,4072
 blksprs/ops/misc/broadcast_ops.py,sha256=-PrHiSJikZh8nXUmXxSCtFEP27TTxFr4wcrNxBjnimk,5987
 blksprs/ops/misc/row_wise.py,sha256=n5FJjAuOd8BHBJQx4bsQwr-HmXkR9PYVAqfk77wjOFU,19653
@@ -15,9 +15,9 @@ blksprs/utils/autotuning.py,sha256=a-kmWRjJ3eED2XbjkQeOJSyW8bdIs27HgKMPvAKqWeU,2
 blksprs/utils/benchmarking.py,sha256=dLabDscTFn5NkmOI1g7DnKeTneUYW3RIVv9MDF-8BKc,1271
 blksprs/utils/blksprs_tensor.py,sha256=pfoz59aJixj_fIoFx76ySiygwRQUemmgjMKepZ2c4j0,244
 blksprs/utils/processing.py,sha256=RNkEDc0g-sNHRuMPkRzNWU13d3_lIkXMJdoqES4yQTM,3738
-blksprs/utils/tools.py,sha256=
+blksprs/utils/tools.py,sha256=TKygEKge4wJtJnXXDg8BTL8vzBpqIJsQ_A3_5FmLpcE,859
 blksprs/utils/validation.py,sha256=G8eQlvJVMKfEX3k2AwBD0A6Ck-gFoRLpLNY6HXsB3fA,4348
-blksprs-2.1.
-blksprs-2.1.
-blksprs-2.1.
-blksprs-2.1.
+blksprs-2.1.3.dist-info/METADATA,sha256=6ZrxPPpkLwXgmq1d-4VQBNPNjlRm76dEMI-LJyiqlfI,9712
+blksprs-2.1.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+blksprs-2.1.3.dist-info/top_level.txt,sha256=qyp0IHeY3H2GQA97i4hk_To5rRBS2YcE1HRPSLy04fk,8
+blksprs-2.1.3.dist-info/RECORD,,
{blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/WHEEL
File without changes
{blksprs-2.1.1.dist-info → blksprs-2.1.3.dist-info}/top_level.txt
File without changes