lopace 0.1.1__tar.gz → 0.1.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lopace-0.1.1/lopace.egg-info → lopace-0.1.3}/PKG-INFO +36 -1
- {lopace-0.1.1 → lopace-0.1.3}/README.md +33 -0
- {lopace-0.1.1 → lopace-0.1.3/lopace.egg-info}/PKG-INFO +36 -1
- {lopace-0.1.1 → lopace-0.1.3}/pyproject.toml +4 -2
- {lopace-0.1.1 → lopace-0.1.3}/scripts/generate_visualizations.py +99 -0
- {lopace-0.1.1 → lopace-0.1.3}/setup.py +3 -1
- {lopace-0.1.1 → lopace-0.1.3}/LICENSE +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/MANIFEST.in +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace/__init__.py +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace/compressor.py +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace.egg-info/SOURCES.txt +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace.egg-info/dependency_links.txt +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace.egg-info/requires.txt +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/lopace.egg-info/top_level.txt +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/requirements.txt +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/scripts/__init__.py +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/setup.cfg +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/tests/__init__.py +0 -0
- {lopace-0.1.1 → lopace-0.1.3}/tests/test_compressor.py +0 -0
{lopace-0.1.1/lopace.egg-info → lopace-0.1.3}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lopace
-Version: 0.1.1
+Version: 0.1.3
 Summary: Lossless Optimized Prompt Accurate Compression Engine
 Home-page: https://github.com/connectaman/LoPace
 Author: Aman Ulla
@@ -8,6 +8,8 @@ License: MIT
 Project-URL: Homepage, https://github.com/connectaman/LoPace
 Project-URL: Repository, https://github.com/connectaman/LoPace
 Project-URL: Issues, https://github.com/connectaman/LoPace/issues
+Project-URL: PyPI, https://pypi.org/project/lopace/
+Project-URL: Hugging Face Spaces, https://huggingface.co/spaces/codewithaman/LoPace
 Keywords: prompt,compression,tokenization,zstd,bpe,nlp
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
@@ -31,12 +33,44 @@ Dynamic: requires-python
 
 # LoPace
 
+<div align="center">
+  <img src="screenshots/logo-text.png" alt="LoPace Logo" width="600"/>
+</div>
+
 **Lossless Optimized Prompt Accurate Compression Engine**
 
 A professional, open-source Python package for compressing and decompressing prompts using multiple techniques: Zstd, Token-based (BPE), and Hybrid methods. Achieve up to 80% space reduction while maintaining perfect lossless reconstruction.
 
 [](https://opensource.org/licenses/MIT)
 [](https://www.python.org/downloads/)
+[](https://pypi.org/project/lopace/)
+[](https://huggingface.co/spaces/codewithaman/LoPace)
+
+## The Problem: Storage Challenges with Large Prompts
+
+When building LLM applications, storing prompts efficiently becomes a critical challenge, especially as you scale:
+
+- **💾 Massive Storage Overhead**: Large system prompts, context windows, and conversation histories consume significant database space. For applications serving thousands of users with multiple LLM calls, this translates to gigabytes or terabytes of storage requirements.
+
+- **🚀 Performance Bottlenecks**: Storing uncompressed prompts increases database size, slows down queries, and increases I/O operations. As your user base grows, retrieval and storage operations become progressively slower.
+
+- **💰 Cost Implications**: Larger databases mean higher cloud storage costs, increased backup times, and more expensive infrastructure. With LLM applications handling millions of prompts, these costs compound rapidly.
+
+- **⚡ Latency Issues**: Loading large prompts from storage adds latency to your application. Multiple LLM calls per user session multiply this problem, creating noticeable delays in response times.
+
+## The Solution: LoPace Compression Engine
+
+LoPace solves these challenges by providing **lossless compression** that dramatically reduces storage requirements while maintaining fast compression and decompression speeds:
+
+- **📉 Up to 80% Space Reduction**: The hybrid compression method can reduce prompt storage by 70-80% on average, meaning you store 5x less data while maintaining perfect fidelity.
+
+- **⚡ Fast Processing**: Achieve 50-200 MB/s compression throughput with sub-linear scaling. Decompression is even faster (100-500 MB/s), ensuring minimal impact on application latency.
+
+- **✅ 100% Lossless**: Perfect reconstruction guarantees your prompts are identical to the original - no data loss, no corruption, no compromises.
+
+- **🎯 Production-Ready**: Optimized for database storage with minimal memory footprint (under 10 MB for typical use cases) and excellent scalability for millions of prompts.
+
+Whether you're storing system prompts for thousands of users, maintaining conversation histories, or caching LLM interactions, LoPace helps you optimize storage costs and improve performance without sacrificing data integrity.
 
 ## Features
 
@@ -284,6 +318,7 @@ Comprehensive benchmarks were conducted on 10 diverse prompts across three size
 ### Speed & Throughput Metrics
 
 [image]
+[image]
 
 **Key Insights:**
 - **Compression speeds range from 50-200 MB/s** depending on method and prompt size
```
{lopace-0.1.1 → lopace-0.1.3}/README.md

```diff
@@ -1,11 +1,43 @@
 # LoPace
 
+<div align="center">
+  <img src="screenshots/logo-text.png" alt="LoPace Logo" width="600"/>
+</div>
+
 **Lossless Optimized Prompt Accurate Compression Engine**
 
 A professional, open-source Python package for compressing and decompressing prompts using multiple techniques: Zstd, Token-based (BPE), and Hybrid methods. Achieve up to 80% space reduction while maintaining perfect lossless reconstruction.
 
 [](https://opensource.org/licenses/MIT)
 [](https://www.python.org/downloads/)
+[](https://pypi.org/project/lopace/)
+[](https://huggingface.co/spaces/codewithaman/LoPace)
+
+## The Problem: Storage Challenges with Large Prompts
+
+When building LLM applications, storing prompts efficiently becomes a critical challenge, especially as you scale:
+
+- **💾 Massive Storage Overhead**: Large system prompts, context windows, and conversation histories consume significant database space. For applications serving thousands of users with multiple LLM calls, this translates to gigabytes or terabytes of storage requirements.
+
+- **🚀 Performance Bottlenecks**: Storing uncompressed prompts increases database size, slows down queries, and increases I/O operations. As your user base grows, retrieval and storage operations become progressively slower.
+
+- **💰 Cost Implications**: Larger databases mean higher cloud storage costs, increased backup times, and more expensive infrastructure. With LLM applications handling millions of prompts, these costs compound rapidly.
+
+- **⚡ Latency Issues**: Loading large prompts from storage adds latency to your application. Multiple LLM calls per user session multiply this problem, creating noticeable delays in response times.
+
+## The Solution: LoPace Compression Engine
+
+LoPace solves these challenges by providing **lossless compression** that dramatically reduces storage requirements while maintaining fast compression and decompression speeds:
+
+- **📉 Up to 80% Space Reduction**: The hybrid compression method can reduce prompt storage by 70-80% on average, meaning you store 5x less data while maintaining perfect fidelity.
+
+- **⚡ Fast Processing**: Achieve 50-200 MB/s compression throughput with sub-linear scaling. Decompression is even faster (100-500 MB/s), ensuring minimal impact on application latency.
+
+- **✅ 100% Lossless**: Perfect reconstruction guarantees your prompts are identical to the original - no data loss, no corruption, no compromises.
+
+- **🎯 Production-Ready**: Optimized for database storage with minimal memory footprint (under 10 MB for typical use cases) and excellent scalability for millions of prompts.
+
+Whether you're storing system prompts for thousands of users, maintaining conversation histories, or caching LLM interactions, LoPace helps you optimize storage costs and improve performance without sacrificing data integrity.
 
 ## Features
 
@@ -253,6 +285,7 @@ Comprehensive benchmarks were conducted on 10 diverse prompts across three size
 ### Speed & Throughput Metrics
 
 [image]
+[image]
 
 **Key Insights:**
 - **Compression speeds range from 50-200 MB/s** depending on method and prompt size
```
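The README's lossless round-trip claim maps directly onto the compressor API exercised by the benchmark script changed later in this release. A minimal round-trip sketch, assuming `PromptCompressor` and `CompressionMethod` are importable from the top-level `lopace` package (the import path is an assumption; the constructor arguments and `compress`/`decompress` calls mirror `scripts/generate_visualizations.py`):

```python
# Round-trip sketch: compress a prompt with the hybrid method and verify
# that decompression reproduces the original text exactly.
# The import path is assumed; the calls follow scripts/generate_visualizations.py.
from lopace import PromptCompressor, CompressionMethod

compressor = PromptCompressor(model="cl100k_base", zstd_level=15)
prompt = "You are a helpful assistant. Answer concisely. " * 100  # any long prompt

compressed = compressor.compress(prompt, CompressionMethod.HYBRID)
restored = compressor.decompress(compressed, CompressionMethod.HYBRID)

assert restored == prompt  # lossless: identical to the original
print(f"{len(prompt.encode('utf-8'))} -> {len(compressed)} bytes")
```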
{lopace-0.1.1 → lopace-0.1.3/lopace.egg-info}/PKG-INFO

Identical to the PKG-INFO diff shown above: the version bump to 0.1.3, the two new Project-URL entries (PyPI and Hugging Face Spaces), and the expanded README content embedded in the package metadata.
{lopace-0.1.1 → lopace-0.1.3}/pyproject.toml

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "lopace"
-version = "0.1.1"
+version = "0.1.3"
 description = "Lossless Optimized Prompt Accurate Compression Engine"
 readme = "README.md"
 requires-python = ">=3.8"
@@ -34,4 +34,6 @@ dependencies = [
 [project.urls]
 Homepage = "https://github.com/connectaman/LoPace"
 Repository = "https://github.com/connectaman/LoPace"
-Issues = "https://github.com/connectaman/LoPace/issues"
+Issues = "https://github.com/connectaman/LoPace/issues"
+"PyPI" = "https://pypi.org/project/lopace/"
+"Hugging Face Spaces" = "https://huggingface.co/spaces/codewithaman/LoPace"
```
{lopace-0.1.1 → lopace-0.1.3}/scripts/generate_visualizations.py

```diff
@@ -796,6 +796,104 @@ def plot_scalability(df: pd.DataFrame, output_dir: Path):
     print(f" Saved: scalability_analysis.svg")
 
 
+def plot_original_vs_decompressed(output_dir: Path):
+    """Plot original vs decompressed data comparison across multiple prompts."""
+    compressor = PromptCompressor(model="cl100k_base", zstd_level=15)
+    prompts = generate_test_prompts()
+
+    # Select a few diverse prompts for visualization
+    selected_prompts = [
+        ("Small Prompt 1", prompts[0][1]),
+        ("Medium Prompt 1", prompts[4][1]),
+        ("Large Prompt 1", prompts[7][1]),
+        ("Medium Prompt 2", prompts[5][1]),
+        ("Small Prompt 2", prompts[1][1]),
+    ]
+
+    # Use Hybrid method (best compression)
+    method = CompressionMethod.HYBRID
+
+    fig, axes = plt.subplots(len(selected_prompts), 1, figsize=(16, 14))
+    if len(selected_prompts) == 1:
+        axes = [axes]
+
+    fig.suptitle('Original vs Decompressed: Lossless Compression Verification',
+                 fontsize=18, fontweight='bold', y=0.995)
+
+    for idx, (title, prompt) in enumerate(selected_prompts):
+        ax = axes[idx]
+
+        # Compress and decompress
+        compressed = compressor.compress(prompt, method)
+        decompressed = compressor.decompress(compressed, method)
+
+        # Verify losslessness
+        is_lossless = prompt == decompressed
+
+        # Create representation: show byte-by-byte or character-by-character
+        original_bytes = prompt.encode('utf-8')
+        decompressed_bytes = decompressed.encode('utf-8')
+
+        # Sample points for visualization (every Nth byte/char for performance)
+        sample_rate = max(1, len(original_bytes) // 200)  # ~200 points max
+        sample_indices = np.arange(0, len(original_bytes), sample_rate)
+
+        # Get byte values (0-255) for visualization
+        original_byte_values = np.array([original_bytes[i] for i in sample_indices])
+        decompressed_byte_values = np.array([decompressed_bytes[i] for i in sample_indices])
+
+        # Normalize to 0-100 range for better visualization
+        original_normalized = (original_byte_values / 255.0) * 100
+        decompressed_normalized = (decompressed_byte_values / 255.0) * 100
+
+        # Plot original (blue line)
+        ax.plot(sample_indices, original_normalized, 'b-', linewidth=2.0,
+                label='Original', alpha=0.7)
+
+        # Plot decompressed (red line) - should overlap perfectly for lossless
+        ax.plot(sample_indices, decompressed_normalized, 'r-', linewidth=2.0,
+                label='Decompressed', alpha=0.7, linestyle='--')
+
+        # Mark key compression points (sample every Nth point)
+        step = max(1, len(sample_indices) // 20)
+        key_indices = sample_indices[::step]
+        key_original = original_normalized[::step]
+        ax.scatter(key_indices, key_original,
+                   color='red', s=40, alpha=0.8, zorder=5,
+                   label='Sample Points', marker='o', edgecolors='darkred', linewidths=1)
+
+        # Add text info
+        original_size = len(original_bytes)
+        compressed_size = len(compressed)
+        compression_ratio = original_size / compressed_size if compressed_size > 0 else 0
+        space_saved = (1 - compressed_size / original_size) * 100 if original_size > 0 else 0
+
+        info_text = (f"Size: {original_size} → {compressed_size} bytes "
+                     f"({space_saved:.1f}% saved, {compression_ratio:.2f}x) | "
+                     f"Lossless: {'✓' if is_lossless else '✗'}")
+
+        ax.text(0.02, 0.95, info_text, transform=ax.transAxes,
+                fontsize=10, verticalalignment='top',
+                bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5),
+                fontweight='bold')
+
+        ax.set_ylabel(f'{title}\n(Normalized Byte Values)', fontweight='bold')
+        ax.set_xlabel('Byte Position' if idx == len(selected_prompts) - 1 else '', fontweight='bold')
+        ax.set_title(f'{title} - {len(original_bytes)} bytes', fontweight='bold', pad=10)
+        ax.grid(True, alpha=0.3, linestyle='--')
+        ax.legend(loc='upper right', framealpha=0.9, fontsize=9)
+        ax.set_ylim(-5, 105)
+
+        # Highlight that they overlap perfectly (lossless)
+        if is_lossless:
+            ax.axhspan(-5, 105, alpha=0.05, color='green', zorder=0)
+
+    plt.tight_layout(rect=[0, 0, 1, 0.99])
+    plt.savefig(output_dir / 'original_vs_decompressed.svg', format='svg', bbox_inches='tight')
+    plt.close()
+    print(f" Saved: original_vs_decompressed.svg")
+
+
 def main():
     """Main function to generate all visualizations."""
     # Create output directory
@@ -827,6 +925,7 @@ def main():
     plot_memory_usage(df, output_dir)
     plot_comprehensive_comparison(df, output_dir)
     plot_scalability(df, output_dir)
+    plot_original_vs_decompressed(output_dir)
 
     print("\n" + "=" * 70)
     print("Visualization generation complete!")
```
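The new helper can also be invoked on its own, outside `main()`. A minimal sketch, assuming the repository root is the working directory and that `scripts` is importable as a package (it ships an `__init__.py`); the output directory name below is an assumption, not what `main()` uses:

```python
from pathlib import Path

# Hypothetical standalone call of the plot added in 0.1.3.
# out_dir is an arbitrary choice; the function writes original_vs_decompressed.svg into it.
from scripts.generate_visualizations import plot_original_vs_decompressed

out_dir = Path("visualizations")
out_dir.mkdir(exist_ok=True)
plot_original_vs_decompressed(out_dir)
```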
{lopace-0.1.1 → lopace-0.1.3}/setup.py

```diff
@@ -10,7 +10,7 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
 
 setup(
     name="lopace",
-    version="0.1.1",
+    version="0.1.3",
     author="Aman Ulla",
     description="Lossless Optimized Prompt Accurate Compression Engine",
     long_description=long_description,
@@ -36,5 +36,7 @@ setup(
     project_urls={
         "Bug Reports": "https://github.com/connectaman/LoPace/issues",
         "Source": "https://github.com/connectaman/LoPace",
+        "PyPI": "https://pypi.org/project/lopace/",
+        "Hugging Face Spaces": "https://huggingface.co/spaces/codewithaman/LoPace",
     },
 )
```
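After installing 0.1.3, the two new URLs surface in the installed distribution's metadata; a quick check using only the standard library:

```python
# Print the Project-URL entries of the installed lopace distribution.
# Per the PKG-INFO diff above, 0.1.3 should list PyPI and Hugging Face Spaces
# alongside Homepage, Repository, and Issues.
from importlib.metadata import metadata

meta = metadata("lopace")
for url in meta.get_all("Project-URL") or []:
    print(url)
```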
The remaining 13 files listed above (LICENSE, MANIFEST.in, the lopace package modules, the lopace.egg-info metadata files, requirements.txt, scripts/__init__.py, setup.cfg, and the tests) are unchanged between 0.1.1 and 0.1.3.