lattifai 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lattifai/__init__.py CHANGED
@@ -1,3 +1,7 @@
+import os
+import sys
+import warnings
+
 from .base_client import LattifAIError
 from .io import SubtitleIO

@@ -13,6 +17,65 @@ except Exception:
     __version__ = '0.1.0'  # fallback version


+# Check and auto-install k2 if not present
+def _check_and_install_k2():
+    """Check if k2 is installed and attempt to install it if not."""
+    try:
+        import k2
+
+        return True
+    except ImportError:
+        pass
+
+    # k2 not found, try to install it
+    if os.environ.get('SKIP_K2_INSTALL'):
+        warnings.warn(
+            '\n' + '=' * 70 + '\n'
+            ' k2 is not installed and auto-installation is disabled.\n'
+            ' \n'
+            ' To use lattifai, please install k2 by running:\n'
+            ' \n'
+            ' install-k2\n'
+            ' \n' + '=' * 70,
+            RuntimeWarning,
+            stacklevel=2,
+        )
+        return False
+
+    print('\n' + '=' * 70)
+    print(' k2 is not installed. Attempting to install it now...')
+    print(' This is a one-time setup and may take a few minutes.')
+    print('=' * 70 + '\n')
+
+    try:
+        # Import and run the installation script
+        from scripts.install_k2 import install_k2_main
+
+        install_k2_main(dry_run=False)
+
+        print('\n' + '=' * 70)
+        print(' k2 has been installed successfully!')
+        print('=' * 70 + '\n')
+        return True
+    except Exception as e:
+        warnings.warn(
+            '\n' + '=' * 70 + '\n'
+            f' Failed to auto-install k2: {e}\n'
+            ' \n'
+            ' Please install k2 manually by running:\n'
+            ' \n'
+            ' install-k2\n'
+            ' \n' + '=' * 70,
+            RuntimeWarning,
+            stacklevel=2,
+        )
+        return False
+
+
+# Auto-install k2 on first import
+_check_and_install_k2()
+
+
 # Lazy import for LattifAI to avoid dependency issues during basic import
 def __getattr__(name):
     if name == 'LattifAI':
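The new import hook consults only the `SKIP_K2_INSTALL` environment variable before attempting an install. A minimal sketch of how a consumer would opt out (the variable name comes from the diff above; any non-empty value is truthy under `os.environ.get`):

```python
import os

# Any non-empty value disables the auto-install path in _check_and_install_k2;
# a RuntimeWarning pointing at `install-k2` is emitted instead.
os.environ['SKIP_K2_INSTALL'] = '1'

import lattifai  # noqa: E402  -- import happens after the env var is set
```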
lattifai/client.py CHANGED
@@ -24,8 +24,9 @@ class LattifAI(SyncAPIClient):
         self,
         *,
         api_key: Optional[str] = None,
-        base_url: Optional[str] = None,
+        model_name_or_path: str = 'Lattifai/Lattice-1-Alpha',
         device: str = 'cpu',
+        base_url: Optional[str] = None,
         timeout: Union[float, int] = 60.0,
         max_retries: int = 2,
         default_headers: Optional[Dict[str, str]] = None,
@@ -52,19 +53,16 @@ class LattifAI(SyncAPIClient):
         )

         # Initialize components
-        model_name_or_path = '/Users/feiteng/GEEK/OmniCaptions/HF_models/Lattice-1-Alpha'
-
         if not Path(model_name_or_path).exists():
-            from huggingface_hub import hf_hub_download
+            from huggingface_hub import snapshot_download

-            model_path = hf_hub_download(repo_id=model_name_or_path, repo_type='model')
+            model_path = snapshot_download(repo_id=model_name_or_path, repo_type='model')
         else:
             model_path = model_name_or_path

         self.tokenizer = LatticeTokenizer.from_pretrained(
             client_wrapper=self,
-            model_path=f'{model_path}/words.bin',
-            g2p_model_path=f'{model_path}/g2p.bin' if Path(f'{model_path}/g2p.bin').exists() else None,
+            model_path=model_path,
             device=device,
         )
         self.worker = Lattice1AlphaWorker(model_path, device=device, num_threads=8)
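The constructor now resolves `model_name_or_path` in two steps: use it directly if it is an existing local directory, otherwise treat it as a Hugging Face repo id. A sketch of that resolution in isolation (it mirrors the diff; `snapshot_download` is the real `huggingface_hub` API and fetches the whole repo, unlike `hf_hub_download`, which targets a single file and would have needed a `filename` argument):

```python
from pathlib import Path

from huggingface_hub import snapshot_download

model_name_or_path = 'Lattifai/Lattice-1-Alpha'  # default from the new signature

if not Path(model_name_or_path).exists():
    # Not a local directory: treat it as a Hub repo id and download everything
    # (words.bin, g2p.bin, ...), returning the local cache directory.
    model_path = snapshot_download(repo_id=model_name_or_path, repo_type='model')
else:
    model_path = model_name_or_path
```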
lattifai/tokenizers/phonemizer.py CHANGED
@@ -1,7 +1,7 @@
 import re
 from typing import List, Optional, Union

-from dp.phonemizer import Phonemizer
+from dp.phonemizer import Phonemizer  # g2p-phonemizer
 from num2words import num2words

 LANGUAGE = 'omni'
lattifai/tokenizers/tokenizer.py CHANGED
@@ -33,22 +33,26 @@ class LatticeTokenizer:
     def from_pretrained(
         client_wrapper: SyncAPIClient,
         model_path: str,
-        g2p_model_path: Optional[str] = None,
         device: str = 'cpu',
         compressed: bool = True,
     ):
         """Load tokenizer from exported binary file"""
+        from pathlib import Path
+
+        words_model_path = f'{model_path}/words.bin'
         if compressed:
-            with gzip.open(model_path, 'rb') as f:
+            with gzip.open(words_model_path, 'rb') as f:
                 data = pickle.load(f)
         else:
-            with open(model_path, 'rb') as f:
+            with open(words_model_path, 'rb') as f:
                 data = pickle.load(f)

         tokenizer = LatticeTokenizer(client_wrapper=client_wrapper)
         tokenizer.words = data['words']
         tokenizer.dictionaries = defaultdict(list, data['dictionaries'])
         tokenizer.oov_word = data['oov_word']
+
+        g2p_model_path = f'{model_path}/g2p.bin' if Path(f'{model_path}/g2p.bin').exists() else None
         if g2p_model_path:
             tokenizer.g2p_model = G2Phonemizer(g2p_model_path, device=device)
         return tokenizer
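`from_pretrained` now derives both file paths from a single model directory. A sketch of the `words.bin` layout implied by the loader above (the three keys are taken from the diff; the path is illustrative):

```python
import gzip
import pickle

# Illustrative path; in the library this is f'{model_path}/words.bin'.
with gzip.open('Lattice-1-Alpha/words.bin', 'rb') as f:
    data = pickle.load(f)

# Keys consumed by from_pretrained, per the diff above.
words = data['words']
dictionaries = data['dictionaries']
oov_word = data['oov_word']
```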
lattifai/workers/lattice1_alpha.py CHANGED
@@ -5,8 +5,9 @@ from typing import Any, BinaryIO, Dict, Tuple, Union

 import numpy as np
 import onnxruntime as ort
+import resampy
+import soundfile as sf
 import torch
-import torchaudio
 from lhotse import FbankConfig
 from lhotse.features.kaldi.layers import Wav2LogFilterBank
 from lhotse.utils import Pathlike
@@ -58,11 +59,12 @@ class Lattice1AlphaWorker:

     def load_audio(self, audio: Union[Pathlike, BinaryIO]) -> Tuple[torch.Tensor, int]:
         # load audio
-        waveform, sample_rate = torchaudio.load(audio, channels_first=True)
-        if waveform.size(0) > 1:  # TODO: support choose channel
-            waveform = torch.mean(waveform, dim=0, keepdim=True)
+        waveform, sample_rate = sf.read(audio, always_2d=True, dtype='float32')
+        if waveform.shape[1] > 1:  # TODO: support choose channel
+            waveform = np.mean(waveform, axis=1, keepdims=True)
         if sample_rate != self.config['sample_rate']:
-            waveform = torchaudio.functional.resample(waveform, sample_rate, self.config['sample_rate'])
+            waveform = resampy.resample(waveform, sample_rate, self.config['sample_rate'], axis=0)
+        waveform = torch.from_numpy(waveform.T).to(self.device)  # (1, L)
         return waveform

     def alignment(
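`load_audio` drops the torchaudio dependency: soundfile decodes, numpy downmixes, resampy resamples, and only the final tensor is handed to torch. A standalone sketch of the same path, assuming a 16 kHz target rate and CPU device (in the worker these come from `self.config['sample_rate']` and `self.device`):

```python
import numpy as np
import resampy
import soundfile as sf
import torch

target_sr = 16000  # assumption; the worker reads self.config['sample_rate']

# soundfile returns (num_samples, num_channels) with always_2d=True.
waveform, sample_rate = sf.read('audio.wav', always_2d=True, dtype='float32')
if waveform.shape[1] > 1:
    waveform = np.mean(waveform, axis=1, keepdims=True)  # downmix to mono
if sample_rate != target_sr:
    waveform = resampy.resample(waveform, sample_rate, target_sr, axis=0)
waveform = torch.from_numpy(waveform.T)  # transpose to channels-first (1, L)
```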
lattifai-0.1.5.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lattifai
-Version: 0.1.4
+Version: 0.1.5
 Summary: Lattifai Python SDK: Seamless Integration with Lattifai's Speech and Video AI Services
 Author-email: Lattifai Technologies <tech@lattifai.com>
 Maintainer-email: Lattice <tech@lattifai.com>
@@ -26,10 +26,10 @@ License: MIT License
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 Project-URL: Homepage, https://github.com/lattifai/lattifai-python
-Project-URL: Documentation, https://github.com/lattifai/lattifai-python/api.md
+Project-URL: Documentation, https://github.com/lattifai/lattifai-python/README.md
 Project-URL: Bug Tracker, https://github.com/lattifai/lattifai-python/issues
 Project-URL: Discussions, https://github.com/lattifai/lattifai-python/discussions
-Project-URL: Changelog, https://github.com/lattifai/lattifai-python/CHANGELOG
+Project-URL: Changelog, https://github.com/lattifai/lattifai-python/CHANGELOG.md
 Keywords: lattifai,speech recognition,video analysis,ai,sdk,api client
 Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
@@ -54,6 +54,12 @@ Requires-Dist: python-dotenv
 Requires-Dist: lhotse>=1.26.0
 Requires-Dist: colorful>=0.5.6
 Requires-Dist: lattifai-core>=0.1.4
+Requires-Dist: pysubs2
+Requires-Dist: praatio
+Requires-Dist: tgt
+Requires-Dist: onnxruntime
+Requires-Dist: resampy
+Requires-Dist: g2p-phonemizer>=0.1.1
 Provides-Extra: numpy
 Requires-Dist: numpy; extra == "numpy"
 Provides-Extra: test
@@ -70,24 +76,51 @@ Dynamic: license-file

 # LattifAI Python

-> ⚠️ **Under Active Development** - Official release scheduled for October 18, 2025
+<!-- <p align="center">
+    <a href="https://badge.fury.io/py/lattifai"><img src="https://badge.fury.io/py/lattifai.svg" alt="PyPI version"></a>
+</p> -->

 [![PyPI version](https://badge.fury.io/py/lattifai.svg)](https://badge.fury.io/py/lattifai)

-The official Python library for the LattifAI API - Advanced forced alignment and subtitle generation powered by `Lattice-1-Alpha` model.
+<p align="center">
+    &nbsp&nbsp 🖥️ <a href="https://github.com/lattifai/lattifai-python">GitHub</a> &nbsp&nbsp | &nbsp&nbsp🤗 <a href="https://huggingface.co/Lattifai/Lattice-1-Alpha">Lattifai/Lattice-1-Alpha</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://lattifai.com/blogs">Blog</a> &nbsp&nbsp | &nbsp&nbsp <a href="https://discord.gg/gTZqdaBJ"><img src="https://img.shields.io/badge/Discord-Join-5865F2?logo=discord&logoColor=white" alt="Discord" style="vertical-align: middle;"></a>&nbsp&nbsp
+</p>
+
+The official Python library for the LattifAI API - Advanced forced alignment and subtitle generation powered by the [Lattifai/Lattice-1-Alpha](https://huggingface.co/Lattifai/Lattice-1-Alpha) model.

 ## Installation

 ```bash
 pip install lattifai
+
+install-k2  # Required: This step must be executed to install k2 dependencies
 ```

+> **⚠️ Important**: After installing `lattifai`, you **must** run `install-k2` to install the required k2 library. The library will not function properly without this step.
+
 ## GPU Support Status

-> **🚧 GPU Support Coming Soon**: CUDA/GPU acceleration is currently under active development. The current version supports CPU-only processing. GPU support will be available in an upcoming release, providing significant performance improvements for large audio files.
+> **🚧 GPU Support Coming Soon**: The current version supports CPU-only processing. NVIDIA GPU and Apple Silicon acceleration will be available in an upcoming release, providing significant performance improvements for large audio files.

 ## Quick Start

+### Command Line Interface
+
+The library provides powerful command-line tools for batch processing and automation.
+
+#### Using the Main CLI
+
+```bash
+# Show available commands
+lattifai --help
+
+# Perform alignment (same as lattifai-align)
+lattifai align input_audio.wav input_subtitle.srt output.srt
+
+# Subtitle format conversion
+lattifai subtitle convert input.srt output.vtt
+```
+
 ### Python API

 ```python
@@ -97,6 +130,7 @@ from lattifai import LattifAI
 # Initialize client
 client = LattifAI(
     api_key=os.environ.get("LATTIFAI_API_KEY"),  # Optional if set in environment
+    model_name_or_path='Lattifai/Lattice-1-Alpha',
     device='cpu',  # Currently only CPU is supported, GPU support coming soon
 )

@@ -111,35 +145,6 @@ result = client.alignment(
 print(f"Alignment complete! Output saved to: {result}")
 ```

-### Command Line Interface
-
-The library provides powerful command-line tools for batch processing and automation.
-
-#### Basic Alignment
-
-```bash
-# Align audio with subtitle file
-lattifai-align input_audio.wav input_subtitle.srt output_aligned.srt
-
-# Specify input format explicitly
-lattifai-align -F srt input_audio.wav transcript.txt output.srt
-
-# Auto-detect input format (default)
-lattifai-align input_audio.wav subtitle_file.vtt output.vtt
-```
-
-#### Using the Main CLI
-
-```bash
-# Show available commands
-lattifai --help
-
-# Perform alignment (same as lattifai-align)
-lattifai align input_audio.wav input_subtitle.srt output.srt
-
-# Subtitle format conversion
-lattifai subtitle convert input.srt output.vtt
-```

 #### Supported Input Formats

@@ -163,12 +168,11 @@ lattifai subtitle convert input.srt output.vtt
 ```python
 LattifAI(
     api_key: Optional[str] = None,  # API key (or set LATTIFAI_API_KEY env var)
-    base_url: Optional[str] = None,  # API base URL (or set LATTIFAI_BASE_URL env var)
     device: str = 'cpu',  # Device for processing (currently only 'cpu' supported)
 )
 ```

-> **Note**: The `device` parameter currently only supports `'cpu'`. GPU support (`'cuda'`) is under active development and will be available in future releases.
+> **Note**: The `device` parameter currently only supports `'cpu'`. GPU support will be available in future releases.

 #### Methods

@@ -194,41 +198,11 @@ def alignment(
 **Returns:**
 - Path to output file (if `output_subtitle_path` specified) or alignment results

-**Example:**
-```python
-# Basic usage
-result = client.alignment("audio.wav", "subtitle.srt")
-
-# With explicit format and output path
-result = client.alignment(
-    audio="interview.mp3",
-    subtitle="transcript.txt",
-    format="txt",
-    output_subtitle_path="aligned_interview.srt"
-)
-```
-
-
 ## Configuration

 ### Environment Variables

 - `LATTIFAI_API_KEY`: Your LattifAI API key (required)
-- `LATTIFAI_BASE_URL`: Base URL for the API (default: `https://api.lattifai.com/v1`)
-
-### Device Configuration
-
-> **Note**: GPU/CUDA support is currently under development and will be available in a future release. Currently, only CPU processing is supported.
-
-The library is designed to support both CPU and GPU processing:
-
-```python
-# CPU processing (currently supported)
-client = LattifAI(device='cpu')
-
-# GPU processing (coming soon)
-# client = LattifAI(device='cuda')  # Will be available in future releases
-```

 ### Performance Tuning

@@ -236,11 +210,11 @@ For better performance with large files:

 ```python
 client = LattifAI(
-    device='cpu',  # Currently only CPU is supported
+    device='cpu',  # Currently only CPU is supported, cuda/mps will be supported in upcoming releases.
 )
 ```

-> **GPU Acceleration**: CUDA support is in active development and will significantly improve processing speed for large audio files. Expected in upcoming releases.
+> **GPU Acceleration**: CUDA and Apple Silicon (MPS) support is in active development and will significantly improve processing speed for large audio files. Expected in upcoming releases.

 ## Examples

@@ -340,7 +314,7 @@ except Exception as e:

 ## Model Information

-This library uses the **Lattice-1-Alpha** model for high-quality forced alignment and subtitle generation.
+This library uses the **[Lattice-1-Alpha](https://huggingface.co/Lattifai/Lattice-1-Alpha)** model for high-quality forced alignment and subtitle generation.

 ### Model Features
 - **High Accuracy**: State-of-the-art alignment precision
@@ -360,7 +334,7 @@ This library uses the **Lattice-1-Alpha** model for high-quality forced alignmen
 - **Core Dependencies**:
   - httpx (HTTP client)
   - lhotse (audio processing)
-  - colorful (colored output)
+  - k2 (audio computation)
   - python-dotenv (environment management)
   - click (command line interface)

@@ -397,21 +371,24 @@ To manually run these tools:

 ```bash
 # Sort imports
-isort src/ tests/
+isort src/ tests/ scripts/

 # Run linter
-ruff check src/ tests/
+ruff check src/ tests/ scripts/

 # Run formatter
-ruff format src/ tests/
+ruff format src/ tests/ scripts/

 # Fix issues automatically
-ruff check --fix src/ tests/
+ruff check --fix src/ tests/ scripts/
 ```

 ### Running Tests

 ```bash
+# Install test dependencies first
+pip install -e ".[test]"
+
 # Run all tests
 pytest

@@ -461,7 +438,7 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS

 ## Support

-- **Documentation**: [API Documentation](https://github.com/lattifai/lattifai-python/api.md)
+- **Documentation**: [API Documentation](https://github.com/lattifai/lattifai-python/README.md)
 - **Issues**: [GitHub Issues](https://github.com/lattifai/lattifai-python/issues)
 - **Discussions**: [GitHub Discussions](https://github.com/lattifai/lattifai-python/discussions)
-- **Changelog**: [CHANGELOG.md](https://github.com/lattifai/lattifai-python/CHANGELOG)
+- **Changelog**: [CHANGELOG.md](https://github.com/lattifai/lattifai-python/CHANGELOG.md)
lattifai-0.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+lattifai/__init__.py,sha256=wPE3D03AK8Ktu2WFXqJ6rx6jvPVWKstFQgyPq0tndUU,2324
+lattifai/base_client.py,sha256=ktFtATjL9pLSJUD-VqeJKA1FHkrsGHX7Uq_x00H7gO8,3322
+lattifai/client.py,sha256=p9mFkyOINKBGB87TE6P4y6Z3fPOPSqi6HAyz00TMuag,4520
+lattifai/bin/__init__.py,sha256=7YhmtEM8kbxJtz2-KIskvpLKBZAvkMSceVx8z4fkgQ4,61
+lattifai/bin/align.py,sha256=uX8VaATzn8CgdHUtry1ZGhXiz0Jr89ELdfRK6GWC1t8,989
+lattifai/bin/cli_base.py,sha256=4xlN4cnJZh54ErhHUHgJpyVsrcB-ftwniFzRQL_7SlU,289
+lattifai/bin/subtitle.py,sha256=bUWImAHpvyY59Vskqb5loQiD5ytQOxR8lTQRiQ4LyNA,647
+lattifai/io/__init__.py,sha256=vHWRN7MvAch-GUeFqqO-gM57SM-4YOpGUjIxFJdjfPA,671
+lattifai/io/reader.py,sha256=ErPnPMUvYQpjZ7Vd86EsHUkOcEfKdoI8iM3yKHRzSOQ,2576
+lattifai/io/supervision.py,sha256=5UfSsgBhXoDU3-6drDtoD7y8HIiA4xRKZnbOKgeejwM,354
+lattifai/io/writer.py,sha256=1eAEFLlL8kricxRDPFBtVmeC4IiFyFnjbWXvw0VU-q4,2036
+lattifai/tokenizers/__init__.py,sha256=aqv44PDtq6g3oFFKW_l4HSR5ywT5W8eP1dHHywIvBfs,72
+lattifai/tokenizers/phonemizer.py,sha256=SfRi1KIMpmaao6OVmR1h_I_3QU-vrE6D5bh72Afg5XM,1759
+lattifai/tokenizers/tokenizer.py,sha256=u4lgS6-jN9cLuMNIojA4Swfsqb1EcyI7Bh_iw7tuL-s,5818
+lattifai/workers/__init__.py,sha256=s6YfkIq4FDIAzY9sPjRpXnJfszj2repqnMTqydRM5Zw,83
+lattifai/workers/lattice1_alpha.py,sha256=5OJ6APnFpWvi1azKlkbJqF85e2n5JyZ_m8L1XQ2r3qg,4862
+lattifai-0.1.5.dist-info/licenses/LICENSE,sha256=LNuoH5jpXXNKgjQ3XLwztFq8D3O7kZI-LSg81o4ym2M,1065
+scripts/__init__.py,sha256=4nwVNeJeEuGiWGBACgySViLlm5DrqcvFOWpo7Ds8MUA,49
+scripts/install_k2.py,sha256=iGdy_VZrwaBJfdKy5EDvPX0x5V4dH_lm3f8DEiVWcOQ,21166
+lattifai-0.1.5.dist-info/METADATA,sha256=LsJCRZPTmnuF279cKjO6jtK1ZzG8JrL0sEnOfjvoNxk,13644
+lattifai-0.1.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lattifai-0.1.5.dist-info/entry_points.txt,sha256=CwTI2NbJvF9msIHboAfTA99cmDr_HOWoODjS8R64JOw,131
+lattifai-0.1.5.dist-info/top_level.txt,sha256=-OVWZ68YYFcTN13ARkLasp2OUappe9wEVq-CKes7jM4,17
+lattifai-0.1.5.dist-info/RECORD,,
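For readers comparing the old and new RECORD entries: each line is `path,sha256=<digest>,<size>`, where the digest is the URL-safe base64 of the file's SHA-256 with padding stripped (the standard wheel RECORD format, PEP 376/427). A sketch of how to recompute one entry:

```python
import base64
import hashlib


def record_digest(path: str) -> str:
    """Recompute the sha256=... field of a wheel RECORD entry."""
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).digest()
    return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')


# e.g. record_digest('lattifai/__init__.py') should return
# 'sha256=wPE3D03AK8Ktu2WFXqJ6rx6jvPVWKstFQgyPq0tndUU' for the 0.1.5 file.
```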
lattifai-0.1.5.dist-info/entry_points.txt CHANGED
@@ -1,3 +1,4 @@
 [console_scripts]
+install-k2 = scripts.install_k2:install_k2
 lattifai = lattifai.bin:cli
 lattifai-align = lattifai.bin.align:align
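The new `install-k2` console script resolves to `scripts.install_k2:install_k2`, which parses its own arguments, so a shell invocation is equivalent to this call (sketch):

```python
# Equivalent of running `install-k2 --dry-run` from the shell; install_k2
# reads sys.argv via argparse before delegating to install_k2_main.
import sys

from scripts.install_k2 import install_k2

sys.argv = ['install-k2', '--dry-run']
install_k2()
```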
scripts/__init__.py ADDED
@@ -0,0 +1 @@
+"""Scripts for lattifai package installation."""
scripts/install_k2.py ADDED
@@ -0,0 +1,520 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Auto-install the latest k2 wheel that matches the current machine.
+- Prints in English.
+- Sources:
+    Linux CUDA wheels: https://k2-fsa.github.io/k2/installation/pre-compiled-cuda-wheels-linux/index.html
+    macOS CPU wheels: https://k2-fsa.github.io/k2/installation/pre-compiled-cpu-wheels-macos/index.html
+    Windows CPU wheels: https://k2-fsa.github.io/k2/installation/pre-compiled-cpu-wheels-windows/index.html
+
+Usage:
+    python install_k2_auto.py            # install immediately
+    python install_k2_auto.py --dry-run  # only show what would be installed
+"""
+
+import argparse
+import os
+import platform
+import re
+import subprocess
+import sys
+import urllib.request
+from html.parser import HTMLParser
+from typing import List, Optional, Tuple
+
+os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
+os.environ['OMP_NUM_THREADS'] = '4'
+
+CUDA_LINUX_URL = 'https://k2-fsa.github.io/k2/installation/pre-compiled-cuda-wheels-linux/index.html'
+MAC_CPU_URL = 'https://k2-fsa.github.io/k2/installation/pre-compiled-cpu-wheels-macos/index.html'
+WIN_CPU_URL = 'https://k2-fsa.github.io/k2/installation/pre-compiled-cpu-wheels-windows/index.html'
+
+
+class WheelLinkParser(HTMLParser):
+    def __init__(self, parse_mode='wheels'):
+        super().__init__()
+        self.links: List[str] = []
+        self.parse_mode = parse_mode  # 'wheels' or 'versions'
+
+    def handle_starttag(self, tag, attrs):
+        if tag.lower() == 'a':
+            href = dict(attrs).get('href')
+            if href:
+                if self.parse_mode == 'wheels' and href.endswith('.whl'):
+                    self.links.append(href)
+                elif self.parse_mode == 'versions' and re.match(r'^\d+\.\d+\.\d+\.html$', href):
+                    # Match version links like "2.8.0.html"
+                    self.links.append(href)
+
+
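A hedged sketch of the two parse modes on hand-written HTML (the wheel filename here is invented for illustration):

```python
# Both parse modes of WheelLinkParser on a tiny hand-written page.
html = (
    '<a href="2.8.0.html">torch 2.8.0</a>'
    '<a href="k2-1.24.4.dev20240606+cuda12.1-cp310-cp310-linux_x86_64.whl">whl</a>'
)

versions = WheelLinkParser(parse_mode='versions')
versions.feed(html)
print(versions.links)  # ['2.8.0.html']

wheels = WheelLinkParser(parse_mode='wheels')
wheels.feed(html)
print(wheels.links)  # ['k2-1.24.4.dev20240606+cuda12.1-cp310-cp310-linux_x86_64.whl']
```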
+def fetch_wheel_links(
+    page_url: str, target_torch_version: Optional[str] = None, cuda_version: Optional[str] = None
+) -> List[str]:
+    """
+    Fetch wheel links from k2 pages. The structure is:
+    - Index page contains links to version-specific pages (e.g., 2.8.0.html)
+    - Version pages contain actual .whl file links
+
+    Args:
+        page_url: The base URL to fetch wheels from
+        target_torch_version: If specified, only fetch wheels for this torch version (e.g., "2.8.0")
+        cuda_version: If specified, prefer wheels with this CUDA version (e.g., "12.1")
+    """
+    with urllib.request.urlopen(page_url) as resp:
+        html = resp.read().decode('utf-8', errors='ignore')
+
+    # First, try to find version page links
+    version_parser = WheelLinkParser(parse_mode='versions')
+    version_parser.feed(html)
+
+    if version_parser.links:
+        # If we found version links, this is an index page
+        # Filter version links if target_torch_version is specified
+        version_links_to_process = version_parser.links
+        if target_torch_version:
+            target_filename = f'{target_torch_version}.html'
+            version_links_to_process = [link for link in version_parser.links if link == target_filename]
+            if not version_links_to_process:
+                print(f'[WARN] No page found for torch version {target_torch_version}')
+                return []
+            print(f'[INFO] Found torch version {target_torch_version}, fetching wheels from {target_filename}')
+        else:
+            # If no target version specified, choose the highest version (latest torch version)
+            def parse_version_from_link(link: str) -> Tuple[int, int, int]:
+                # Extract version from "2.8.0.html" -> (2, 8, 0)
+                match = re.match(r'^(\d+)\.(\d+)\.(\d+)\.html$', link)
+                if match:
+                    return (int(match.group(1)), int(match.group(2)), int(match.group(3)))
+                return (0, 0, 0)
+
+            # Sort by version and take the highest
+            sorted_versions = sorted(version_parser.links, key=parse_version_from_link, reverse=True)
+            if sorted_versions:
+                latest_version = sorted_versions[0]
+                version_links_to_process = [latest_version]
+                version_str = latest_version.replace('.html', '')
+                print(f'[INFO] No target torch version specified, using latest version: {version_str}')
+
+        # Fetch wheel links from version pages
+        all_wheel_links = []
+        base_url = page_url.rsplit('/', 1)[0]
+        py_tag, abi_tag = py_tags()
+
+        for version_link in version_links_to_process:
+            version_url = f'{base_url}/{version_link}'
+            try:
+                with urllib.request.urlopen(version_url) as resp:
+                    version_html = resp.read().decode('utf-8', errors='ignore')
+                wheel_parser = WheelLinkParser(parse_mode='wheels')
+                wheel_parser.feed(version_html)
+
+                # If target version specified or using latest version, find matching wheels
+                if target_torch_version or len(version_links_to_process) == 1:
+                    matching_wheels = []
+                    for wheel_link in wheel_parser.links:
+                        if py_tag in wheel_link and abi_tag in wheel_link:
+                            matching_wheels.append(wheel_link)
+
+                    if cuda_version and matching_wheels:
+                        # First try to find wheels with the specified CUDA version
+                        cuda_specific_wheels = []
+                        for wheel_link in matching_wheels:
+                            wheel_cuda = parse_cuda_from_filename(wheel_link)
+                            if wheel_cuda and wheel_cuda == cuda_version:
+                                cuda_specific_wheels.append(wheel_link)
+
+                        if cuda_specific_wheels:
+                            # Found wheels with specified CUDA version, pick the latest one by dev date
+                            def sort_by_devdate(wheel: str) -> int:
+                                return parse_devdate(wheel) or 0
+
+                            best_wheel = max(cuda_specific_wheels, key=sort_by_devdate)
+                            print(
+                                f'[INFO] Found matching wheel for Python {py_tag} and CUDA {cuda_version}: {best_wheel}'
+                            )
+                            return [best_wheel] if best_wheel.startswith('http') else [best_wheel]
+                        else:
+                            print(f'[WARN] No wheel found for CUDA {cuda_version}, falling back to latest version')
+
+                    # If no CUDA version specified or no matching CUDA wheels found, use the latest wheel
+                    if matching_wheels:
+
+                        def sort_by_devdate(wheel: str) -> int:
+                            return parse_devdate(wheel) or 0
+
+                        best_wheel = max(matching_wheels, key=sort_by_devdate)
+                        cuda_info = (
+                            f' (CUDA {parse_cuda_from_filename(best_wheel)})'
+                            if parse_cuda_from_filename(best_wheel)
+                            else ''
+                        )
+                        print(f'[INFO] Found matching wheel for Python {py_tag}{cuda_info}: {best_wheel}')
+                        return [best_wheel]
+
+                    version_str = version_link.replace('.html', '')
+                    print(f'[WARN] No wheel found for Python {py_tag} in torch {version_str}')
+                else:
+                    all_wheel_links.extend(wheel_parser.links)
+            except Exception as e:
+                print(f'[WARN] Failed to fetch {version_url}: {e}')
+                continue
+
+        # If target version specified or latest version but no matching wheel found
+        if target_torch_version or len(version_links_to_process) == 1:
+            return []
+
+        # Normalize to absolute URLs for all wheels case
+        abs_links = []
+        for href in all_wheel_links:
+            if href.startswith('http://') or href.startswith('https://'):
+                abs_links.append(href)
+            else:
+                # For huggingface links, they are already absolute in the href
+                abs_links.append(href)
+        return abs_links
+
+    else:
+        raise ValueError('No version links found on the page; unexpected page structure.')
+
+
+def py_tags() -> Tuple[str, str]:
+    """Return (py_tag, abi_tag), e.g. ('cp310', 'cp310') for CPython."""
+    impl = platform.python_implementation().lower()
+    if impl != 'cpython':
+        # Wheels are for CPython; still try cpXY
+        pass
+    major, minor = sys.version_info.major, sys.version_info.minor
+    tag = f'cp{major}{minor}'
+    return tag, tag
+
+
+def detect_torch_version() -> Optional[str]:
+    """
+    Detect installed PyTorch version string like '2.8.0'.
+    Returns None if PyTorch is not installed.
+    """
+    try:
+        import importlib
+
+        torch = importlib.import_module('torch')
+        version = getattr(torch, '__version__', None)
+        if version:
+            # Extract major.minor.patch from version string (remove +cu118 etc suffixes)
+            version_match = re.match(r'(\d+\.\d+\.\d+)', str(version))
+            if version_match:
+                return version_match.group(1)
+    except Exception:
+        pass
+    return None
+
+
+def detect_cuda_version_linux() -> Optional[str]:
+    """
+    Detect CUDA version string like '12.1'.
+    Priority: torch.version.cuda -> nvidia-smi -> None
+    """
+    # Try PyTorch if installed
+    try:
+        import importlib
+
+        torch = importlib.import_module('torch')
+        v = getattr(getattr(torch, 'version', None), 'cuda', None)
+        if v:
+            return str(v)
+    except Exception:
+        pass
+
+    # # Try nvidia-smi
+    # try:
+    #     out = subprocess.check_output(["nvidia-smi"], stderr=subprocess.STDOUT, text=True)
+    #     m = re.search(r"CUDA Version:\s*([\d.]+)", out)
+    #     if m:
+    #         return m.group(1)
+    # except Exception:
+    #     pass
+
+    return None
+
+
+def parse_cuda_from_filename(name: str) -> Optional[str]:
+    # e.g., ...+cuda12.1-..., ...+cuda11.8-...
+    m = re.search(r'cuda(\d+(?:\.\d+)?)', name)
+    return m.group(1) if m else None
+
+
+def parse_devdate(name: str) -> Optional[int]:
+    # e.g., dev20240606
+    m = re.search(r'dev(\d{8})', name)
+    return int(m.group(1)) if m else None
+
+
+def parse_version_tuple(name: str) -> Tuple[int, ...]:
+    # k2-<version>... take first contiguous version-like sequence
+    m = re.search(r'k2-([\d]+(?:\.[\d]+)*)', name)
+    if not m:
+        return tuple()
+    return tuple(int(p) for p in m.group(1).split('.'))
+
+
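The three filename parsers above drive wheel selection. A sketch of their outputs on the same invented wheel name used earlier:

```python
# Hypothetical wheel name, for illustration only.
name = 'k2-1.24.4.dev20240606+cuda12.1-cp310-cp310-linux_x86_64.whl'
print(parse_cuda_from_filename(name))  # '12.1'
print(parse_devdate(name))             # 20240606
print(parse_version_tuple(name))       # (1, 24, 4)
```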
+def best_match_cuda(candidates: List[str], installed_cuda: Optional[str]) -> List[str]:
+    """
+    Keep only CUDA wheels; if installed_cuda present, prefer same major.minor,
+    else fallback to highest CUDA version available.
+    """
+    cuda_wheels = [w for w in candidates if 'cuda' in w.lower()]
+    if not cuda_wheels:
+        return []
+
+    if installed_cuda:
+        # Normalize like '12.1' -> (12,1)
+        def to_tuple(v: str) -> Tuple[int, int]:
+            parts = v.split('.')
+            major = int(parts[0])
+            minor = int(parts[1]) if len(parts) > 1 else 0
+            return (major, minor)
+
+        target = to_tuple(installed_cuda)
+
+        # Score by distance in (major, minor); prefer exact or closest lower/higher
+        def score(w: str) -> Tuple[int, int, int]:
+            wc = parse_cuda_from_filename(w) or '0'
+            wt = to_tuple(wc)
+            # absolute distance
+            dist = (abs(wt[0] - target[0]) * 100) + abs(wt[1] - target[1])
+            # Prefer same major, then higher minor not exceeding target, etc.
+            bias = 0 if wt[0] == target[0] else 1
+            # Negative if <= target to prefer not exceeding
+            not_exceed = 0 if (wt <= target) else 1
+            return (dist, bias, not_exceed)
+
+        cuda_wheels.sort(key=score)
+        # Keep top-N that share the best CUDA version string (for later date/version sorting)
+        best_cuda = parse_cuda_from_filename(cuda_wheels[0])
+        cuda_wheels = [w for w in cuda_wheels if parse_cuda_from_filename(w) == best_cuda]
+        return cuda_wheels
+
+    # No installed CUDA detected: pick the highest CUDA in page (by version tuple)
+    def cudatuple(w: str) -> Tuple[int, int]:
+        c = parse_cuda_from_filename(w) or '0'
+        parts = c.split('.')
+        major = int(parts[0])
+        minor = int(parts[1]) if len(parts) > 1 else 0
+        return (major, minor)
+
+    cuda_wheels.sort(key=cudatuple, reverse=True)
+    top = parse_cuda_from_filename(cuda_wheels[0])
+    return [w for w in cuda_wheels if parse_cuda_from_filename(w) == top]
+
+
+def platform_tag_filters() -> List[str]:
+    system = platform.system().lower()
+    machine = platform.machine().lower()
+
+    if system == 'linux':
+        # Manylinux tags typically include 'linux_x86_64' or 'manylinux...' but
+        # the page often lists 'linux_x86_64'. We'll match the common substrings.
+        if 'aarch64' in machine or 'arm64' in machine:
+            return ['linux_aarch64', 'manylinux_aarch64']
+        return ['linux_x86_64', 'manylinux_x86_64']
+
+    if system == 'darwin':
+        if 'arm64' in machine or 'aarch64' in machine:
+            return ['macosx_11_0_arm64', 'macosx_12_0_arm64', 'macosx_13_0_arm64', 'macosx_14_0_arm64']
+        # Intel macs
+        return [
+            'macosx_10_9_x86_64',
+            'macosx_11_0_x86_64',
+            'macosx_12_0_x86_64',
+            'macosx_13_0_x86_64',
+            'macosx_14_0_x86_64',
+        ]
+
+    if system == 'windows':
+        if 'arm64' in machine:
+            # If k2 provides win_arm64 wheels in future, this will catch them.
+            return ['win_arm64']
+        return ['win_amd64']
+
+    return []
+
+
+def choose_best_wheel(links: List[str], require_cuda: bool) -> Optional[str]:
+    py_tag, abi_tag = py_tags()
+    plat_filters = platform_tag_filters()
+
+    def match_basic(name: str) -> bool:
+        # python tag & abi tag must appear
+        if py_tag not in name or abi_tag not in name:
+            return False
+        # platform tag must match one of known substrings
+        if not any(tag in name for tag in plat_filters):
+            return False
+        return True
+
+    candidates = [u for u in links if match_basic(u)]
+    if not candidates:
+        return None
+
+    if require_cuda:
+        candidates = best_match_cuda(candidates, detect_cuda_version_linux())
+        if not candidates:
+            return None
+    else:
+        # For CPU, try to exclude CUDA wheels explicitly
+        candidates = [u for u in candidates if 'cuda' not in u.lower()]
+
+    # Now sort by (dev date desc, version desc, URL lex desc as tie-breaker)
+    def sort_key(u: str):
+        date = parse_devdate(u) or 0
+        ver = parse_version_tuple(u)
+        return (date, ver, u)
+
+    candidates.sort(key=sort_key, reverse=True)
+    return candidates[0] if candidates else None
+
+
+def run_pip_install(wheel_url: str, dry_run: bool):
+    cmd = [sys.executable, '-m', 'pip', 'install', '--upgrade', '--no-cache-dir', wheel_url]
+    print('[INFO] Pip command:', ' '.join(cmd))
+    if dry_run:
+        print('[DRY-RUN] Skipping actual installation.')
+        return
+    try:
+        subprocess.check_call(cmd)
+        print('[SUCCESS] k2 has been installed successfully.')
+    except subprocess.CalledProcessError as e:
+        print('[ERROR] pip install failed with exit code:', e.returncode)
+        sys.exit(e.returncode)
+
+
+def install_k2_main(dry_run: bool = False):
+    """Main function to install k2 without argparse, suitable for programmatic use."""
+    system = platform.system().lower()
+    print(f'[INFO] Detected OS: {system}')
+    print(f'[INFO] Python: {platform.python_version()} | Impl: {platform.python_implementation()}')
+
+    # Check if torch is already installed
+    torch_version = detect_torch_version()
+    if torch_version:
+        print(f'[INFO] Detected PyTorch version: {torch_version}')
+    else:
+        print('[INFO] PyTorch not detected, will search all available versions')
+
+    if system == 'linux':
+        print('[INFO] Target: Linux (CUDA wheels)')
+        cuda_version = detect_cuda_version_linux()
+        if not cuda_version:
+            print('[WARN] No CUDA detected on Linux.')
+            # print("[HINT] Install CUDA or build from source if CPU-only is required.")
+            # print("")
+            # print("To build k2 from source, you can run the following commands:")
+            # print("    git clone https://github.com/k2-fsa/k2.git")
+            # print("    cd k2")
+            # print('    export K2_MAKE_ARGS="-j6"')
+            # print("    python3 setup.py install")
+            # print("")
+            # response = input("Do you want to continue with source installation? (y/N): ").strip().lower()
+            # if response in ["y", "yes"]:
+            #     print("[INFO] Please run the commands above manually to install k2 from source.")
+            # sys.exit(2)
+        print(f'[INFO] Detected CUDA version: {cuda_version}')
+
+        wheel = None
+        for _torch_version in [torch_version, None] if torch_version else [None]:
+            for _cuda_version in [cuda_version, None] if cuda_version else [None]:
+                links = fetch_wheel_links(CUDA_LINUX_URL, _torch_version, cuda_version=_cuda_version)
+                if _torch_version and links:
+                    # If we have torch version and found matching wheel, use it directly
+                    wheel = links[0]
+                else:
+                    # Fallback to traditional selection
+                    if not links:
+                        links = fetch_wheel_links(CUDA_LINUX_URL)
+                    wheel = choose_best_wheel(links, require_cuda=_cuda_version is not None)
+
+                    if not _torch_version and links and not wheel:
+                        wheel = links[0]  # Pick first available as last resort
+
+                if not wheel:
+                    if _cuda_version:
+                        print(
+                            f'[WARN] No suitable wheel found for CUDA {_cuda_version}, '
+                            'trying without CUDA preference...'
+                        )
+                else:
+                    break  # Found a wheel, exit loop
+
+            if not wheel and _torch_version:
+                print(
+                    f'[WARN] Tried torch version {_torch_version}, but not found wheel, trying without torch version...'
+                )
+
+            if wheel:
+                break
+
+        print(f'[INFO] Selected wheel:\n {wheel}')
+        run_pip_install(wheel, dry_run)
+        return
+
+    elif system == 'darwin':
+        print('[INFO] Target: macOS (CPU wheels)')
+        for _torch_version in [torch_version, None] if torch_version else [None]:
+            links = fetch_wheel_links(MAC_CPU_URL, _torch_version)
+            if _torch_version and links:
+                # If we have torch version and found matching wheel, use it directly
+                wheel = links[0]
+            else:
+                # Fallback to traditional selection
+                if not links:
+                    links = fetch_wheel_links(MAC_CPU_URL)
+                wheel = choose_best_wheel(links, require_cuda=False)
+                if links and not wheel:
+                    wheel = links[0]  # Pick first available as last resort
+
+        if not wheel:
+            print('[ERROR] Could not find a suitable macOS CPU wheel for your Python/platform.')
+            sys.exit(1)
+
+        print(f'[INFO] Selected wheel:\n {wheel}')
+        run_pip_install(wheel, dry_run)
+        return
+
+    elif system == 'windows':
+        print('[INFO] Target: Windows (CPU wheels)')
+        for _torch_version in [torch_version, None] if torch_version else [None]:
+            links = fetch_wheel_links(WIN_CPU_URL, torch_version)
+            if torch_version and links:
+                # If we have torch version and found matching wheel, use it directly
+                wheel = links[0]
+            else:
+                # Fallback to traditional selection
+                if not links:
+                    links = fetch_wheel_links(WIN_CPU_URL)
+                wheel = choose_best_wheel(links, require_cuda=False)
+                if links and not wheel:
+                    wheel = links[0]  # Pick first available as last resort
+
+        if not wheel:
+            print('[ERROR] Could not find a suitable Windows CPU wheel for your Python/platform.')
+            sys.exit(1)
+        print(f'[INFO] Selected wheel:\n {wheel}')
+        run_pip_install(wheel, dry_run)
+        return
+
+    else:
+        print(f'[ERROR] Unsupported OS: {system}')
+        sys.exit(3)
+
+
+def install_k2():
+    """CLI entry point with argparse support."""
+    parser = argparse.ArgumentParser(description='Auto-install the latest k2 wheel for your environment.')
+    parser.add_argument('--dry-run', action='store_true', help='Show what would be installed without making changes.')
+    args = parser.parse_args()
+    install_k2_main(dry_run=args.dry_run)
+
+
+if __name__ == '__main__':
+    install_k2()
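To preview wheel selection without touching the environment, the script's own `--dry-run` flag (or `install_k2_main(dry_run=True)`) stops short of the pip call; a sketch:

```python
# Programmatic dry run: resolves and prints the selected k2 wheel, then stops
# before installing (run_pip_install returns early when dry_run is True).
from scripts.install_k2 import install_k2_main

install_k2_main(dry_run=True)
```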
lattifai-0.1.4.dist-info/RECORD DELETED
@@ -1,22 +0,0 @@
-lattifai/__init__.py,sha256=8vZF9_yObaoDZ6sUaJBuQqUp5mGBRfD4z0SdTY_8BcQ,672
-lattifai/base_client.py,sha256=ktFtATjL9pLSJUD-VqeJKA1FHkrsGHX7Uq_x00H7gO8,3322
-lattifai/client.py,sha256=szz9aT_GTa5JKA2ZGsd7XHr56aG_H-nf83yqRcPUVxs,4664
-lattifai/bin/__init__.py,sha256=7YhmtEM8kbxJtz2-KIskvpLKBZAvkMSceVx8z4fkgQ4,61
-lattifai/bin/align.py,sha256=uX8VaATzn8CgdHUtry1ZGhXiz0Jr89ELdfRK6GWC1t8,989
-lattifai/bin/cli_base.py,sha256=4xlN4cnJZh54ErhHUHgJpyVsrcB-ftwniFzRQL_7SlU,289
-lattifai/bin/subtitle.py,sha256=bUWImAHpvyY59Vskqb5loQiD5ytQOxR8lTQRiQ4LyNA,647
-lattifai/io/__init__.py,sha256=vHWRN7MvAch-GUeFqqO-gM57SM-4YOpGUjIxFJdjfPA,671
-lattifai/io/reader.py,sha256=ErPnPMUvYQpjZ7Vd86EsHUkOcEfKdoI8iM3yKHRzSOQ,2576
-lattifai/io/supervision.py,sha256=5UfSsgBhXoDU3-6drDtoD7y8HIiA4xRKZnbOKgeejwM,354
-lattifai/io/writer.py,sha256=1eAEFLlL8kricxRDPFBtVmeC4IiFyFnjbWXvw0VU-q4,2036
-lattifai/tokenizers/__init__.py,sha256=aqv44PDtq6g3oFFKW_l4HSR5ywT5W8eP1dHHywIvBfs,72
-lattifai/tokenizers/phonemizer.py,sha256=Q5Z-4rbT3AjAPLNPnyvWGcEaJuKXRudgeIK6tUhVsJs,1741
-lattifai/tokenizers/tokenizer.py,sha256=Qqg12zihl192Tlax6plVyxthrnzBciGLSRuzFQRgOdc,5663
-lattifai/workers/__init__.py,sha256=s6YfkIq4FDIAzY9sPjRpXnJfszj2repqnMTqydRM5Zw,83
-lattifai/workers/lattice1_alpha.py,sha256=kR5wNLMn1qN14PvRA6RlWjQUGblYrJ636ILC-XkvS0s,4770
-lattifai-0.1.4.dist-info/licenses/LICENSE,sha256=LNuoH5jpXXNKgjQ3XLwztFq8D3O7kZI-LSg81o4ym2M,1065
-lattifai-0.1.4.dist-info/METADATA,sha256=HEsoKRRfjRoeNwUff7Cx44sYP9trid_bdtX3j5WqK6o,13628
-lattifai-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lattifai-0.1.4.dist-info/entry_points.txt,sha256=BaixG8HYzE3Ff0QDAmsbX8cjsxZSLwUv5u4cnIpunr0,88
-lattifai-0.1.4.dist-info/top_level.txt,sha256=tHSoXF26r-IGfbIP_JoYATqbmf14h5NrnNJGH4j5reI,9
-lattifai-0.1.4.dist-info/RECORD,,