pullama-cli 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pullama_cli-1.0.0/.github/workflows/publish.yml +35 -0
- pullama_cli-1.0.0/.gitignore +211 -0
- pullama_cli-1.0.0/LICENSE +22 -0
- pullama_cli-1.0.0/PKG-INFO +210 -0
- pullama_cli-1.0.0/README.md +185 -0
- pullama_cli-1.0.0/pullama/__init__.py +0 -0
- pullama_cli-1.0.0/pullama/__main__.py +818 -0
- pullama_cli-1.0.0/pyproject.toml +52 -0
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
name: Publish to PyPI
|
|
2
|
+
|
|
3
|
+
on:
|
|
4
|
+
release:
|
|
5
|
+
types: [published]
|
|
6
|
+
|
|
7
|
+
permissions:
|
|
8
|
+
id-token: write # Required for trusted publishing
|
|
9
|
+
|
|
10
|
+
jobs:
|
|
11
|
+
build-and-publish:
|
|
12
|
+
name: Build and publish to PyPI
|
|
13
|
+
runs-on: ubuntu-latest
|
|
14
|
+
|
|
15
|
+
environment:
|
|
16
|
+
name: pypi
|
|
17
|
+
url: https://pypi.org/project/pullama/
|
|
18
|
+
|
|
19
|
+
steps:
|
|
20
|
+
- name: Checkout
|
|
21
|
+
uses: actions/checkout@v4
|
|
22
|
+
|
|
23
|
+
- name: Set up Python
|
|
24
|
+
uses: actions/setup-python@v5
|
|
25
|
+
with:
|
|
26
|
+
python-version: "3.x"
|
|
27
|
+
|
|
28
|
+
- name: Install build
|
|
29
|
+
run: pip install build
|
|
30
|
+
|
|
31
|
+
- name: Build package
|
|
32
|
+
run: python -m build
|
|
33
|
+
|
|
34
|
+
- name: Publish to PyPI
|
|
35
|
+
uses: pypa/gh-action-pypi-publish@release/v1
|
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[codz]
|
|
4
|
+
*$py.class
|
|
5
|
+
|
|
6
|
+
# C extensions
|
|
7
|
+
*.so
|
|
8
|
+
|
|
9
|
+
# Distribution / packaging
|
|
10
|
+
.Python
|
|
11
|
+
build/
|
|
12
|
+
develop-eggs/
|
|
13
|
+
dist/
|
|
14
|
+
downloads/
|
|
15
|
+
eggs/
|
|
16
|
+
.eggs/
|
|
17
|
+
lib/
|
|
18
|
+
lib64/
|
|
19
|
+
parts/
|
|
20
|
+
sdist/
|
|
21
|
+
var/
|
|
22
|
+
wheels/
|
|
23
|
+
share/python-wheels/
|
|
24
|
+
*.egg-info/
|
|
25
|
+
.installed.cfg
|
|
26
|
+
*.egg
|
|
27
|
+
MANIFEST
|
|
28
|
+
|
|
29
|
+
# PyInstaller
|
|
30
|
+
# Usually these files are written by a python script from a template
|
|
31
|
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
32
|
+
*.manifest
|
|
33
|
+
*.spec
|
|
34
|
+
|
|
35
|
+
# Installer logs
|
|
36
|
+
pip-log.txt
|
|
37
|
+
pip-delete-this-directory.txt
|
|
38
|
+
|
|
39
|
+
# Unit test / coverage reports
|
|
40
|
+
htmlcov/
|
|
41
|
+
.tox/
|
|
42
|
+
.nox/
|
|
43
|
+
.coverage
|
|
44
|
+
.coverage.*
|
|
45
|
+
.cache
|
|
46
|
+
nosetests.xml
|
|
47
|
+
coverage.xml
|
|
48
|
+
*.cover
|
|
49
|
+
*.py.cover
|
|
50
|
+
.hypothesis/
|
|
51
|
+
.pytest_cache/
|
|
52
|
+
cover/
|
|
53
|
+
|
|
54
|
+
# Translations
|
|
55
|
+
*.mo
|
|
56
|
+
*.pot
|
|
57
|
+
|
|
58
|
+
# Django stuff:
|
|
59
|
+
*.log
|
|
60
|
+
local_settings.py
|
|
61
|
+
db.sqlite3
|
|
62
|
+
db.sqlite3-journal
|
|
63
|
+
|
|
64
|
+
# Flask stuff:
|
|
65
|
+
instance/
|
|
66
|
+
.webassets-cache
|
|
67
|
+
|
|
68
|
+
# Scrapy stuff:
|
|
69
|
+
.scrapy
|
|
70
|
+
|
|
71
|
+
# Sphinx documentation
|
|
72
|
+
docs/_build/
|
|
73
|
+
|
|
74
|
+
# PyBuilder
|
|
75
|
+
.pybuilder/
|
|
76
|
+
target/
|
|
77
|
+
|
|
78
|
+
# Jupyter Notebook
|
|
79
|
+
.ipynb_checkpoints
|
|
80
|
+
|
|
81
|
+
# IPython
|
|
82
|
+
profile_default/
|
|
83
|
+
ipython_config.py
|
|
84
|
+
|
|
85
|
+
# pyenv
|
|
86
|
+
# For a library or package, you might want to ignore these files since the code is
|
|
87
|
+
# intended to run in multiple environments; otherwise, check them in:
|
|
88
|
+
# .python-version
|
|
89
|
+
|
|
90
|
+
# pipenv
|
|
91
|
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
92
|
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
93
|
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
94
|
+
# install all needed dependencies.
|
|
95
|
+
#Pipfile.lock
|
|
96
|
+
|
|
97
|
+
# UV
|
|
98
|
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
|
99
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
100
|
+
# commonly ignored for libraries.
|
|
101
|
+
#uv.lock
|
|
102
|
+
|
|
103
|
+
# poetry
|
|
104
|
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
105
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
106
|
+
# commonly ignored for libraries.
|
|
107
|
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
108
|
+
#poetry.lock
|
|
109
|
+
#poetry.toml
|
|
110
|
+
|
|
111
|
+
# pdm
|
|
112
|
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
113
|
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
|
114
|
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
|
115
|
+
#pdm.lock
|
|
116
|
+
#pdm.toml
|
|
117
|
+
.pdm-python
|
|
118
|
+
.pdm-build/
|
|
119
|
+
|
|
120
|
+
# pixi
|
|
121
|
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
|
122
|
+
#pixi.lock
|
|
123
|
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
|
124
|
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
|
125
|
+
.pixi
|
|
126
|
+
|
|
127
|
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
128
|
+
__pypackages__/
|
|
129
|
+
|
|
130
|
+
# Celery stuff
|
|
131
|
+
celerybeat-schedule
|
|
132
|
+
celerybeat.pid
|
|
133
|
+
|
|
134
|
+
# SageMath parsed files
|
|
135
|
+
*.sage.py
|
|
136
|
+
|
|
137
|
+
# Environments
|
|
138
|
+
.env
|
|
139
|
+
.envrc
|
|
140
|
+
.venv
|
|
141
|
+
env/
|
|
142
|
+
venv/
|
|
143
|
+
ENV/
|
|
144
|
+
env.bak/
|
|
145
|
+
venv.bak/
|
|
146
|
+
|
|
147
|
+
# Spyder project settings
|
|
148
|
+
.spyderproject
|
|
149
|
+
.spyproject
|
|
150
|
+
|
|
151
|
+
# Rope project settings
|
|
152
|
+
.ropeproject
|
|
153
|
+
|
|
154
|
+
# mkdocs documentation
|
|
155
|
+
/site
|
|
156
|
+
|
|
157
|
+
# mypy
|
|
158
|
+
.mypy_cache/
|
|
159
|
+
.dmypy.json
|
|
160
|
+
dmypy.json
|
|
161
|
+
|
|
162
|
+
# Pyre type checker
|
|
163
|
+
.pyre/
|
|
164
|
+
|
|
165
|
+
# pytype static type analyzer
|
|
166
|
+
.pytype/
|
|
167
|
+
|
|
168
|
+
# Cython debug symbols
|
|
169
|
+
cython_debug/
|
|
170
|
+
|
|
171
|
+
# PyCharm
|
|
172
|
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
173
|
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
174
|
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
175
|
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
176
|
+
#.idea/
|
|
177
|
+
|
|
178
|
+
# Abstra
|
|
179
|
+
# Abstra is an AI-powered process automation framework.
|
|
180
|
+
# Ignore directories containing user credentials, local state, and settings.
|
|
181
|
+
# Learn more at https://abstra.io/docs
|
|
182
|
+
.abstra/
|
|
183
|
+
|
|
184
|
+
# Visual Studio Code
|
|
185
|
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
|
186
|
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
|
187
|
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
|
188
|
+
# you could uncomment the following to ignore the entire vscode folder
|
|
189
|
+
# .vscode/
|
|
190
|
+
|
|
191
|
+
# Ruff stuff:
|
|
192
|
+
.ruff_cache/
|
|
193
|
+
|
|
194
|
+
# PyPI configuration file
|
|
195
|
+
.pypirc
|
|
196
|
+
|
|
197
|
+
# Cursor
|
|
198
|
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
|
199
|
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
|
200
|
+
# refer to https://docs.cursor.com/context/ignore-files
|
|
201
|
+
.cursorignore
|
|
202
|
+
.cursorindexingignore
|
|
203
|
+
|
|
204
|
+
# Marimo
|
|
205
|
+
marimo/_static/
|
|
206
|
+
marimo/_lsp/
|
|
207
|
+
__marimo__/
|
|
208
|
+
|
|
209
|
+
#ai
|
|
210
|
+
.claude/
|
|
211
|
+
CLAUDE.md
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 fr0stb1rd (original oget)
|
|
4
|
+
Copyright (c) 2026 Steve-sy (pullama)
|
|
5
|
+
|
|
6
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
7
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
8
|
+
in the Software without restriction, including without limitation the rights
|
|
9
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
10
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
11
|
+
furnished to do so, subject to the following conditions:
|
|
12
|
+
|
|
13
|
+
The above copyright notice and this permission notice shall be included in all
|
|
14
|
+
copies or substantial portions of the Software.
|
|
15
|
+
|
|
16
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
17
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
18
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
19
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
20
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
21
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
22
|
+
SOFTWARE.
|
|
@@ -0,0 +1,210 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pullama-cli
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: Fix ollama pull TLS timeout and disconnects — resumable Ollama model downloader for slow connections
|
|
5
|
+
Project-URL: Homepage, https://github.com/Steve-sy/pullama
|
|
6
|
+
Project-URL: Issues, https://github.com/Steve-sy/pullama/issues
|
|
7
|
+
License: MIT
|
|
8
|
+
License-File: LICENSE
|
|
9
|
+
Keywords: ai,download,llm,model,offline,ollama,ollama-download-manager,ollama-offline,ollama-pull,ollama-pull-alternative,resume,slow-internet,tls-handshake-timeout,tls-timeout
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Intended Audience :: Science/Research
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Operating System :: OS Independent
|
|
16
|
+
Classifier: Programming Language :: Python :: 3
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
21
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
22
|
+
Classifier: Topic :: Utilities
|
|
23
|
+
Requires-Python: >=3.8
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
|
|
26
|
+
# Pullama 🦙 Ollama Model Downloader & Installer
|
|
27
|
+
|
|
28
|
+
**The ollama pull alternative built for slow, unstable, and limited internet connections.**
|
|
29
|
+
|
|
30
|
+
If `ollama pull` keeps restarting, times out, or disconnects mid-download — Pullama fixes that.
|
|
31
|
+
It resumes interrupted downloads automatically, supports parallel connections via aria2, and installs models directly into Ollama when done. Works on slow connections, unstable Wi-Fi, mobile data, and VPNs.
|
|
32
|
+
|
|
33
|
+
```
|
|
34
|
+
# Common Ollama pull errors Pullama solves:
|
|
35
|
+
net/http: TLS handshake timeout
|
|
36
|
+
context deadline exceeded
|
|
37
|
+
download interrupted, starting from scratch
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+

|
|
41
|
+
|
|
42
|
+
---
|
|
43
|
+
|
|
44
|
+
## Install
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pip install pullama-cli
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
**For faster, more reliable downloads — install aria2 (optional but recommended):**
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
# Linux (Debian/Ubuntu)
|
|
54
|
+
sudo apt install aria2
|
|
55
|
+
|
|
56
|
+
# macOS
|
|
57
|
+
brew install aria2
|
|
58
|
+
|
|
59
|
+
# Windows
|
|
60
|
+
winget install aria2
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
With aria2, Pullama downloads Ollama models using multiple parallel connections — significantly faster and more resilient on slow or throttled connections.
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## Quick Start
|
|
68
|
+
|
|
69
|
+
### Download & install ollama models in one command
|
|
70
|
+
```bash
|
|
71
|
+
pullama pull tinyllama:latest
|
|
72
|
+
pullama pull gemma2:2b
|
|
73
|
+
pullama pull deepseek-r1:7b
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
Pullama downloads the model and installs it into Ollama automatically. Then:
|
|
77
|
+
|
|
78
|
+
```bash
|
|
79
|
+
ollama run tinyllama:latest
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
---
|
|
83
|
+
|
|
84
|
+
## Resume interrupted Ollama model downloads
|
|
85
|
+
|
|
86
|
+
If your connection drops, just run the same command again — Pullama resumes from where it stopped:
|
|
87
|
+
|
|
88
|
+
```bash
|
|
89
|
+
pullama pull gemma2:2b
|
|
90
|
+
# ... connection drops at 60% ...
|
|
91
|
+
|
|
92
|
+
pullama pull gemma2:2b
|
|
93
|
+
# ℹ Resuming from 1.1 GB / 1.7 GB
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
No flags, no setup. Works after power cuts, network switches, sleep, or days later.
|
|
97
|
+
This is the core feature `ollama pull` is missing — once it disconnects, you lose everything.
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## Pullama vs ollama pull
|
|
102
|
+
|
|
103
|
+
| Feature | `ollama pull` | `pullama` |
|
|
104
|
+
|---|---|---|
|
|
105
|
+
| Resume interrupted download | ❌ | ✅ |
|
|
106
|
+
| Parallel chunk downloads (aria2) | ❌ | ✅ |
|
|
107
|
+
| Offline / manual install | ❌ | ✅ |
|
|
108
|
+
| Download without Ollama installed | ❌ | ✅ |
|
|
109
|
+
| Export ollama model to another machine | ❌ | ✅ |
|
|
110
|
+
| Track download progress across sessions | ❌ | ✅ |
|
|
111
|
+
| Works on slow / unstable connections | ⚠️ unreliable | ✅ |
|
|
112
|
+
| SHA256 verification | ❌ | ✅ |
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## Commands
|
|
117
|
+
|
|
118
|
+
### Track your downloads
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
pullama list
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
```
|
|
125
|
+
Model Size Downloaded Installed
|
|
126
|
+
────────────────────────────────────────────────────────────
|
|
127
|
+
tinyllama:latest 608 MB 608/608 MB ✔ ✔ yes
|
|
128
|
+
gemma2:2b 1.7 GB 856 MB/1.7 GB ✗ no
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
### Get direct download URLs
|
|
132
|
+
|
|
133
|
+
For users who prefer to download Ollama models manually with wget, curl, IDM, or any other download manager:
|
|
134
|
+
|
|
135
|
+
```bash
|
|
136
|
+
pullama get gemma2:2b
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
Prints direct blob URLs and ready-to-use curl commands — useful for downloading ollama models on a separate machine or through a proxy.
|
|
140
|
+
|
|
141
|
+
### Manual Ollama model installation
|
|
142
|
+
|
|
143
|
+
Already downloaded the files? Install them into Ollama without re-downloading:
|
|
144
|
+
|
|
145
|
+
```bash
|
|
146
|
+
pullama install --model gemma2:2b --blobsPath ./downloads
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
---
|
|
150
|
+
|
|
151
|
+
## Download Ollama models without ollama (offline install)
|
|
152
|
+
|
|
153
|
+
Pullama works even if Ollama isn't installed yet. It saves the model files locally so you can install them later — or copy them to another machine, or give them to a friend with no internet:
|
|
154
|
+
|
|
155
|
+
```bash
|
|
156
|
+
pullama pull gemma2:2b
|
|
157
|
+
# ⚠ Ollama not found — downloading to: ~/pullama-models/gemma2-2b/
|
|
158
|
+
# ✔ gemma2:2b downloaded!
|
|
159
|
+
# Saved to: ~/pullama-models/gemma2-2b/
|
|
160
|
+
#
|
|
161
|
+
# Once Ollama is installed, run:
|
|
162
|
+
# pullama install --model gemma2:2b --blobsPath ~/pullama-models/gemma2-2b/
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
Copy the folder to a USB drive, give it to a friend, install on an air-gapped machine — it just works.
|
|
166
|
+
|
|
167
|
+
---
|
|
168
|
+
|
|
169
|
+
## How it works
|
|
170
|
+
|
|
171
|
+
Ollama stores models as SHA256-named blob files. Pullama downloads each blob directly into Ollama's models directory (`~/.ollama/models` or `/usr/share/ollama/.ollama/models` for system installs) and writes the manifest **last** — so Ollama only sees the model once everything is verified complete.
|
|
172
|
+
|
|
173
|
+
If a download is interrupted, the partial blob stays on disk. On the next run, Pullama checks the existing file size and sends an HTTP `Range: bytes=X-` request to continue exactly where it stopped — no re-downloading from zero.
|
|
174
|
+
|
|
175
|
+
**With aria2:** splits each file into 4 parallel chunks. Bypasses per-connection throttling and dramatically improves speed on slow connections.
|
|
176
|
+
|
|
177
|
+
**Without aria2:** uses Python's built-in HTTP client with the same resume logic.
|
|
178
|
+
|
|
179
|
+
---
|
|
180
|
+
|
|
181
|
+
## Model name format
|
|
182
|
+
|
|
183
|
+
```
|
|
184
|
+
tinyllama:latest # official model, explicit tag
|
|
185
|
+
gemma2:2b # official model
|
|
186
|
+
deepseek-r1:7b # official model
|
|
187
|
+
huihui_ai/deepseek-r1:8b # community model (namespace/model:tag)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
---
|
|
191
|
+
|
|
192
|
+
## Platform support
|
|
193
|
+
|
|
194
|
+
| Platform | Supported |
|
|
195
|
+
|---|---|
|
|
196
|
+
| Linux | ✔ |
|
|
197
|
+
| macOS | ✔ |
|
|
198
|
+
| Windows | ✔ |
|
|
199
|
+
|
|
200
|
+
---
|
|
201
|
+
|
|
202
|
+
## License
|
|
203
|
+
|
|
204
|
+
MIT
|
|
205
|
+
|
|
206
|
+
---
|
|
207
|
+
|
|
208
|
+
## Credits
|
|
209
|
+
|
|
210
|
+
Pullama started as a fork of [oget](https://github.com/fr0stb1rd/oget) by [fr0stb1rd](https://github.com/fr0stb1rd). The original idea of fetching direct download URLs from the Ollama registry belongs to them. Pullama extends it with resumable downloads, automatic Ollama install, aria2 support, state tracking, smart path detection, and a fully rewritten CLI built for slow and unstable connections.
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
# Pullama 🦙 Ollama Model Downloader & Installer
|
|
2
|
+
|
|
3
|
+
**The ollama pull alternative built for slow, unstable, and limited internet connections.**
|
|
4
|
+
|
|
5
|
+
If `ollama pull` keeps restarting, times out, or disconnects mid-download — Pullama fixes that.
|
|
6
|
+
It resumes interrupted downloads automatically, supports parallel connections via aria2, and installs models directly into Ollama when done. Works on slow connections, unstable Wi-Fi, mobile data, and VPNs.
|
|
7
|
+
|
|
8
|
+
```
|
|
9
|
+
# Common Ollama pull errors Pullama solves:
|
|
10
|
+
net/http: TLS handshake timeout
|
|
11
|
+
context deadline exceeded
|
|
12
|
+
download interrupted, starting from scratch
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+

|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Install
|
|
20
|
+
|
|
21
|
+
```bash
|
|
22
|
+
pip install pullama-cli
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
**For faster, more reliable downloads — install aria2 (optional but recommended):**
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
# Linux (Debian/Ubuntu)
|
|
29
|
+
sudo apt install aria2
|
|
30
|
+
|
|
31
|
+
# macOS
|
|
32
|
+
brew install aria2
|
|
33
|
+
|
|
34
|
+
# Windows
|
|
35
|
+
winget install aria2
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
With aria2, Pullama downloads Ollama models using multiple parallel connections — significantly faster and more resilient on slow or throttled connections.
|
|
39
|
+
|
|
40
|
+
---
|
|
41
|
+
|
|
42
|
+
## Quick Start
|
|
43
|
+
|
|
44
|
+
### Download & install ollama models in one command
|
|
45
|
+
```bash
|
|
46
|
+
pullama pull tinyllama:latest
|
|
47
|
+
pullama pull gemma2:2b
|
|
48
|
+
pullama pull deepseek-r1:7b
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
Pullama downloads the model and installs it into Ollama automatically. Then:
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
ollama run tinyllama:latest
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
---
|
|
58
|
+
|
|
59
|
+
## Resume interrupted Ollama model downloads
|
|
60
|
+
|
|
61
|
+
If your connection drops, just run the same command again — Pullama resumes from where it stopped:
|
|
62
|
+
|
|
63
|
+
```bash
|
|
64
|
+
pullama pull gemma2:2b
|
|
65
|
+
# ... connection drops at 60% ...
|
|
66
|
+
|
|
67
|
+
pullama pull gemma2:2b
|
|
68
|
+
# ℹ Resuming from 1.1 GB / 1.7 GB
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
No flags, no setup. Works after power cuts, network switches, sleep, or days later.
|
|
72
|
+
This is the core feature `ollama pull` is missing — once it disconnects, you lose everything.
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
## Pullama vs ollama pull
|
|
77
|
+
|
|
78
|
+
| Feature | `ollama pull` | `pullama` |
|
|
79
|
+
|---|---|---|
|
|
80
|
+
| Resume interrupted download | ❌ | ✅ |
|
|
81
|
+
| Parallel chunk downloads (aria2) | ❌ | ✅ |
|
|
82
|
+
| Offline / manual install | ❌ | ✅ |
|
|
83
|
+
| Download without Ollama installed | ❌ | ✅ |
|
|
84
|
+
| Export ollama model to another machine | ❌ | ✅ |
|
|
85
|
+
| Track download progress across sessions | ❌ | ✅ |
|
|
86
|
+
| Works on slow / unstable connections | ⚠️ unreliable | ✅ |
|
|
87
|
+
| SHA256 verification | ❌ | ✅ |
|
|
88
|
+
|
|
89
|
+
---
|
|
90
|
+
|
|
91
|
+
## Commands
|
|
92
|
+
|
|
93
|
+
### Track your downloads
|
|
94
|
+
|
|
95
|
+
```bash
|
|
96
|
+
pullama list
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
```
|
|
100
|
+
Model Size Downloaded Installed
|
|
101
|
+
────────────────────────────────────────────────────────────
|
|
102
|
+
tinyllama:latest 608 MB 608/608 MB ✔ ✔ yes
|
|
103
|
+
gemma2:2b 1.7 GB 856 MB/1.7 GB ✗ no
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### Get direct download URLs
|
|
107
|
+
|
|
108
|
+
For users who prefer to download Ollama models manually with wget, curl, IDM, or any other download manager:
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
pullama get gemma2:2b
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
Prints direct blob URLs and ready-to-use curl commands — useful for downloading ollama models on a separate machine or through a proxy.
|
|
115
|
+
|
|
116
|
+
### Manual Ollama model installation
|
|
117
|
+
|
|
118
|
+
Already downloaded the files? Install them into Ollama without re-downloading:
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
pullama install --model gemma2:2b --blobsPath ./downloads
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
---
|
|
125
|
+
|
|
126
|
+
## Download Ollama models without ollama (offline install)
|
|
127
|
+
|
|
128
|
+
Pullama works even if Ollama isn't installed yet. It saves the model files locally so you can install them later — or copy them to another machine, or give them to a friend with no internet:
|
|
129
|
+
|
|
130
|
+
```bash
|
|
131
|
+
pullama pull gemma2:2b
|
|
132
|
+
# ⚠ Ollama not found — downloading to: ~/pullama-models/gemma2-2b/
|
|
133
|
+
# ✔ gemma2:2b downloaded!
|
|
134
|
+
# Saved to: ~/pullama-models/gemma2-2b/
|
|
135
|
+
#
|
|
136
|
+
# Once Ollama is installed, run:
|
|
137
|
+
# pullama install --model gemma2:2b --blobsPath ~/pullama-models/gemma2-2b/
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
Copy the folder to a USB drive, give it to a friend, install on an air-gapped machine — it just works.
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## How it works
|
|
145
|
+
|
|
146
|
+
Ollama stores models as SHA256-named blob files. Pullama downloads each blob directly into Ollama's models directory (`~/.ollama/models` or `/usr/share/ollama/.ollama/models` for system installs) and writes the manifest **last** — so Ollama only sees the model once everything is verified complete.
|
|
147
|
+
|
|
148
|
+
If a download is interrupted, the partial blob stays on disk. On the next run, Pullama checks the existing file size and sends an HTTP `Range: bytes=X-` request to continue exactly where it stopped — no re-downloading from zero.
|
|
149
|
+
|
|
150
|
+
**With aria2:** splits each file into 4 parallel chunks. Bypasses per-connection throttling and dramatically improves speed on slow connections.
|
|
151
|
+
|
|
152
|
+
**Without aria2:** uses Python's built-in HTTP client with the same resume logic.
|
|
153
|
+
|
|
154
|
+
---
|
|
155
|
+
|
|
156
|
+
## Model name format
|
|
157
|
+
|
|
158
|
+
```
|
|
159
|
+
tinyllama:latest # official model, explicit tag
|
|
160
|
+
gemma2:2b # official model
|
|
161
|
+
deepseek-r1:7b # official model
|
|
162
|
+
huihui_ai/deepseek-r1:8b # community model (namespace/model:tag)
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
---
|
|
166
|
+
|
|
167
|
+
## Platform support
|
|
168
|
+
|
|
169
|
+
| Platform | Supported |
|
|
170
|
+
|---|---|
|
|
171
|
+
| Linux | ✔ |
|
|
172
|
+
| macOS | ✔ |
|
|
173
|
+
| Windows | ✔ |
|
|
174
|
+
|
|
175
|
+
---
|
|
176
|
+
|
|
177
|
+
## License
|
|
178
|
+
|
|
179
|
+
MIT
|
|
180
|
+
|
|
181
|
+
---
|
|
182
|
+
|
|
183
|
+
## Credits
|
|
184
|
+
|
|
185
|
+
Pullama started as a fork of [oget](https://github.com/fr0stb1rd/oget) by [fr0stb1rd](https://github.com/fr0stb1rd). The original idea of fetching direct download URLs from the Ollama registry belongs to them. Pullama extends it with resumable downloads, automatic Ollama install, aria2 support, state tracking, smart path detection, and a fully rewritten CLI built for slow and unstable connections.
|
|
File without changes
|