deploy-llm 0.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,12 @@
1
+ Metadata-Version: 2.1
2
+ Name: deploy-llm
3
+ Version: 0.0.2
4
+ Summary: A CLI tool to deploy and manage LLMs using Ollama.
5
+ Home-page:
6
+ Author: Ankit Gupta
7
+ Author-email: devankitgupta01@gmail.com
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.7
12
+ Requires-Dist: click
File without changes
@@ -0,0 +1,12 @@
1
+ Metadata-Version: 2.1
2
+ Name: deploy-llm
3
+ Version: 0.0.2
4
+ Summary: A CLI tool to deploy and manage LLMs using Ollama.
5
+ Home-page:
6
+ Author: Ankit Gupta
7
+ Author-email: devankitgupta01@gmail.com
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.7
12
+ Requires-Dist: click
@@ -0,0 +1,12 @@
1
+ README.md
2
+ setup.py
3
+ deploy_llm.egg-info/PKG-INFO
4
+ deploy_llm.egg-info/SOURCES.txt
5
+ deploy_llm.egg-info/dependency_links.txt
6
+ deploy_llm.egg-info/entry_points.txt
7
+ deploy_llm.egg-info/requires.txt
8
+ deploy_llm.egg-info/top_level.txt
9
+ llmdeploy/__init__.py
10
+ llmdeploy/cli.py
11
+ llmdeploy/model_manager.py
12
+ llmdeploy/ollama_manager.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ llmdeploy = llmdeploy.cli:cli
@@ -0,0 +1 @@
1
+ click
@@ -0,0 +1 @@
1
+ llmdeploy
File without changes
@@ -0,0 +1,45 @@
"""LLMDeploy command-line interface.

Defines the `llmdeploy` click group and its subcommands (deploy, list,
remove, infer), delegating all real work to llmdeploy.model_manager.
"""
import click

from llmdeploy.model_manager import deploy_model, list_models, remove_model, run_inference


@click.group()
def cli():
    """LLMDeploy CLI - Manage and deploy LLM models."""


@cli.command()
@click.argument("model_name")
@click.option("--model_type", required=True, type=click.Choice(["text", "multimodal"]), help="Model type (text/multimodal).")
def deploy(model_name, model_type):
    """Deploy a model from Ollama."""
    result = deploy_model(model_name, model_type)
    click.echo(result)


# Registered under the CLI name "list"; the function is named list_cmd so it
# does not shadow the builtin `list`.
@cli.command(name="list")
def list_cmd():
    """List deployed models."""
    models = list_models()
    click.echo(models)


@cli.command()
@click.argument("model_name")
def remove(model_name):
    """Remove a deployed model."""
    result = remove_model(model_name)
    click.echo(result)


# "--input" stays the user-facing flag; click binds it to the parameter
# `input_text` so the builtin `input` is not shadowed.
@cli.command()
@click.argument("model_name")
@click.option("--input", "input_text", help="Text input for inference.")
@click.option("--image-path", help="Path to an image (for multimodal models).")
def infer(model_name, input_text, image_path):
    """Run inference on a deployed model."""
    result = run_inference(model_name, input_text=input_text, image_path=image_path)
    click.echo(result)


if __name__ == "__main__":
    cli()
@@ -0,0 +1,68 @@
1
+ import json
2
+ import os
3
+ from llmdeploy.ollama_manager import deploy_ollama_model, remove_ollama_model, infer_ollama
4
+
5
# Registry of deployed models, persisted as JSON in the current working
# directory. NOTE(review): a CWD-relative path means each working directory
# gets its own registry — presumably intentional; confirm.
MODELS_FILE = "models.json"


def load_models():
    """Load the deployed-model registry from JSON storage.

    Returns:
        dict: mapping of model name -> model info; {} when no registry
        file exists yet.
    """
    # EAFP: attempt the read and treat a missing file as "no models yet".
    # This avoids the exists()/open() race of the previous LBYL check.
    try:
        with open(MODELS_FILE, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        return {}


def save_models(models):
    """Persist the model registry *models* (a dict) to JSON storage."""
    with open(MODELS_FILE, "w", encoding="utf-8") as f:
        json.dump(models, f, indent=4)
18
+
19
def deploy_model(model_name, model_type):
    """Deploy a model using Ollama.

    Args:
        model_name: name of the Ollama model to pull.
        model_type: "text" or "multimodal".

    Returns:
        str: human-readable status message.
    """
    registry = load_models()

    # Refuse to deploy the same model twice.
    if model_name in registry:
        return f"⚠️ Model '{model_name}' is already deployed."

    # Pull the model first; only record it once the pull has succeeded.
    deploy_status = deploy_ollama_model(model_name)
    if "❌" in deploy_status:
        return deploy_status  # Return error message if deployment fails

    registry[model_name] = {
        "name": model_name,
        "source": "ollama",
        "type": model_type,
    }
    save_models(registry)

    return f"✅ Model '{model_name}' deployed successfully."
40
+
41
def list_models():
    """Return a JSON listing of all deployed models, or a placeholder message."""
    registry = load_models()
    if not registry:
        return "📜 No models deployed."
    return json.dumps(registry, indent=4)
45
+
46
def remove_model(model_name):
    """Remove a deployed model and drop it from the registry.

    Returns:
        str: human-readable status message.
    """
    registry = load_models()
    if model_name not in registry:
        return f"❌ Model '{model_name}' not found."

    # Ask Ollama to delete the model; keep the registry entry on failure.
    remove_status = remove_ollama_model(model_name)
    if "❌" in remove_status:
        return remove_status  # Return error message if removal fails

    registry.pop(model_name)
    save_models(registry)

    return f"✅ Model '{model_name}' removed."
60
+
61
def run_inference(model_name, input_text=None, image_path=None):
    """Run inference on a deployed model.

    Args:
        model_name: registry name of the model.
        input_text: prompt for text models.
        image_path: image file path for multimodal models.

    Returns:
        str: model output, or an error message for an unknown model.
    """
    registry = load_models()
    try:
        model_type = registry[model_name]["type"]
    except KeyError:
        return f"❌ Model '{model_name}' not found."
    return infer_ollama(model_name, model_type, input_text, image_path)
@@ -0,0 +1,33 @@
1
+ import subprocess
2
+
3
def deploy_ollama_model(model_name):
    """Pull *model_name* through the `ollama` CLI.

    Returns:
        str: success message, or an error message prefixed with ❌
        (callers detect failure by checking for "❌").
    """
    try:
        subprocess.run(["ollama", "pull", model_name], check=True)
        return f"✅ Model '{model_name}' pulled successfully."
    except FileNotFoundError:
        # `ollama` is not on PATH — report instead of crashing the CLI.
        return "❌ Error deploying Ollama model: 'ollama' executable not found."
    except subprocess.CalledProcessError as e:
        return f"❌ Error deploying Ollama model: {e}"
10
+
11
def remove_ollama_model(model_name):
    """Remove *model_name* through the `ollama` CLI.

    Returns:
        str: success message, or an error message prefixed with ❌
        (callers detect failure by checking for "❌").
    """
    try:
        subprocess.run(["ollama", "rm", model_name], check=True)
        return f"✅ Model '{model_name}' removed successfully."
    except FileNotFoundError:
        # `ollama` is not on PATH — report instead of crashing the CLI.
        return "❌ Error removing Ollama model: 'ollama' executable not found."
    except subprocess.CalledProcessError as e:
        return f"❌ Error removing Ollama model: {e}"
18
+
19
def infer_ollama(model_name, model_type, input_text=None, image_path=None):
    """Run inference using Ollama (text or multimodal).

    Args:
        model_name: name of the model to run.
        model_type: "text" (prompt appended) or "multimodal" (--image flag).
        input_text: prompt for text models.
        image_path: image path for multimodal models.

    Returns:
        str: the model's stdout, or an error message prefixed with ❌.
    """
    command = ["ollama", "run", model_name]

    if model_type == "text" and input_text:
        command.append(input_text)
    elif model_type == "multimodal" and image_path:
        command.extend(["--image", image_path])

    try:
        result = subprocess.run(command, capture_output=True, text=True)
    except FileNotFoundError:
        # `ollama` is not on PATH — report instead of crashing the CLI.
        return "❌ Inference error: 'ollama' executable not found."

    # run() is deliberately called without check=True, so failures surface
    # via returncode, not CalledProcessError (the old except clause was
    # unreachable). Report stderr so the user sees why inference failed.
    if result.returncode != 0:
        detail = result.stderr.strip() if result.stderr else f"exit code {result.returncode}"
        return f"❌ Inference error: {detail}"

    return result.stdout.strip() if result.stdout else "❌ No output received."
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,25 @@
1
"""Packaging configuration for the deploy-llm distribution."""
from setuptools import find_packages, setup

setup(
    name="deploy-llm",
    version="0.0.2",
    description="A CLI tool to deploy and manage LLMs using Ollama.",
    author="Ankit Gupta",
    author_email="devankitgupta01@gmail.com",
    url="",
    packages=find_packages(),
    python_requires=">=3.7",
    install_requires=[
        "click",
    ],
    # Expose the click group as the `llmdeploy` console command.
    entry_points={
        "console_scripts": [
            "llmdeploy=llmdeploy.cli:cli",
        ],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)