loreguard-cli 0.3.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- loreguard_cli-0.3.0/.claude/settings.local.json +10 -0
- loreguard_cli-0.3.0/.env.example +40 -0
- loreguard_cli-0.3.0/.github/workflows/release.yml +96 -0
- loreguard_cli-0.3.0/.gitignore +45 -0
- loreguard_cli-0.3.0/LICENSE +21 -0
- loreguard_cli-0.3.0/PKG-INFO +220 -0
- loreguard_cli-0.3.0/README.md +187 -0
- loreguard_cli-0.3.0/pyproject.toml +56 -0
- loreguard_cli-0.3.0/scripts/build.py +80 -0
- loreguard_cli-0.3.0/src/__init__.py +3 -0
- loreguard_cli-0.3.0/src/cli.py +409 -0
- loreguard_cli-0.3.0/src/config.py +61 -0
- loreguard_cli-0.3.0/src/llama_server.py +377 -0
- loreguard_cli-0.3.0/src/llm.py +485 -0
- loreguard_cli-0.3.0/src/main.py +147 -0
- loreguard_cli-0.3.0/src/models_registry.py +130 -0
- loreguard_cli-0.3.0/src/npc_chat.py +468 -0
- loreguard_cli-0.3.0/src/steam.py +505 -0
- loreguard_cli-0.3.0/src/term_ui.py +719 -0
- loreguard_cli-0.3.0/src/tunnel.py +428 -0
- loreguard_cli-0.3.0/src/wizard.py +649 -0
loreguard_cli-0.3.0/.env.example
@@ -0,0 +1,40 @@
+# Lorekeeper Client Configuration
+
+# Local LLM endpoint (llama.cpp server)
+# llama.cpp:  http://localhost:8080 (default)
+# Ollama:     http://localhost:11434
+# LM Studio:  http://localhost:1234
+LLM_ENDPOINT=http://localhost:8080
+
+# Backend WebSocket URL (workers endpoint)
+LOREKEEPER_BACKEND=wss://api.lorekeeper.ai/workers
+
+# Worker authentication (REQUIRED for backend connection)
+# Generate a token using the backend CLI:
+#   cd /path/to/lorekeeper
+#   go run cmd/token/main.go generate -local -worker-id my-worker -model-id qwen3-1.7b
+#
+# The CLI will output values to paste here:
+WORKER_ID=
+WORKER_TOKEN=
+MODEL_ID=default
+
+# Client bridge server settings
+HOST=127.0.0.1
+PORT=8081
+
+# Context limits (in characters, ~4 chars per token)
+# MAX_MESSAGE_LENGTH: Max size of a single message (default 100KB ~25K tokens)
+# MAX_TOTAL_CONTEXT: Max total context size (default 500KB ~125K tokens)
+# Set based on your model's context window (e.g., 32K model = ~128KB)
+MAX_MESSAGE_LENGTH=100000
+MAX_TOTAL_CONTEXT=500000
+MAX_TIMEOUT=300.0
+
+# Context compaction: if true, truncate old messages instead of erroring on overflow
+# When enabled, older messages are removed to fit within MAX_TOTAL_CONTEXT
+# System prompt and most recent messages are always preserved
+CONTEXT_COMPACTION=true
+
+# Development mode (enables hot reload)
+DEV=false
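The context limits above use the rough rule of thumb of ~4 characters per token, so a 32K-token window is about 32,768 × 4 ≈ 131,000 characters, which is where the "32K model = ~128KB" guidance comes from. The `CONTEXT_COMPACTION` behavior described in the comments (drop the oldest messages on overflow, always keep the system prompt and the most recent turns) might look roughly like the sketch below. This is a hypothetical illustration only, not the package's actual implementation in the `src/` modules listed above:

```python
# Hypothetical sketch of CONTEXT_COMPACTION, assuming OpenAI-style
# {"role": ..., "content": ...} messages. Limits are in characters.
MAX_TOTAL_CONTEXT = 500_000  # ~125K tokens at ~4 chars/token

def compact(messages: list[dict], limit: int = MAX_TOTAL_CONTEXT) -> list[dict]:
    """Drop the oldest non-system messages until the transcript fits."""
    def size(msgs: list[dict]) -> int:
        return sum(len(m["content"]) for m in msgs)

    system = [m for m in messages if m["role"] == "system"]   # always preserved
    rest = [m for m in messages if m["role"] != "system"]
    # Remove from the front (oldest turns), but always keep the latest message.
    while len(rest) > 1 and size(system + rest) > limit:
        rest.pop(0)
    return system + rest
```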
loreguard_cli-0.3.0/.github/workflows/release.yml
@@ -0,0 +1,96 @@
+name: Build and Release
+
+on:
+  push:
+    tags:
+      - 'v*'
+  workflow_dispatch:
+
+jobs:
+  build:
+    strategy:
+      matrix:
+        include:
+          - os: ubuntu-latest
+            artifact: loreguard-linux
+          - os: macos-latest
+            artifact: loreguard-macos
+          - os: windows-latest
+            artifact: loreguard-windows.exe
+
+    runs-on: ${{ matrix.os }}
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install -e ".[build]"
+
+      - name: Build with PyInstaller
+        run: python scripts/build.py
+
+      - name: Rename artifact (Unix)
+        if: runner.os != 'Windows'
+        run: mv dist/loreguard dist/${{ matrix.artifact }}
+
+      - name: Rename artifact (Windows)
+        if: runner.os == 'Windows'
+        run: move dist\loreguard.exe dist\${{ matrix.artifact }}
+
+      - name: Upload artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ matrix.artifact }}
+          path: dist/${{ matrix.artifact }}
+
+  release:
+    needs: build
+    runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags/')
+
+    steps:
+      - name: Download all artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: artifacts
+
+      - name: Create Release
+        uses: softprops/action-gh-release@v1
+        with:
+          files: |
+            artifacts/loreguard-linux/loreguard-linux
+            artifacts/loreguard-macos/loreguard-macos
+            artifacts/loreguard-windows.exe/loreguard-windows.exe
+          generate_release_notes: true
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  publish-pypi:
+    runs-on: ubuntu-latest
+    if: startsWith(github.ref, 'refs/tags/')
+    permissions:
+      id-token: write  # Required for trusted publishing
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Install build tools
+        run: pip install build
+
+      - name: Build package
+        run: python -m build
+
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
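Pushing a tag matching `v*` (for example `git tag v0.3.0 && git push origin v0.3.0`) runs the matrix build on all three platforms, attaches the renamed binaries to a GitHub Release, and publishes the sdist/wheel to PyPI via trusted publishing. The build step calls `scripts/build.py` (80 lines, not shown in this hunk); a minimal stand-in using PyInstaller's documented Python API could look like the following, where the `src/main.py` entry point is an assumption on my part:

```python
# Hypothetical stand-in for scripts/build.py: produce the single-file
# dist/loreguard (dist/loreguard.exe on Windows) the workflow's rename
# steps expect. The real script is not shown in this diff and may differ.
import PyInstaller.__main__

PyInstaller.__main__.run([
    "src/main.py",          # assumed entry point
    "--name", "loreguard",  # output name referenced by the workflow
    "--onefile",            # bundle everything into one executable
    "--clean",              # clear PyInstaller caches before building
])
```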
loreguard_cli-0.3.0/.gitignore
@@ -0,0 +1,45 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Virtual environments
+.venv/
+venv/
+ENV/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Environment
+.env
+.env.local
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+
+# Misc
+*.log
+.DS_Store
loreguard_cli-0.3.0/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Beyond Logic Labs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
loreguard_cli-0.3.0/PKG-INFO
@@ -0,0 +1,220 @@
+Metadata-Version: 2.4
+Name: loreguard-cli
+Version: 0.3.0
+Summary: Local inference client for Loreguard NPCs
+Project-URL: Homepage, https://loreguard.com
+Project-URL: Documentation, https://github.com/beyond-logic-labs/loreguard-cli#readme
+Project-URL: Repository, https://github.com/beyond-logic-labs/loreguard-cli
+Project-URL: Issues, https://github.com/beyond-logic-labs/loreguard-cli/issues
+License-Expression: MIT
+License-File: LICENSE
+Keywords: gamedev,inference,llm,loreguard,npc
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Games/Entertainment
+Requires-Python: >=3.10
+Requires-Dist: aiofiles>=24.1.0
+Requires-Dist: httpx>=0.26.0
+Requires-Dist: pydantic>=2.5.0
+Requires-Dist: websockets>=12.0
+Provides-Extra: build
+Requires-Dist: pyinstaller>=6.0.0; extra == 'build'
+Provides-Extra: dev
+Requires-Dist: pytest-asyncio>=0.23.0; extra == 'dev'
+Requires-Dist: pytest>=7.4.0; extra == 'dev'
+Requires-Dist: ruff>=0.1.0; extra == 'dev'
+Description-Content-Type: text/markdown
+
+# Loreguard
+
+[PyPI](https://pypi.org/project/loreguard-cli/)
+[Build](https://github.com/beyond-logic-labs/loreguard-cli/actions/workflows/release.yml)
+[License: MIT](https://opensource.org/licenses/MIT)
+[Python 3.10+](https://www.python.org/downloads/)
+[Releases](https://github.com/beyond-logic-labs/loreguard-cli/releases)
+
+```
+┌────────────────────────────────────────────────────────────────────────────────┐
+│                                                                                │
+│   ██╗      ██████╗ ██████╗ ███████╗ ██████╗ ██╗   ██╗ █████╗ ██████╗ ██████╗  │
+│   ██║     ██╔═══██╗██╔══██╗██╔════╝██╔════╝ ██║   ██║██╔══██╗██╔══██╗██╔══██╗  │
+│   ██║     ██║   ██║██████╔╝█████╗  ██║  ███╗██║   ██║███████║██████╔╝██║  ██║  │
+│   ██║     ██║   ██║██╔══██╗██╔══╝  ██║   ██║██║   ██║██╔══██║██╔══██╗██║  ██║  │
+│   ███████╗╚██████╔╝██║  ██║███████╗╚██████╔╝╚██████╔╝██║  ██║██║  ██║██████╔╝  │
+│   ╚══════╝ ╚═════╝ ╚═╝  ╚═╝╚══════╝ ╚═════╝  ╚═════╝ ╚═╝  ╚═╝╚═╝  ╚═╝╚═════╝   │
+│                                                                                │
+│                       Local inference for your game NPCs                      │
+│                                 loreguard.com                                  │
+│                                                                                │
+└────────────────────────────────────────────────────────────────────────────────┘
+```
+
+AI-powered NPCs running on your own hardware (your servers or your players' hardware).
+Loreguard CLI connects local LLM inference to the Loreguard NPC system.
+
+## How It Works
+
+```
+┌─────────────────┐      wss://api.loreguard.com       ┌─────────────────┐
+│    Your Game    │ ◄────────────────────────────────► │  Loreguard API  │
+│  (NPC Dialog)   │                                    │    (Backend)    │
+└─────────────────┘                                    └────────┬────────┘
+                                                                │
+                                                                │ Routes inference
+                                                                │ to your worker
+                                                                ▼
+                                                       ┌─────────────────┐
+                                                       │  Loreguard CLI  │◄── You run this
+                                                       │   (This repo)   │
+                                                       └────────┬────────┘
+                                                                │
+                                                                │ Local inference
+                                                                ▼
+                                                       ┌─────────────────┐
+                                                       │    llama.cpp    │
+                                                       │  (Your GPU/CPU) │
+                                                       └─────────────────┘
+```
+
+## Installation
+
+### Option 1: Download Binary (Recommended)
+
+Download standalone binaries from [Releases](https://github.com/beyond-logic-labs/loreguard-cli/releases):
+- `loreguard-linux` - Linux x64
+- `loreguard-macos` - macOS (Intel & Apple Silicon)
+- `loreguard-windows.exe` - Windows x64
+
+### Option 2: Install from PyPI
+
+```bash
+pip install loreguard-cli
+```
+
+### Option 3: Install from Source
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+pip install -e .
+```
+
+### Option 4: Build Your Own Binary
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+pip install -e ".[build]"
+python scripts/build.py
+# Output: dist/loreguard (or dist/loreguard.exe on Windows)
+```
+
+## Quick Start
+
+### Interactive Wizard
+
+```bash
+loreguard
+```
+
+The wizard guides you through:
+1. **Authentication** - Enter your worker token
+2. **Model Selection** - Choose or download a model
+3. **Running** - Starts llama-server and connects to the backend
+
+### Headless CLI
+
+```bash
+loreguard-cli --token lg_worker_xxx --model /path/to/model.gguf
+```
+
+Or auto-download a supported model:
+
+```bash
+loreguard-cli --token lg_worker_xxx --model-id qwen3-4b-instruct
+```
+
+**Environment variables:**
+```bash
+export LOREGUARD_TOKEN=lg_worker_xxx
+export LOREGUARD_MODEL=/path/to/model.gguf
+loreguard-cli
+```
+
+## Supported Models
+
+| Model ID | Name | Size | Notes |
+|----------|------|------|-------|
+| `qwen3-4b-instruct` | Qwen3 4B Instruct | 2.8 GB | **Recommended** |
+| `llama-3.2-3b-instruct` | Llama 3.2 3B | 2.0 GB | Fast |
+| `qwen3-8b` | Qwen3 8B | 5.2 GB | Higher quality |
+| `meta-llama-3-8b-instruct` | Llama 3 8B | 4.9 GB | General purpose |
+
+Or use any `.gguf` model with `--model /path/to/model.gguf`.
+
+## Use Cases
+
+### For Game Developers (Testing & Development)
+
+Use Loreguard CLI during development to test NPC dialogs on your own hardware:
+
+```bash
+# Start the worker
+loreguard-cli --token $YOUR_DEV_TOKEN --model-id qwen3-4b-instruct
+
+# Your game connects to the Loreguard API
+# NPC inference requests are routed to your local worker
+```
+
+### For Players (Coming Soon)
+
+> **Note:** Player distribution support is in development. Currently, players would need their own Loreguard account and token.
+
+We're working on a **Game Keys** system that will allow:
+- Developers to register their game and get a Game API Key
+- Players to run the CLI without needing a Loreguard account
+- Automatic worker provisioning scoped to each game
+
+**Interested in early access?** Contact us at [loreguard.com](https://loreguard.com)
+
+## Requirements
+
+- **RAM**: 8 GB minimum (16 GB+ for larger models)
+- **GPU**: Optional but recommended (NVIDIA CUDA or Apple Silicon)
+- **Disk**: 2-6 GB depending on the model
+- **Python**: 3.10+ (if installing from source)
+
+## Get Your Token
+
+1. Go to [loreguard.com/developers](https://loreguard.com/developers)
+2. Create a worker token
+3. Use it with `--token` or `LOREGUARD_TOKEN`
+
+## Development
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+python -m venv .venv
+source .venv/bin/activate  # Windows: .venv\Scripts\activate
+pip install -e ".[dev]"
+
+# Run the interactive wizard
+python -m src.wizard
+
+# Run the headless CLI
+python -m src.cli --help
+
+# Run tests
+pytest
+```
+
+## License
+
+MIT
loreguard_cli-0.3.0/README.md
@@ -0,0 +1,187 @@
+# Loreguard
+
+[PyPI](https://pypi.org/project/loreguard-cli/)
+[Build](https://github.com/beyond-logic-labs/loreguard-cli/actions/workflows/release.yml)
+[License: MIT](https://opensource.org/licenses/MIT)
+[Python 3.10+](https://www.python.org/downloads/)
+[Releases](https://github.com/beyond-logic-labs/loreguard-cli/releases)
+
+```
+┌────────────────────────────────────────────────────────────────────────────────┐
+│                                                                                │
+│   ██╗      ██████╗ ██████╗ ███████╗ ██████╗ ██╗   ██╗ █████╗ ██████╗ ██████╗  │
+│   ██║     ██╔═══██╗██╔══██╗██╔════╝██╔════╝ ██║   ██║██╔══██╗██╔══██╗██╔══██╗  │
+│   ██║     ██║   ██║██████╔╝█████╗  ██║  ███╗██║   ██║███████║██████╔╝██║  ██║  │
+│   ██║     ██║   ██║██╔══██╗██╔══╝  ██║   ██║██║   ██║██╔══██║██╔══██╗██║  ██║  │
+│   ███████╗╚██████╔╝██║  ██║███████╗╚██████╔╝╚██████╔╝██║  ██║██║  ██║██████╔╝  │
+│   ╚══════╝ ╚═════╝ ╚═╝  ╚═╝╚══════╝ ╚═════╝  ╚═════╝ ╚═╝  ╚═╝╚═╝  ╚═╝╚═════╝   │
+│                                                                                │
+│                       Local inference for your game NPCs                      │
+│                                 loreguard.com                                  │
+│                                                                                │
+└────────────────────────────────────────────────────────────────────────────────┘
+```
+
+AI-powered NPCs running on your own hardware (your servers or your players' hardware).
+Loreguard CLI connects local LLM inference to the Loreguard NPC system.
+
+## How It Works
+
+```
+┌─────────────────┐      wss://api.loreguard.com       ┌─────────────────┐
+│    Your Game    │ ◄────────────────────────────────► │  Loreguard API  │
+│  (NPC Dialog)   │                                    │    (Backend)    │
+└─────────────────┘                                    └────────┬────────┘
+                                                                │
+                                                                │ Routes inference
+                                                                │ to your worker
+                                                                ▼
+                                                       ┌─────────────────┐
+                                                       │  Loreguard CLI  │◄── You run this
+                                                       │   (This repo)   │
+                                                       └────────┬────────┘
+                                                                │
+                                                                │ Local inference
+                                                                ▼
+                                                       ┌─────────────────┐
+                                                       │    llama.cpp    │
+                                                       │  (Your GPU/CPU) │
+                                                       └─────────────────┘
+```
+
+## Installation
+
+### Option 1: Download Binary (Recommended)
+
+Download standalone binaries from [Releases](https://github.com/beyond-logic-labs/loreguard-cli/releases):
+- `loreguard-linux` - Linux x64
+- `loreguard-macos` - macOS (Intel & Apple Silicon)
+- `loreguard-windows.exe` - Windows x64
+
+### Option 2: Install from PyPI
+
+```bash
+pip install loreguard-cli
+```
+
+### Option 3: Install from Source
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+pip install -e .
+```
+
+### Option 4: Build Your Own Binary
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+pip install -e ".[build]"
+python scripts/build.py
+# Output: dist/loreguard (or dist/loreguard.exe on Windows)
+```
+
+## Quick Start
+
+### Interactive Wizard
+
+```bash
+loreguard
+```
+
+The wizard guides you through:
+1. **Authentication** - Enter your worker token
+2. **Model Selection** - Choose or download a model
+3. **Running** - Starts llama-server and connects to the backend
+
+### Headless CLI
+
+```bash
+loreguard-cli --token lg_worker_xxx --model /path/to/model.gguf
+```
+
+Or auto-download a supported model:
+
+```bash
+loreguard-cli --token lg_worker_xxx --model-id qwen3-4b-instruct
+```
+
+**Environment variables:**
+```bash
+export LOREGUARD_TOKEN=lg_worker_xxx
+export LOREGUARD_MODEL=/path/to/model.gguf
+loreguard-cli
+```
+
+## Supported Models
+
+| Model ID | Name | Size | Notes |
+|----------|------|------|-------|
+| `qwen3-4b-instruct` | Qwen3 4B Instruct | 2.8 GB | **Recommended** |
+| `llama-3.2-3b-instruct` | Llama 3.2 3B | 2.0 GB | Fast |
+| `qwen3-8b` | Qwen3 8B | 5.2 GB | Higher quality |
+| `meta-llama-3-8b-instruct` | Llama 3 8B | 4.9 GB | General purpose |
+
+Or use any `.gguf` model with `--model /path/to/model.gguf`.
+
+## Use Cases
+
+### For Game Developers (Testing & Development)
+
+Use Loreguard CLI during development to test NPC dialogs on your own hardware:
+
+```bash
+# Start the worker
+loreguard-cli --token $YOUR_DEV_TOKEN --model-id qwen3-4b-instruct
+
+# Your game connects to the Loreguard API
+# NPC inference requests are routed to your local worker
+```
+
+### For Players (Coming Soon)
+
+> **Note:** Player distribution support is in development. Currently, players would need their own Loreguard account and token.
+
+We're working on a **Game Keys** system that will allow:
+- Developers to register their game and get a Game API Key
+- Players to run the CLI without needing a Loreguard account
+- Automatic worker provisioning scoped to each game
+
+**Interested in early access?** Contact us at [loreguard.com](https://loreguard.com)
+
+## Requirements
+
+- **RAM**: 8 GB minimum (16 GB+ for larger models)
+- **GPU**: Optional but recommended (NVIDIA CUDA or Apple Silicon)
+- **Disk**: 2-6 GB depending on the model
+- **Python**: 3.10+ (if installing from source)
+
+## Get Your Token
+
+1. Go to [loreguard.com/developers](https://loreguard.com/developers)
+2. Create a worker token
+3. Use it with `--token` or `LOREGUARD_TOKEN`
+
+## Development
+
+```bash
+git clone https://github.com/beyond-logic-labs/loreguard-cli
+cd loreguard-cli
+python -m venv .venv
+source .venv/bin/activate  # Windows: .venv\Scripts\activate
+pip install -e ".[dev]"
+
+# Run the interactive wizard
+python -m src.wizard
+
+# Run the headless CLI
+python -m src.cli --help
+
+# Run tests
+pytest
+```
+
+## License
+
+MIT
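For orientation, the "How It Works" diagram in the README boils down to a bridge loop: hold a WebSocket connection to the backend, and serve each incoming inference request from the local llama.cpp server, which exposes an OpenAI-compatible `/v1/chat/completions` endpoint. The sketch below uses only the package's declared dependencies (`websockets`, `httpx`); the message shapes and the auth-header scheme are assumptions, not the actual wire protocol implemented in `src/main.py` and `src/llm.py`:

```python
# Hypothetical worker bridge; the real protocol in this package may differ.
import asyncio
import json
import os

import httpx
import websockets

BACKEND = os.environ.get("LOREKEEPER_BACKEND", "wss://api.lorekeeper.ai/workers")
LLM_ENDPOINT = os.environ.get("LLM_ENDPOINT", "http://localhost:8080")

async def run() -> None:
    headers = {"Authorization": f"Bearer {os.environ['WORKER_TOKEN']}"}  # assumed scheme
    async with websockets.connect(BACKEND, extra_headers=headers) as ws:
        async with httpx.AsyncClient(base_url=LLM_ENDPOINT, timeout=300.0) as llm:
            async for raw in ws:
                # Assumed request shape: {"id": ..., "messages": [...]}
                req = json.loads(raw)
                resp = await llm.post("/v1/chat/completions",
                                      json={"messages": req["messages"]})
                await ws.send(json.dumps({"id": req["id"], "result": resp.json()}))

asyncio.run(run())
```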
loreguard_cli-0.3.0/pyproject.toml
@@ -0,0 +1,56 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+name = "loreguard-cli"
+version = "0.3.0"
+description = "Local inference client for Loreguard NPCs"
+readme = "README.md"
+license = "MIT"
+requires-python = ">=3.10"
+keywords = ["llm", "npc", "inference", "loreguard", "gamedev"]
+classifiers = [
+    "Development Status :: 4 - Beta",
+    "Environment :: Console",
+    "Intended Audience :: Developers",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
+    "Topic :: Games/Entertainment",
+]
+dependencies = [
+    "httpx>=0.26.0",
+    "websockets>=12.0",
+    "pydantic>=2.5.0",
+    "aiofiles>=24.1.0",
+]
+
+[project.urls]
+Homepage = "https://loreguard.com"
+Documentation = "https://github.com/beyond-logic-labs/loreguard-cli#readme"
+Repository = "https://github.com/beyond-logic-labs/loreguard-cli"
+Issues = "https://github.com/beyond-logic-labs/loreguard-cli/issues"
+
+[project.optional-dependencies]
+dev = [
+    "pytest>=7.4.0",
+    "pytest-asyncio>=0.23.0",
+    "ruff>=0.1.0",
+]
+build = [
+    "pyinstaller>=6.0.0",
+]
+
+[project.scripts]
+loreguard = "src.wizard:main"
+loreguard-cli = "src.cli:main"
+
+[tool.hatch.build.targets.wheel]
+packages = ["src"]
+
+[tool.ruff]
+line-length = 100
+target-version = "py310"
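A note on the `[project.scripts]` table: installers generate two thin launcher executables, which is why `loreguard` and `loreguard-cli` behave like the `python -m src.wizard` / `python -m src.cli` invocations in the README's Development section. Each generated shim is roughly equivalent to:

```python
# What the generated `loreguard` launcher effectively runs:
import sys

from src.wizard import main  # entry point named in [project.scripts]

if __name__ == "__main__":
    sys.exit(main())
```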