locollm-0.2.0.tar.gz
- locollm-0.2.0/.claude/settings.local.json +11 -0
- locollm-0.2.0/.github/workflows/deploy-pages.yml +54 -0
- locollm-0.2.0/.gitignore +49 -0
- locollm-0.2.0/.python-version +1 -0
- locollm-0.2.0/CONTRIBUTING.md +258 -0
- locollm-0.2.0/LICENSE +21 -0
- locollm-0.2.0/PKG-INFO +322 -0
- locollm-0.2.0/README.md +298 -0
- locollm-0.2.0/adapters/analysis/eval_dataset.jsonl +20 -0
- locollm-0.2.0/adapters/code/eval_dataset.jsonl +20 -0
- locollm-0.2.0/adapters/math/eval_dataset.jsonl +20 -0
- locollm-0.2.0/adapters/registry.yaml +73 -0
- locollm-0.2.0/docs/.nojekyll +0 -0
- locollm-0.2.0/docs/CNAME +1 -0
- locollm-0.2.0/docs/TRAINING_LOG_TEMPLATE.md +163 -0
- locollm-0.2.0/docs/adapter-guide.md +251 -0
- locollm-0.2.0/docs/adr/0000-template.md +21 -0
- locollm-0.2.0/docs/adr/0001-base-model-qwen3-4b.md +31 -0
- locollm-0.2.0/docs/adr/0002-adapter-registry-design.md +40 -0
- locollm-0.2.0/docs/adr/0003-single-evolving-router.md +57 -0
- locollm-0.2.0/docs/adr/0004-retire-cerebro-adopt-b250-multi-gpu.md +37 -0
- locollm-0.2.0/docs/adr/README.md +40 -0
- locollm-0.2.0/docs/ai-landscape.md +138 -0
- locollm-0.2.0/docs/architecture-vision.md +339 -0
- locollm-0.2.0/docs/architecture.md +395 -0
- locollm-0.2.0/docs/base-model-selection.md +327 -0
- locollm-0.2.0/docs/benchmarking-guide.md +40 -0
- locollm-0.2.0/docs/benchmarks/index.md +22 -0
- locollm-0.2.0/docs/capstone-project-blurb.md +40 -0
- locollm-0.2.0/docs/economics-of-local-training.md +171 -0
- locollm-0.2.0/docs/evaluation-standards.md +243 -0
- locollm-0.2.0/docs/faq.md +124 -0
- locollm-0.2.0/docs/finetuning-primer.md +184 -0
- locollm-0.2.0/docs/ideas.md +129 -0
- locollm-0.2.0/docs/index.html +1057 -0
- locollm-0.2.0/docs/index.md +61 -0
- locollm-0.2.0/docs/known-challenges.md +191 -0
- locollm-0.2.0/docs/meet-the-lab.md +171 -0
- locollm-0.2.0/docs/meet-the-team.md +37 -0
- locollm-0.2.0/docs/nvidia-gpu-reference.md +159 -0
- locollm-0.2.0/docs/project-ideas.md +291 -0
- locollm-0.2.0/docs/research-roadmap.md +147 -0
- locollm-0.2.0/docs/stylesheets/extra.css +164 -0
- locollm-0.2.0/docs/train-math-adapter.md +149 -0
- locollm-0.2.0/docs/training-new-adapters.md +150 -0
- locollm-0.2.0/docs/why-locollm.md +159 -0
- locollm-0.2.0/mkdocs.yml +83 -0
- locollm-0.2.0/notebooks/train_math_adapter.ipynb +533 -0
- locollm-0.2.0/notebooks/train_tiny_poc.ipynb +730 -0
- locollm-0.2.0/pyproject.toml +82 -0
- locollm-0.2.0/scripts/download_models.sh +108 -0
- locollm-0.2.0/scripts/prepare_analysis_data.py +103 -0
- locollm-0.2.0/scripts/prepare_code_data.py +100 -0
- locollm-0.2.0/scripts/prepare_gsm8k.py +104 -0
- locollm-0.2.0/scripts/train_adapter.py +189 -0
- locollm-0.2.0/scripts/train_math_adapter.py +187 -0
- locollm-0.2.0/src/locollm/__init__.py +5 -0
- locollm-0.2.0/src/locollm/adapter_manager.py +114 -0
- locollm-0.2.0/src/locollm/chat_session.py +202 -0
- locollm-0.2.0/src/locollm/cli.py +336 -0
- locollm-0.2.0/src/locollm/eval.py +169 -0
- locollm-0.2.0/src/locollm/ollama_client.py +145 -0
- locollm-0.2.0/src/locollm/router.py +42 -0
- locollm-0.2.0/tests/__init__.py +0 -0
- locollm-0.2.0/tests/test_adapter_manager.py +137 -0
- locollm-0.2.0/tests/test_chat_session.py +285 -0
- locollm-0.2.0/tests/test_cli.py +71 -0
- locollm-0.2.0/tests/test_eval.py +136 -0
- locollm-0.2.0/tests/test_router.py +50 -0
- locollm-0.2.0/uv.lock +3685 -0
locollm-0.2.0/.github/workflows/deploy-pages.yml
ADDED
@@ -0,0 +1,54 @@

name: Deploy to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: pages
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Build MkDocs
        run: uv run --group docs mkdocs build --strict

      - name: Assemble site
        run: |
          mkdir _site

          # Landing page files at root
          cp docs/index.html _site/
          cp docs/CNAME _site/

          # MkDocs output under /docs/
          mv site _site/docs

      - uses: actions/upload-pages-artifact@v3

  deploy:
    needs: build
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - id: deployment
        uses: actions/deploy-pages@v4
locollm-0.2.0/.gitignore
ADDED
@@ -0,0 +1,49 @@

# OS
.DS_Store
Thumbs.db

# Editor
*.swp
*.swo
*~
.vscode/
.idea/

# Python
__pycache__/
*.py[cod]
*.egg-info/
dist/
build/
.venv/
.mypy_cache/
.pytest_cache/
.ruff_cache/
htmlcov/
.coverage

# Jupyter
.ipynb_checkpoints/

# MkDocs
site/

# Models and weights
models/
*.gguf
*.bin
*.safetensors

# Benchmark results
results/

# Logs
*.log

# Training
wandb/
adapter_checkpoints/

# Environment
.env
.env.local
locollm-0.2.0/.python-version
ADDED
@@ -0,0 +1 @@

3.10
locollm-0.2.0/CONTRIBUTING.md
ADDED
@@ -0,0 +1,258 @@

# Contributing to LocoLLM

This guide covers everything you need to contribute a new adapter to the LocoLLM ecosystem. Whether you're a student working on a semester project or an external contributor, the process is the same.

## Overview

Contributing an adapter involves five steps:

1. Propose a domain and get it approved
2. Curate and prepare training data
3. Fine-tune a LoRA adapter using the standard training script
4. Evaluate it against the base model using the standard harness
5. Submit a pull request with all required artifacts

The whole process is designed to be completable by a student team in one semester.

## Step 1: Propose a Domain

Before starting work, open a GitHub issue using the `[adapter-proposal]` template. Your proposal should cover:

- **Target domain**: What task or subject area will this adapter specialize in?
- **Why the base model underperforms**: Show 3-5 example queries where the base model gives poor results. This is your motivation.
- **Planned training data sources**: Where will your training examples come from? Public datasets, synthetic generation, expert-written examples, or a combination?
- **Scope boundaries**: What's in and out of scope for this adapter? "Math" is too broad. "Multi-step arithmetic word problems at secondary school level" is about right.
- **Estimated training set size**: How many examples do you plan to use?

A maintainer will review and either approve, suggest adjustments, or flag overlap with existing adapters.

### Choosing Good Domains

Adapters work best when the target domain has these properties:

**Good candidates:**
- The base model gives mediocre results but not terrible ones (there's something to build on)
- The task has a relatively consistent input/output structure
- Quality can be measured objectively or semi-objectively
- Enough training data exists or can be generated
- Students in the program would actually use this

**Poor candidates:**
- The base model already handles it well (basic summarization, simple Q&A)
- The task is too broad or subjective to benchmark meaningfully
- Training data would require deep domain expertise to create or validate
- The domain changes so rapidly that training data goes stale quickly

**Suggested starter domains** (for teams that want guidance):
- Structured output formatting (JSON, CSV, specific templates)
- Domain-specific reasoning (accounting calculations, legal clause analysis)
- Code generation in a specific language or framework
- Academic writing style (citations, formal structure, discipline-specific conventions)
- Data analysis narration (turning numbers into plain-language insights)

## Step 2: Prepare Training Data

### Format

All training data must be in JSONL format with three fields:

```json
{"instruction": "What to do", "input": "Context or problem", "output": "Expected response"}
```

For tasks without separate context, leave `input` as an empty string:

```json
{"instruction": "Write a formal email declining a meeting invitation", "input": "", "output": "Dear [Name], ..."}
```
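
Before training, it is worth verifying that every record parses and carries exactly these three fields. A minimal validation sketch (the `check_dataset` helper and the path are illustrative, not part of the repo):

```python
import json
from pathlib import Path

REQUIRED_FIELDS = {"instruction", "input", "output"}

def check_dataset(path: str) -> int:
    """Parse a JSONL dataset and verify each record has exactly the required fields."""
    count = 0
    for lineno, line in enumerate(Path(path).read_text().splitlines(), start=1):
        if not line.strip():
            continue  # skip blank lines
        record = json.loads(line)  # raises on malformed JSON
        if set(record) != REQUIRED_FIELDS:
            raise ValueError(f"line {lineno}: expected {REQUIRED_FIELDS}, got {set(record)}")
        if not record["instruction"] or not record["output"]:
            raise ValueError(f"line {lineno}: instruction and output must be non-empty")
        count += 1
    return count

print(check_dataset("adapters/your-domain/training/dataset.jsonl"), "valid examples")
```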

### Requirements

- **Minimum 500 examples** for initial training. More is generally better, but quality matters more than quantity.
- **No copyrighted material** in training data. Use public domain sources, synthetic generation, or original content.
- **Diverse examples** within your domain. Don't train on 500 variations of the same problem.
- **Consistent quality** in the output field. Every output should be an example of what a good response looks like.
- **Document everything** in `TRAINING_LOG.md`: data sources, any filtering or cleaning steps, how synthetic data was generated.

### Synthetic Data Generation

Using a frontier model to generate training examples is fine and often practical. If you do this (a review-sampling sketch follows this list):

- Document which model and what prompts you used
- Include the generation script in your training directory
- Manually review at least 10% of generated examples for quality
- Note the review process and any rejection/edit rates in your training log
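
One lightweight way to satisfy the 10% review requirement is to draw a reproducible random sample and log accept/reject counts. A sketch, assuming your generated examples sit in a hypothetical `generated.jsonl`:

```python
import json
import random
from pathlib import Path

random.seed(0)  # reproducible sample, so the training log can cite it

examples = [json.loads(l) for l in Path("generated.jsonl").read_text().splitlines() if l.strip()]
sample = random.sample(examples, k=max(1, len(examples) // 10))  # at least 10%

rejected = 0
for ex in sample:
    print(json.dumps(ex, indent=2))
    if input("keep? [y/n] ").strip().lower() != "y":
        rejected += 1

# Numbers to record in TRAINING_LOG.md
print(f"reviewed {len(sample)} of {len(examples)}; rejection rate {rejected / len(sample):.1%}")
```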

### Data Splits

- **Training set**: Your main JSONL file (minimum 500 examples)
- **Benchmark set** (`eval/benchmark.jsonl`): Separate set of at least 50 examples, held out from training, used for evaluation. These should never appear in training data (a holdout-split sketch follows this list).
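
A simple way to carve out the holdout, assuming all curated examples start in a single file (file names here are illustrative):

```python
import random
from pathlib import Path

random.seed(42)  # fixed seed so the split is reproducible

lines = [l for l in Path("all_examples.jsonl").read_text().splitlines() if l.strip()]
random.shuffle(lines)

n_benchmark = 50  # held-out set; never used in training
Path("eval").mkdir(exist_ok=True)
Path("eval/benchmark.jsonl").write_text("\n".join(lines[:n_benchmark]) + "\n")
Path("training").mkdir(exist_ok=True)
Path("training/dataset.jsonl").write_text("\n".join(lines[n_benchmark:]) + "\n")
```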

## Step 3: Fine-Tune

Use the standard training script to ensure consistency across all adapters:

```bash
uv run python scripts/fine_tune.py \
  --base-model Qwen/Qwen2.5-3B-Instruct \
  --dataset adapters/your-domain/training/dataset.jsonl \
  --output adapters/your-domain/ \
  --lora-rank 16 \
  --epochs 3 \
  --lr 2e-4 \
  --batch-size 4 \
  --max-seq-length 1024
```

### Training Hardware

Fine-tuning a 3B model with LoRA is feasible on:
- A single consumer GPU (8GB+ VRAM) in 1-4 hours
- Google Colab free tier (T4 GPU) in 2-6 hours
- University lab machines

The standard script uses QLoRA (4-bit base model + LoRA adapters) to minimize memory requirements.
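
For intuition about what that means in code: a QLoRA setup with the Hugging Face `transformers` and `peft` libraries generally looks like the sketch below. This illustrates the technique, not the actual contents of `fine_tune.py`; the target modules in particular are an assumption.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

# Load the frozen base model in 4-bit (the "Q" in QLoRA)
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-3B-Instruct", quantization_config=bnb, device_map="auto"
)

# Attach small trainable LoRA matrices; only these are updated during training
lora = LoraConfig(
    r=16,                    # matches --lora-rank 16
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # assumption: attention projections
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora)
model.print_trainable_parameters()  # typically well under 1% of total parameters
```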

### Hyperparameter Guidance

The defaults in `fine_tune.py` are good starting points. If you want to experiment:

| Parameter | Default | Notes |
|---|---|---|
| LoRA rank | 16 | Higher = more capacity but a larger adapter. 8-32 is a reasonable range. |
| Learning rate | 2e-4 | Standard for QLoRA. Reduce if loss is unstable. |
| Epochs | 3 | Watch for overfitting. 2-5 is typical. |
| Batch size | 4 | Increase if GPU memory allows. |
| Max sequence length | 1024 | Increase for tasks with longer inputs/outputs. |

### Output Artifacts

After training, your adapter directory should contain:

```
adapters/your-domain/
├── adapter_config.json          # LoRA configuration
├── adapter_model.safetensors    # Trained weights
├── training/
│   ├── dataset.jsonl            # Training data
│   ├── train_config.yaml        # Exact config used
│   └── TRAINING_LOG.md          # Human-readable training record
└── eval/
    ├── benchmark.jsonl          # 50+ held-out test cases
    └── results.json             # Populated in Step 4
```
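
Before moving on, a quick script can confirm every expected artifact exists (a sketch; adjust the root path to your adapter):

```python
from pathlib import Path

REQUIRED = [
    "adapter_config.json",
    "adapter_model.safetensors",
    "training/dataset.jsonl",
    "training/train_config.yaml",
    "training/TRAINING_LOG.md",
    "eval/benchmark.jsonl",
    "eval/results.json",
]

root = Path("adapters/your-domain")
missing = [p for p in REQUIRED if not (root / p).exists()]
print("all artifacts present" if not missing else f"missing: {missing}")
```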

## Step 4: Evaluate

Run the standard evaluation harness to benchmark your adapter:

```bash
# Evaluate your adapter
uv run python scripts/evaluate.py \
  --adapter adapters/your-domain/ \
  --benchmark adapters/your-domain/eval/benchmark.jsonl \
  --output adapters/your-domain/eval/results.json

# Evaluate the base model on the same benchmark (for comparison)
uv run python scripts/evaluate.py \
  --benchmark adapters/your-domain/eval/benchmark.jsonl \
  --output adapters/your-domain/eval/base_results.json

# Run an out-of-domain check (pick another adapter's benchmark)
uv run python scripts/evaluate.py \
  --adapter adapters/your-domain/ \
  --benchmark adapters/math-reasoning/eval/benchmark.jsonl \
  --output adapters/your-domain/eval/ood_results.json
```

### Minimum Bar

Your adapter must:
- Score higher than the base model on your domain benchmark
- Not score significantly lower than the base model on the out-of-domain benchmark (small degradation is acceptable; large drops are not)

There is no fixed percentage improvement threshold. A 10% improvement on a hard task is valuable; a 50% improvement on a trivial task is less interesting. Use judgment and document your reasoning.

### Results Format

The evaluation script produces a `results.json`:

```json
{
  "adapter": "your-domain",
  "base_model": "Qwen/Qwen2.5-3B-Instruct",
  "quantization": "Q4_K_M",
  "benchmark": "your-domain/eval/benchmark.jsonl",
  "n_examples": 50,
  "metric": "exact_match",
  "adapter_score": 0.72,
  "base_model_score": 0.41,
  "improvement": 0.756,
  "timestamp": "2026-08-15T14:30:00Z"
}
```
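
Note that `improvement` is relative to the base model rather than an absolute score delta: with the numbers above, (0.72 - 0.41) / 0.41 ≈ 0.756. A short sketch that recomputes it from the fields shown:

```python
import json
from pathlib import Path

r = json.loads(Path("adapters/your-domain/eval/results.json").read_text())

# Relative improvement over the base model, matching the results.json convention
improvement = (r["adapter_score"] - r["base_model_score"]) / r["base_model_score"]
assert abs(improvement - r["improvement"]) < 1e-3
print(f"{improvement:.3f}")
```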

## Step 5: Submit

Open a pull request that includes:

1. Your complete adapter directory (all files listed above)
2. An updated entry in `adapters/registry.yaml` (see the example after this list)
3. A PR description that includes:
   - What the adapter does and why it's useful
   - Benchmark results (adapter score vs. base model score)
   - Any known limitations or edge cases
   - Team members and their contributions
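
The authoritative schema lives in `adapters/registry.yaml` itself, so mirror the existing entries; the YAML below is only a hypothetical illustration of the kind of metadata an entry carries:

```yaml
# Hypothetical entry shape; follow the existing entries in adapters/registry.yaml
your-domain:
  version: 0.1.0
  description: Multi-step arithmetic word problems at secondary school level
  base_model: Qwen/Qwen2.5-3B-Instruct
  path: adapters/your-domain/
  benchmark: adapters/your-domain/eval/benchmark.jsonl
  metric: exact_match
  adapter_score: 0.72
  base_model_score: 0.41
```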

### PR Checklist

- [ ] `adapter_config.json` and `adapter_model.safetensors` present
- [ ] `training/dataset.jsonl` with 500+ examples
- [ ] `training/TRAINING_LOG.md` fully documented
- [ ] `eval/benchmark.jsonl` with 50+ held-out test cases
- [ ] `eval/results.json` showing improvement over base model
- [ ] Out-of-domain evaluation completed
- [ ] `registry.yaml` updated with new adapter entry
- [ ] No copyrighted material in training data
- [ ] All data sources documented

## Improving Existing Adapters

Not every contribution needs to be a new adapter. Improving an existing one is equally valuable:

- **Better training data**: More examples, higher quality, more diverse
- **Hyperparameter tuning**: Finding a better training configuration
- **Benchmark expansion**: Adding more test cases or harder test cases
- **Version bumping**: If your improved adapter beats the current version on the same benchmark, submit it as a version upgrade

When improving an existing adapter, keep the previous version's benchmark results in the PR for comparison.

## Development Setup

```bash
# Install uv (if you don't have it)
curl -LsSf https://astral.sh/uv/install.sh | sh

# Install all dependencies (including dev tools)
uv sync

# Run checks
uv run ruff check .            # lint
uv run ruff format --check .   # format check
uv run mypy src/               # type check
uv run pytest                  # tests
```

## Code Contributions

Contributions to the core framework (router, CLI, evaluation harness) are also welcome. For these:

1. Open an issue describing the proposed change
2. Get a maintainer's input before starting significant work
3. Follow existing code style and patterns
4. Include tests where applicable
5. Update documentation if behavior changes

## Questions?

Open an issue with the `[question]` tag or reach out to the project maintainers.
locollm-0.2.0/LICENSE
ADDED
@@ -0,0 +1,21 @@

MIT License

Copyright (c) 2026 Michael Borck and Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.