llama-cpp-haystack 0.1.0.tar.gz
This diff shows the content of publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- llama_cpp_haystack-0.1.0/.gitignore +133 -0
- llama_cpp_haystack-0.1.0/LICENSE.txt +73 -0
- llama_cpp_haystack-0.1.0/PKG-INFO +258 -0
- llama_cpp_haystack-0.1.0/README.md +233 -0
- llama_cpp_haystack-0.1.0/examples/llama_cpp_generator_example.py +12 -0
- llama_cpp_haystack-0.1.0/examples/rag_pipeline_example.py +85 -0
- llama_cpp_haystack-0.1.0/pyproject.toml +185 -0
- llama_cpp_haystack-0.1.0/src/llama_cpp_haystack/__init__.py +7 -0
- llama_cpp_haystack-0.1.0/src/llama_cpp_haystack/generator.py +98 -0
- llama_cpp_haystack-0.1.0/tests/__init__.py +3 -0
- llama_cpp_haystack-0.1.0/tests/models/.gitignore +2 -0
- llama_cpp_haystack-0.1.0/tests/test_generator.py +233 -0

@@ -0,0 +1,133 @@ llama_cpp_haystack-0.1.0/.gitignore
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+volumes/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# IDEs
+.vscode

@@ -0,0 +1,73 @@ llama_cpp_haystack-0.1.0/LICENSE.txt
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

@@ -0,0 +1,258 @@ llama_cpp_haystack-0.1.0/PKG-INFO
+Metadata-Version: 2.1
+Name: llama-cpp-haystack
+Version: 0.1.0
+Summary: An integration between the llama.cpp LLM framework and Haystack
+Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp#readme
+Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
+Project-URL: Source, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp
+Author: Ashwin Mathur
+Author-email: deepset GmbH <info@deepset.ai>
+License-Expression: Apache-2.0
+License-File: LICENSE.txt
+Classifier: Development Status :: 4 - Beta
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.8
+Requires-Dist: haystack-ai
+Requires-Dist: llama-cpp-python
+Description-Content-Type: text/markdown
+
+# llama-cpp-haystack
+
+[](https://pypi.org/project/llama-cpp-haystack)
+[](https://pypi.org/project/llama-cpp)
+
+-----
+
+Custom component for [Haystack](https://github.com/deepset-ai/haystack) (2.x) for running LLMs using the [Llama.cpp](https://github.com/ggerganov/llama.cpp) LLM framework. This implementation leverages the [Python Bindings for llama.cpp](https://github.com/abetlen/llama-cpp-python).
+
+**Table of Contents**
+
+- [Installation](#installation)
+- [Usage](#usage)
+- [Example](#example)
+- [License](#license)
+
+## Installation
+
+```bash
+pip install llama-cpp-haystack
+```
+
+The default install behaviour is to build `llama.cpp` for CPU only on Linux and Windows and use Metal on MacOS.
+
+To install using the other backends, first install [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) using the instructions on their [installation documentation](https://github.com/abetlen/llama-cpp-python#installation) and then install [llama-cpp-haystack](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp).
+
+
+For example, to use `llama-cpp-haystack` with the cuBLAS backend:
+
+```bash
+export LLAMA_CUBLAS=1
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
+pip install llama-cpp-haystack
+```
+
+## Usage
+
+You can utilize the [`LlamaCppGenerator`](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp/src/llama_cpp_haystack/generator.py) to load models quantized using llama.cpp (GGUF) for text generation.
+
+Information about the supported models and model parameters can be found on the llama.cpp [documentation](https://llama-cpp-python.readthedocs.io/en/latest).
+
+The GGUF versions of popular models can be downloaded from [HuggingFace](https://huggingface.co/models?library=gguf).
+
+### Passing additional model parameters
+
+The `model_path`, `n_ctx`, `n_batch` arguments have been exposed for convenience and can be directly passed to the Generator during initialization as keyword arguments.
+
+The `model_kwargs` parameter can be used to pass additional arguments when initializing the model. In case of duplication, these kwargs override `model_path`, `n_ctx`, and `n_batch` init parameters.
+
+See Llama.cpp's [model documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__init__) for more information on the available model arguments.
+
+For example, to offload the model to GPU during initialization:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+    model_kwargs={"n_gpu_layers": -1}
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(prompt, generation_kwargs={"max_tokens": 128})
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+### Passing generation parameters
+
+The `generation_kwargs` parameter can be used to pass additional generation arguments like `max_tokens`, `temperature`, `top_k`, `top_p`, etc to the model during inference.
+
+See Llama.cpp's [`create_completion` documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion) for more information on the available generation arguments.
+
+For example, to set the `max_tokens` and `temperature`:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+    generation_kwargs={"max_tokens": 128, "temperature": 0.1},
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(prompt)
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+The `generation_kwargs` can also be passed to the `run` method of the generator directly:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(
+    prompt,
+    generation_kwargs={"max_tokens": 128, "temperature": 0.1},
+)
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+
+## Example
+
+Below is the example Retrieval Augmented Generation pipeline that uses the [Simple Wikipedia](https://huggingface.co/datasets/pszemraj/simple_wikipedia) Dataset from HuggingFace. You can find more examples in the [`examples`](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp/examples) folder.
+
+
+Load the dataset:
+
+```python
+# Install HuggingFace Datasets using "pip install datasets"
+from datasets import load_dataset
+from haystack import Document, Pipeline
+from haystack.components.builders.answer_builder import AnswerBuilder
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.embedders import SentenceTransformersDocumentEmbedder, SentenceTransformersTextEmbedder
+from haystack.components.retrievers import InMemoryEmbeddingRetriever
+from haystack.components.writers import DocumentWriter
+from haystack.document_stores import InMemoryDocumentStore
+
+# Import LlamaCppGenerator
+from llama_cpp_haystack import LlamaCppGenerator
+
+# Load first 100 rows of the Simple Wikipedia Dataset from HuggingFace
+dataset = load_dataset("pszemraj/simple_wikipedia", split="validation[:100]")
+
+docs = [
+    Document(
+        content=doc["text"],
+        meta={
+            "title": doc["title"],
+            "url": doc["url"],
+        },
+    )
+    for doc in dataset
+]
+```
+
+Index the documents to the `InMemoryDocumentStore` using the `SentenceTransformersDocumentEmbedder` and `DocumentWriter`:
+
+```python
+doc_store = InMemoryDocumentStore(embedding_similarity_function="cosine")
+doc_embedder = SentenceTransformersDocumentEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+# Indexing Pipeline
+indexing_pipeline = Pipeline()
+indexing_pipeline.add_component(instance=doc_embedder, name="DocEmbedder")
+indexing_pipeline.add_component(instance=DocumentWriter(document_store=doc_store), name="DocWriter")
+indexing_pipeline.connect(connect_from="DocEmbedder", connect_to="DocWriter")
+
+indexing_pipeline.run({"DocEmbedder": {"documents": docs}})
+```
+
+Create the Retrieval Augmented Generation (RAG) pipeline and add the `LlamaCppGenerator` to it:
+
+```python
+# Prompt Template for the https://huggingface.co/openchat/openchat-3.5-1210 LLM
+prompt_template = """GPT4 Correct User: Answer the question using the provided context.
+Question: {{question}}
+Context:
+{% for doc in documents %}
+{{ doc.content }}
+{% endfor %}
+<|end_of_turn|>
+GPT4 Correct Assistant:
+"""
+
+rag_pipeline = Pipeline()
+
+text_embedder = SentenceTransformersTextEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+# Load the LLM using LlamaCppGenerator
+model_path = "openchat-3.5-1210.Q3_K_S.gguf"
+generator = LlamaCppGenerator(model_path=model_path, n_ctx=4096, n_batch=128)
+
+rag_pipeline.add_component(
+    instance=text_embedder,
+    name="text_embedder",
+)
+rag_pipeline.add_component(instance=InMemoryEmbeddingRetriever(document_store=doc_store, top_k=3), name="retriever")
+rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
+rag_pipeline.add_component(instance=generator, name="llm")
+rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
+
+rag_pipeline.connect("text_embedder", "retriever")
+rag_pipeline.connect("retriever", "prompt_builder.documents")
+rag_pipeline.connect("prompt_builder", "llm")
+rag_pipeline.connect("llm.replies", "answer_builder.replies")
+rag_pipeline.connect("retriever", "answer_builder.documents")
+```
+
+Run the pipeline:
+
+```python
+question = "Which year did the Joker movie release?"
+result = rag_pipeline.run(
+    {
+        "text_embedder": {"text": question},
+        "prompt_builder": {"question": question},
+        "llm": {"generation_kwargs": {"max_tokens": 128, "temperature": 0.1}},
+        "answer_builder": {"query": question},
+    }
+)
+
+generated_answer = result["answer_builder"]["answers"][0]
+print(generated_answer.data)
+# The Joker movie was released on October 4, 2019.
+```
+
+## License
+
+`llama-cpp-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license.

@@ -0,0 +1,233 @@ llama_cpp_haystack-0.1.0/README.md
+# llama-cpp-haystack
+
+[](https://pypi.org/project/llama-cpp-haystack)
+[](https://pypi.org/project/llama-cpp)
+
+-----
+
+Custom component for [Haystack](https://github.com/deepset-ai/haystack) (2.x) for running LLMs using the [Llama.cpp](https://github.com/ggerganov/llama.cpp) LLM framework. This implementation leverages the [Python Bindings for llama.cpp](https://github.com/abetlen/llama-cpp-python).
+
+**Table of Contents**
+
+- [Installation](#installation)
+- [Usage](#usage)
+- [Example](#example)
+- [License](#license)
+
+## Installation
+
+```bash
+pip install llama-cpp-haystack
+```
+
+The default install behaviour is to build `llama.cpp` for CPU only on Linux and Windows and use Metal on MacOS.
+
+To install using the other backends, first install [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) using the instructions on their [installation documentation](https://github.com/abetlen/llama-cpp-python#installation) and then install [llama-cpp-haystack](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp).
+
+
+For example, to use `llama-cpp-haystack` with the cuBLAS backend:
+
+```bash
+export LLAMA_CUBLAS=1
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
+pip install llama-cpp-haystack
+```
+
+## Usage
+
+You can utilize the [`LlamaCppGenerator`](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp/src/llama_cpp_haystack/generator.py) to load models quantized using llama.cpp (GGUF) for text generation.
+
+Information about the supported models and model parameters can be found on the llama.cpp [documentation](https://llama-cpp-python.readthedocs.io/en/latest).
+
+The GGUF versions of popular models can be downloaded from [HuggingFace](https://huggingface.co/models?library=gguf).
+
+### Passing additional model parameters
+
+The `model_path`, `n_ctx`, `n_batch` arguments have been exposed for convenience and can be directly passed to the Generator during initialization as keyword arguments.
+
+The `model_kwargs` parameter can be used to pass additional arguments when initializing the model. In case of duplication, these kwargs override `model_path`, `n_ctx`, and `n_batch` init parameters.
+
+See Llama.cpp's [model documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__init__) for more information on the available model arguments.
+
+For example, to offload the model to GPU during initialization:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+    model_kwargs={"n_gpu_layers": -1}
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(prompt, generation_kwargs={"max_tokens": 128})
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+### Passing generation parameters
+
+The `generation_kwargs` parameter can be used to pass additional generation arguments like `max_tokens`, `temperature`, `top_k`, `top_p`, etc to the model during inference.
+
+See Llama.cpp's [`create_completion` documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion) for more information on the available generation arguments.
+
+For example, to set the `max_tokens` and `temperature`:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+    generation_kwargs={"max_tokens": 128, "temperature": 0.1},
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(prompt)
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+The `generation_kwargs` can also be passed to the `run` method of the generator directly:
+
+```python
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(
+    model_path="/content/openchat-3.5-1210.Q3_K_S.gguf",
+    n_ctx=512,
+    n_batch=128,
+)
+generator.warm_up()
+
+input = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {input} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(
+    prompt,
+    generation_kwargs={"max_tokens": 128, "temperature": 0.1},
+)
+generated_text = result["replies"][0]
+
+print(generated_text)
+```
+
+## Example
+
+Below is the example Retrieval Augmented Generation pipeline that uses the [Simple Wikipedia](https://huggingface.co/datasets/pszemraj/simple_wikipedia) Dataset from HuggingFace. You can find more examples in the [`examples`](https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp/examples) folder.
+
+
+Load the dataset:
+
+```python
+# Install HuggingFace Datasets using "pip install datasets"
+from datasets import load_dataset
+from haystack import Document, Pipeline
+from haystack.components.builders.answer_builder import AnswerBuilder
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.embedders import SentenceTransformersDocumentEmbedder, SentenceTransformersTextEmbedder
+from haystack.components.retrievers import InMemoryEmbeddingRetriever
+from haystack.components.writers import DocumentWriter
+from haystack.document_stores import InMemoryDocumentStore
+
+# Import LlamaCppGenerator
+from llama_cpp_haystack import LlamaCppGenerator
+
+# Load first 100 rows of the Simple Wikipedia Dataset from HuggingFace
+dataset = load_dataset("pszemraj/simple_wikipedia", split="validation[:100]")
+
+docs = [
+    Document(
+        content=doc["text"],
+        meta={
+            "title": doc["title"],
+            "url": doc["url"],
+        },
+    )
+    for doc in dataset
+]
+```
+
+Index the documents to the `InMemoryDocumentStore` using the `SentenceTransformersDocumentEmbedder` and `DocumentWriter`:
+
+```python
+doc_store = InMemoryDocumentStore(embedding_similarity_function="cosine")
+doc_embedder = SentenceTransformersDocumentEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+# Indexing Pipeline
+indexing_pipeline = Pipeline()
+indexing_pipeline.add_component(instance=doc_embedder, name="DocEmbedder")
+indexing_pipeline.add_component(instance=DocumentWriter(document_store=doc_store), name="DocWriter")
+indexing_pipeline.connect(connect_from="DocEmbedder", connect_to="DocWriter")
+
+indexing_pipeline.run({"DocEmbedder": {"documents": docs}})
+```
+
+Create the Retrieval Augmented Generation (RAG) pipeline and add the `LlamaCppGenerator` to it:
+
+```python
+# Prompt Template for the https://huggingface.co/openchat/openchat-3.5-1210 LLM
+prompt_template = """GPT4 Correct User: Answer the question using the provided context.
+Question: {{question}}
+Context:
+{% for doc in documents %}
+{{ doc.content }}
+{% endfor %}
+<|end_of_turn|>
+GPT4 Correct Assistant:
+"""
+
+rag_pipeline = Pipeline()
+
+text_embedder = SentenceTransformersTextEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+# Load the LLM using LlamaCppGenerator
+model_path = "openchat-3.5-1210.Q3_K_S.gguf"
+generator = LlamaCppGenerator(model_path=model_path, n_ctx=4096, n_batch=128)
+
+rag_pipeline.add_component(
+    instance=text_embedder,
+    name="text_embedder",
+)
+rag_pipeline.add_component(instance=InMemoryEmbeddingRetriever(document_store=doc_store, top_k=3), name="retriever")
+rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
+rag_pipeline.add_component(instance=generator, name="llm")
+rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
+
+rag_pipeline.connect("text_embedder", "retriever")
+rag_pipeline.connect("retriever", "prompt_builder.documents")
+rag_pipeline.connect("prompt_builder", "llm")
+rag_pipeline.connect("llm.replies", "answer_builder.replies")
+rag_pipeline.connect("retriever", "answer_builder.documents")
+```
+
+Run the pipeline:
+
+```python
+question = "Which year did the Joker movie release?"
+result = rag_pipeline.run(
+    {
+        "text_embedder": {"text": question},
+        "prompt_builder": {"question": question},
+        "llm": {"generation_kwargs": {"max_tokens": 128, "temperature": 0.1}},
+        "answer_builder": {"query": question},
+    }
+)
+
+generated_answer = result["answer_builder"]["answers"][0]
+print(generated_answer.data)
+# The Joker movie was released on October 4, 2019.
+```
+
+## License
+
+`llama-cpp-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license.

@@ -0,0 +1,12 @@ llama_cpp_haystack-0.1.0/examples/llama_cpp_generator_example.py
+from llama_cpp_haystack import LlamaCppGenerator
+
+generator = LlamaCppGenerator(model_path="openchat-3.5-1210.Q3_K_S.gguf", n_ctx=512, n_batch=128)
+generator.warm_up()
+
+question = "Who is the best American actor?"
+prompt = f"GPT4 Correct User: {question} <|end_of_turn|> GPT4 Correct Assistant:"
+
+result = generator.run(prompt, generation_kwargs={"max_tokens": 128})
+generated_text = result["replies"][0]
+
+print(generated_text)

@@ -0,0 +1,85 @@ llama_cpp_haystack-0.1.0/examples/rag_pipeline_example.py
+from datasets import load_dataset
+from haystack import Document, Pipeline
+from haystack.components.builders.answer_builder import AnswerBuilder
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.embedders import SentenceTransformersDocumentEmbedder, SentenceTransformersTextEmbedder
+from haystack.components.retrievers import InMemoryEmbeddingRetriever
+from haystack.components.writers import DocumentWriter
+from haystack.document_stores import InMemoryDocumentStore
+
+from llama_cpp_haystack import LlamaCppGenerator
+
+# Load first 100 rows of the Simple Wikipedia Dataset from HuggingFace
+dataset = load_dataset("pszemraj/simple_wikipedia", split="validation[:100]")
+
+docs = [
+    Document(
+        content=doc["text"],
+        meta={
+            "title": doc["title"],
+            "url": doc["url"],
+        },
+    )
+    for doc in dataset
+]
+
+doc_store = InMemoryDocumentStore(embedding_similarity_function="cosine")
+doc_embedder = SentenceTransformersDocumentEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+
+# Indexing Pipeline
+indexing_pipeline = Pipeline()
+indexing_pipeline.add_component(instance=doc_embedder, name="DocEmbedder")
+indexing_pipeline.add_component(instance=DocumentWriter(document_store=doc_store), name="DocWriter")
+indexing_pipeline.connect(connect_from="DocEmbedder", connect_to="DocWriter")
+
+indexing_pipeline.run({"DocEmbedder": {"documents": docs}})
+
+
+# RAG Pipeline
+prompt_template = """GPT4 Correct User: Answer the question using the provided context.
+Question: {{question}}
+Context:
+{% for doc in documents %}
+{{ doc.content }}
+{% endfor %}
+<|end_of_turn|>
+GPT4 Correct Assistant:
+"""
+rag_pipeline = Pipeline()
+
+text_embedder = SentenceTransformersTextEmbedder(model_name_or_path="sentence-transformers/all-MiniLM-L6-v2")
+
+model_path = "openchat-3.5-1210.Q3_K_S.gguf"
+generator = LlamaCppGenerator(model_path=model_path, n_ctx=4096, n_batch=128)
+
+rag_pipeline.add_component(
+    instance=text_embedder,
+    name="text_embedder",
+)
+rag_pipeline.add_component(instance=InMemoryEmbeddingRetriever(document_store=doc_store, top_k=3), name="retriever")
+rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
+rag_pipeline.add_component(instance=generator, name="llm")
+rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
+
+rag_pipeline.connect("text_embedder", "retriever")
+rag_pipeline.connect("retriever", "prompt_builder.documents")
+rag_pipeline.connect("prompt_builder", "llm")
+rag_pipeline.connect("llm.replies", "answer_builder.replies")
+rag_pipeline.connect("retriever", "answer_builder.documents")
+
+
+# Run Pipeline
+question = "Which year did the Joker movie release?"
+result = rag_pipeline.run(
+    {
+        "text_embedder": {"text": question},
+        "prompt_builder": {"question": question},
+        "llm": {"generation_kwargs": {"max_tokens": 128, "temperature": 0.1}},
+        "answer_builder": {"query": question},
+    }
+)
+
+generated_answer = result["answer_builder"]["answers"][0]
+print(generated_answer.data)
+# The Joker movie was released on October 4, 2019.

@@ -0,0 +1,185 @@ llama_cpp_haystack-0.1.0/pyproject.toml
+[build-system]
+requires = ["hatchling", "hatch-vcs"]
+build-backend = "hatchling.build"
+
+[project]
+name = "llama-cpp-haystack"
+dynamic = ["version"]
+description = 'An integration between the llama.cpp LLM framework and Haystack'
+readme = "README.md"
+requires-python = ">=3.8"
+license = "Apache-2.0"
+keywords = []
+authors = [
+  { name = "deepset GmbH", email = "info@deepset.ai" },
+  { name = "Ashwin Mathur", email = "" },
+]
+classifiers = [
+  "Development Status :: 4 - Beta",
+  "Programming Language :: Python",
+  "Programming Language :: Python :: 3.8",
+  "Programming Language :: Python :: 3.9",
+  "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
+  "Programming Language :: Python :: 3.12",
+  "Programming Language :: Python :: Implementation :: CPython",
+  "Programming Language :: Python :: Implementation :: PyPy",
+]
+dependencies = [
+  "haystack-ai",
+  "llama-cpp-python"
+]
+
+[project.urls]
+Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp#readme"
+Issues = "https://github.com/deepset-ai/haystack-core-integrations/issues"
+Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp"
+
+[tool.hatch.version]
+source = "vcs"
+tag-pattern = 'integrations\/llama_cpp-v(?P<version>.*)'
+
+[tool.hatch.version.raw-options]
+root = "../.."
+git_describe_command = 'git describe --tags --match="integrations/llama_cpp-v[0-9]*"'
+
+[tool.hatch.envs.default]
+dependencies = [
+  "coverage[toml]>=6.5",
+  "pytest",
+]
+[tool.hatch.envs.default.scripts]
+test = "pytest {args:tests}"
+test-cov = "coverage run -m pytest {args:tests}"
+cov-report = [
+  "- coverage combine",
+  "coverage report",
+]
+cov = [
+  "test-cov",
+  "cov-report",
+]
+
+[[tool.hatch.envs.all.matrix]]
+python = ["3.8", "3.9", "3.10", "3.11", "3.12"]
+
+
+[tool.hatch.envs.lint]
+detached = true
+dependencies = [
+  "black>=23.1.0",
+  "mypy>=1.0.0",
+  "ruff>=0.0.243",
+]
+
+[tool.hatch.envs.lint.scripts]
+typing = "mypy --install-types --non-interactive {args:src/llama_cpp_haystack tests}"
+style = [
+  "ruff {args:.}",
+  "black --check --diff {args:.}",
+]
+fmt = [
+  "black {args:.}",
+  "ruff --fix {args:.}",
+  "style",
+]
+all = [
+  "style",
+  "typing",
+]
+
+[tool.hatch.metadata]
+allow-direct-references = true
+
+[tool.ruff.isort]
+known-first-party = ["llama_cpp_haystack"]
+
+[tool.black]
+target-version = ["py37"]
+line-length = 120
+skip-string-normalization = true
+
+[tool.ruff]
+target-version = "py37"
+line-length = 120
+select = [
+  "A",
+  "ARG",
+  "B",
+  "C",
+  "DTZ",
+  "E",
+  "EM",
+  "F",
+  "I",
+  "ICN",
+  "ISC",
+  "N",
+  "PLC",
+  "PLE",
+  "PLR",
+  "PLW",
+  "Q",
+  "RUF",
+  "S",
+  "T",
+  "TID",
+  "UP",
+  "W",
+  "YTT",
+]
+ignore = [
+  # Allow non-abstract empty methods in abstract base classes
+  "B027",
+  # Ignore checks for possible passwords
+  "S105", "S106", "S107",
+  # Ignore complexity
+  "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915",
+]
+unfixable = [
+  # Don't touch unused imports
+  "F401",
+]
+
+[tool.ruff.flake8-tidy-imports]
+ban-relative-imports = "all"
+
+[tool.ruff.per-file-ignores]
+# Tests can use magic values, assertions, and relative imports
+"tests/**/*" = ["PLR2004", "S101", "TID252"]
+# Examples can print their output
+"examples/**" = ["T201"]
+"tests/**" = ["T201"]
+
+[tool.coverage.run]
+source_pkgs = ["llama_cpp_haystack", "tests"]
+branch = true
+parallel = true
+
+
+[tool.coverage.paths]
+ollama_haystack = ["src/llama_cpp_haystack", "*/llama-cpp-haystack/src/llama_cpp_haystack"]
+tests = ["tests", "*/llama-cpp-haystack/tests"]
+
+[tool.coverage.report]
+exclude_lines = [
+  "no cov",
+  "if __name__ == .__main__.:",
+  "if TYPE_CHECKING:",
+]
+
+[tool.pytest.ini_options]
+markers = [
+  "integration: marks tests as slow (deselect with '-m \"not integration\"')",
+]
+addopts = [
+  "--import-mode=importlib",
+]
+
+[[tool.mypy.overrides]]
+module = [
+  "haystack.*",
+  "pytest.*",
+  "llama_cpp.*"
+]
+ignore_missing_imports = true

@@ -0,0 +1,98 @@ llama_cpp_haystack-0.1.0/src/llama_cpp_haystack/generator.py
+import logging
+from typing import Any, Dict, List, Optional
+
+from haystack import component
+from llama_cpp import Llama
+
+logger = logging.getLogger(__name__)
+
+
+@component
+class LlamaCppGenerator:
+    """
+    Generator for using a model with Llama.cpp.
+    This component provides an interface to generate text using a quantized model (GGUF) using llama.cpp.
+
+    Usage example:
+    ```python
+    from llama_cpp_haystack import LlamaCppGenerator
+    generator = LlamaCppGenerator(model_path="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512)
+
+    print(generator.run("Who is the best American actor?", generation_kwargs={"max_tokens": 128}))
+    # {'replies': ['John Cusack'], 'meta': [{"object": "text_completion", ...}]}
+    ```
+    """
+
+    def __init__(
+        self,
+        model_path: str,
+        n_ctx: Optional[int] = 0,
+        n_batch: Optional[int] = 512,
+        model_kwargs: Optional[Dict[str, Any]] = None,
+        generation_kwargs: Optional[Dict[str, Any]] = None,
+    ):
+        """
+        :param model_path: The path of a quantized model for text generation,
+            for example, "zephyr-7b-beta.Q4_0.gguf".
+            If the model_path is also specified in the `model_kwargs`, this parameter will be ignored.
+        :param n_ctx: The number of tokens in the context. When set to 0, the context will be taken from the model.
+            If the n_ctx is also specified in the `model_kwargs`, this parameter will be ignored.
+        :param n_batch: Prompt processing maximum batch size. Defaults to 512.
+            If the n_batch is also specified in the `model_kwargs`, this parameter will be ignored.
+        :param model_kwargs: Dictionary containing keyword arguments used to initialize the LLM for text generation.
+            These keyword arguments provide fine-grained control over the model loading.
+            In case of duplication, these kwargs override `model_path`, `n_ctx`, and `n_batch` init parameters.
+            See Llama.cpp's [documentation](https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.__init__)
+            for more information on the available kwargs.
+        :param generation_kwargs: A dictionary containing keyword arguments to customize text generation.
+            Some examples: `max_tokens`, `temperature`, `top_k`, `top_p`,...
+            See Llama.cpp's documentation for more information:
+            https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion
+        """
+
+        model_kwargs = model_kwargs or {}
+        generation_kwargs = generation_kwargs or {}
+
+        # check if the huggingface_pipeline_kwargs contain the essential parameters
+        # otherwise, populate them with values from init parameters
+        model_kwargs.setdefault("model_path", model_path)
+        model_kwargs.setdefault("n_ctx", n_ctx)
+        model_kwargs.setdefault("n_batch", n_batch)
+
+        self.model_path = model_path
+        self.n_ctx = n_ctx
+        self.n_batch = n_batch
+        self.model_kwargs = model_kwargs
+        self.generation_kwargs = generation_kwargs
+        self.model = None
+
+    def warm_up(self):
+        if self.model is None:
+            self.model = Llama(**self.model_kwargs)
+
+    @component.output_types(replies=List[str], meta=List[Dict[str, Any]])
+    def run(self, prompt: str, generation_kwargs: Optional[Dict[str, Any]] = None):
+        """
+        Run the text generation model on the given prompt.
+
+        :param prompt: A string representing the prompt.
+        :param generation_kwargs: A dictionary containing keyword arguments to customize text generation.
+            Some examples: `max_tokens`, `temperature`, `top_k`, `top_p`,...
+            See Llama.cpp's documentation for more information:
+            https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.Llama.create_completion
+        :return: A dictionary of the returned responses and metadata.
+        """
+        if self.model is None:
+            error_msg = "The model has not been loaded. Please call warm_up() before running."
+            raise RuntimeError(error_msg)
+
+        if not prompt:
+            return {"replies": []}
+
+        # merge generation kwargs from init method with those from run method
+        updated_generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}
+
+        output = self.model.create_completion(prompt=prompt, **updated_generation_kwargs)
+        replies = [output["choices"][0]["text"]]
+
+        return {"replies": replies, "meta": [output]}

@@ -0,0 +1,233 @@ llama_cpp_haystack-0.1.0/tests/test_generator.py
+import os
+import urllib.request
+from pathlib import Path
+from unittest.mock import MagicMock
+
+import pytest
+from haystack import Document, Pipeline
+from haystack.components.builders.answer_builder import AnswerBuilder
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.retrievers import InMemoryBM25Retriever
+from haystack.document_stores import InMemoryDocumentStore
+
+from llama_cpp_haystack import LlamaCppGenerator
+
+
+@pytest.fixture
+def model_path():
+    return Path(__file__).parent / "models"
+
+
+def download_file(file_link, filename, capsys):
+    # Checks if the file already exists before downloading
+    if not os.path.isfile(filename):
+        urllib.request.urlretrieve(file_link, filename)  # noqa: S310
+        with capsys.disabled():
+            print("\nModel file downloaded successfully.")
+    else:
+        with capsys.disabled():
+            print("\nModel file already exists.")
+
+
+class TestLlamaCppGenerator:
+    @pytest.fixture
+    def generator(self, model_path, capsys):
+        ggml_model_path = (
+            "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q3_K_S.gguf"
+        )
+        filename = "openchat-3.5-1210.Q3_K_S.gguf"
+
+        # Download GGUF model from HuggingFace
+        download_file(ggml_model_path, str(model_path / filename), capsys)
+
+        model_path = str(model_path / filename)
+        generator = LlamaCppGenerator(model_path=model_path, n_ctx=128, n_batch=128)
+        generator.warm_up()
+        return generator
+
+    @pytest.fixture
+    def generator_mock(self):
+        mock_model = MagicMock()
+        generator = LlamaCppGenerator(model_path="test_model.gguf", n_ctx=2048, n_batch=512)
+        generator.model = mock_model
+        return generator, mock_model
+
+    def test_default_init(self):
+        """
+        Test default initialization parameters.
+        """
+        generator = LlamaCppGenerator(model_path="test_model.gguf")
+
+        assert generator.model_path == "test_model.gguf"
+        assert generator.n_ctx == 0
+        assert generator.n_batch == 512
+        assert generator.model_kwargs == {"model_path": "test_model.gguf", "n_ctx": 0, "n_batch": 512}
+        assert generator.generation_kwargs == {}
+
+    def test_custom_init(self):
+        """
+        Test custom initialization parameters.
+        """
+        generator = LlamaCppGenerator(
+            model_path="test_model.gguf",
+            n_ctx=2048,
+            n_batch=512,
+        )
+
+        assert generator.model_path == "test_model.gguf"
+        assert generator.n_ctx == 2048
+        assert generator.n_batch == 512
+        assert generator.model_kwargs == {"model_path": "test_model.gguf", "n_ctx": 2048, "n_batch": 512}
+        assert generator.generation_kwargs == {}
+
+    def test_ignores_model_path_if_specified_in_model_kwargs(self):
+        """
+        Test that model_path is ignored if already specified in model_kwargs.
+        """
+        generator = LlamaCppGenerator(
+            model_path="test_model.gguf",
+            n_ctx=512,
+            n_batch=512,
+            model_kwargs={"model_path": "other_model.gguf"},
+        )
+        assert generator.model_kwargs["model_path"] == "other_model.gguf"
+
+    def test_ignores_n_ctx_if_specified_in_model_kwargs(self):
+        """
+        Test that n_ctx is ignored if already specified in model_kwargs.
+        """
+        generator = LlamaCppGenerator(
+            model_path="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_ctx": 1024}
+        )
+        assert generator.model_kwargs["n_ctx"] == 1024
+
+    def test_ignores_n_batch_if_specified_in_model_kwargs(self):
+        """
+        Test that n_batch is ignored if already specified in model_kwargs.
+        """
+        generator = LlamaCppGenerator(
+            model_path="test_model.gguf", n_ctx=512, n_batch=512, model_kwargs={"n_batch": 1024}
+        )
+        assert generator.model_kwargs["n_batch"] == 1024
+
+    def test_raises_error_without_warm_up(self):
+        """
+        Test that the generator raises an error if warm_up() is not called before running.
+        """
+        generator = LlamaCppGenerator(model_path="test_model.gguf", n_ctx=512, n_batch=512)
+        with pytest.raises(RuntimeError):
+            generator.run("What is the capital of China?")
+
+    def test_run_with_empty_prompt(self, generator_mock):
+        """
+        Test that an empty prompt returns an empty list of replies.
+        """
+        generator, _ = generator_mock
+        result = generator.run("")
+        assert result["replies"] == []
+
+    def test_run_with_valid_prompt(self, generator_mock):
+        """
+        Test that a valid prompt returns a list of replies.
+        """
+        generator, mock_model = generator_mock
+        mock_output = {
+            "choices": [{"text": "Generated text"}],
+            "metadata": {"other_info": "Some metadata"},
+        }
+        mock_model.create_completion.return_value = mock_output
+        result = generator.run("Test prompt")
+        assert result["replies"] == ["Generated text"]
+        assert result["meta"] == [mock_output]
+
+    def test_run_with_generation_kwargs(self, generator_mock):
+        """
+        Test that a valid prompt and generation kwargs returns a list of replies.
|
|
146
|
+
"""
|
|
147
|
+
generator, mock_model = generator_mock
|
|
148
|
+
mock_output = {
|
|
149
|
+
"choices": [{"text": "Generated text"}],
|
|
150
|
+
"metadata": {"other_info": "Some metadata"},
|
|
151
|
+
}
|
|
152
|
+
mock_model.create_completion.return_value = mock_output
|
|
153
|
+
generation_kwargs = {"max_tokens": 128}
|
|
154
|
+
result = generator.run("Test prompt", generation_kwargs)
|
|
155
|
+
assert result["replies"] == ["Generated text"]
|
|
156
|
+
assert result["meta"] == [mock_output]
|
|
157
|
+
|
|
158
|
+
@pytest.mark.integration
|
|
159
|
+
def test_run(self, generator):
|
|
160
|
+
"""
|
|
161
|
+
Test that a valid prompt returns a list of replies.
|
|
162
|
+
"""
|
|
163
|
+
questions_and_answers = [
|
|
164
|
+
("What's the capital of France?", "Paris"),
|
|
165
|
+
("What is the capital of Canada?", "Ottawa"),
|
|
166
|
+
("What is the capital of Ghana?", "Accra"),
|
|
167
|
+
]
|
|
168
|
+
|
|
169
|
+
for question, answer in questions_and_answers:
|
|
170
|
+
prompt = f"GPT4 Correct User: Answer in a single word. {question} <|end_of_turn|>\n GPT4 Correct Assistant:"
|
|
171
|
+
result = generator.run(prompt)
|
|
172
|
+
|
|
173
|
+
assert "replies" in result
|
|
174
|
+
assert isinstance(result["replies"], list)
|
|
175
|
+
assert len(result["replies"]) > 0
|
|
176
|
+
assert answer.lower() in result["replies"][0].lower().strip()
|
|
177
|
+
|
|
178
|
+
@pytest.mark.integration
|
|
179
|
+
def test_run_rag_pipeline(self, generator):
|
|
180
|
+
"""
|
|
181
|
+
Test that a valid prompt returns a list of replies.
|
|
182
|
+
"""
|
|
183
|
+
prompt_template = """GPT4 Correct User: Answer the question in a single word. {{question}}
|
|
184
|
+
Context:
|
|
185
|
+
{% for doc in documents %}
|
|
186
|
+
{{ doc.content }}
|
|
187
|
+
{% endfor %}
|
|
188
|
+
<|end_of_turn|>
|
|
189
|
+
GPT4 Correct Assistant:
|
|
190
|
+
"""
|
|
191
|
+
rag_pipeline = Pipeline()
|
|
192
|
+
rag_pipeline.add_component(
|
|
193
|
+
instance=InMemoryBM25Retriever(document_store=InMemoryDocumentStore(), top_k=1), name="retriever"
|
|
194
|
+
)
|
|
195
|
+
rag_pipeline.add_component(instance=PromptBuilder(template=prompt_template), name="prompt_builder")
|
|
196
|
+
rag_pipeline.add_component(instance=generator, name="llm")
|
|
197
|
+
rag_pipeline.add_component(instance=AnswerBuilder(), name="answer_builder")
|
|
198
|
+
rag_pipeline.connect("retriever", "prompt_builder.documents")
|
|
199
|
+
rag_pipeline.connect("prompt_builder", "llm")
|
|
200
|
+
rag_pipeline.connect("llm.replies", "answer_builder.replies")
|
|
201
|
+
rag_pipeline.connect("retriever", "answer_builder.documents")
|
|
202
|
+
|
|
203
|
+
# Populate the document store
|
|
204
|
+
documents = [
|
|
205
|
+
Document(content="The capital of France is Paris."),
|
|
206
|
+
Document(content="The capital of Canada is Ottawa."),
|
|
207
|
+
Document(content="The capital of Ghana is Accra."),
|
|
208
|
+
]
|
|
209
|
+
rag_pipeline.get_component("retriever").document_store.write_documents(documents)
|
|
210
|
+
|
|
211
|
+
# Query and assert
|
|
212
|
+
questions_and_answers = [
|
|
213
|
+
("What's the capital of France?", "Paris"),
|
|
214
|
+
("What is the capital of Canada?", "Ottawa"),
|
|
215
|
+
("What is the capital of Ghana?", "Accra"),
|
|
216
|
+
]
|
|
217
|
+
|
|
218
|
+
for question, answer in questions_and_answers:
|
|
219
|
+
result = rag_pipeline.run(
|
|
220
|
+
{
|
|
221
|
+
"retriever": {"query": question},
|
|
222
|
+
"prompt_builder": {"question": question},
|
|
223
|
+
"llm": {"generation_kwargs": {"temperature": 0.1}},
|
|
224
|
+
"answer_builder": {"query": question},
|
|
225
|
+
}
|
|
226
|
+
)
|
|
227
|
+
|
|
228
|
+
assert len(result["answer_builder"]["answers"]) == 1
|
|
229
|
+
generated_answer = result["answer_builder"]["answers"][0]
|
|
230
|
+
assert answer.lower() in generated_answer.data.lower()
|
|
231
|
+
assert generated_answer.query == question
|
|
232
|
+
assert hasattr(generated_answer, "documents")
|
|
233
|
+
assert hasattr(generated_answer, "meta")
|
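The mock-based tests above run without any model file, while the tests marked @pytest.mark.integration download the OpenChat GGUF model into tests/models/ on first use and exercise real generation, including a full BM25 retrieval-augmented pipeline. Assuming pytest's standard marker selection, the two groups can be run separately, for example:

pytest -m "not integration"   # unit tests only, no model download
pytest -m integration         # downloads the GGUF model and runs the end-to-end checks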