llama-cpp-haystack 0.4.2__tar.gz → 0.4.3__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (17)
  1. llama_cpp_haystack-0.4.3/CHANGELOG.md +93 -0
  2. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/PKG-INFO +4 -3
  3. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py +30 -21
  4. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/tests/test_chat_generator.py +5 -5
  5. llama_cpp_haystack-0.4.2/CHANGELOG.md +0 -50
  6. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/.gitignore +0 -0
  7. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/LICENSE.txt +0 -0
  8. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/README.md +0 -0
  9. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/examples/llama_cpp_generator_example.py +0 -0
  10. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/examples/rag_pipeline_example.py +0 -0
  11. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/pydoc/config.yml +0 -0
  12. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/pyproject.toml +0 -0
  13. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/src/haystack_integrations/components/generators/llama_cpp/__init__.py +0 -0
  14. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/src/haystack_integrations/components/generators/llama_cpp/generator.py +0 -0
  15. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/tests/__init__.py +0 -0
  16. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/tests/models/.gitignore +0 -0
  17. {llama_cpp_haystack-0.4.2 → llama_cpp_haystack-0.4.3}/tests/test_generator.py +0 -0
llama_cpp_haystack-0.4.3/CHANGELOG.md (new file)

@@ -0,0 +1,93 @@
+ # Changelog
+
+ ## [integrations/llama_cpp-v0.4.2] - 2024-12-10
+
+ ### 🧪 Testing
+
+ - Do not retry tests in `hatch run test` command (#954)
+
+ ### ⚙️ CI
+
+ - Adopt uv as installer (#1142)
+
+ ### 🧹 Chores
+
+ - Update ruff linting scripts and settings (#1105)
+ - Unpin `llama-cpp-python` (#1115)
+ - Fix linting/isort (#1215)
+ - Use text instead of content for ChatMessage in Llama.cpp, Langfuse and Mistral (#1238)
+
+ ### 🌀 Miscellaneous
+
+ - Chore: lamma_cpp - ruff update, don't ruff tests (#998)
+ - Fix: pin `llama-cpp-python<0.3.0` (#1111)
+
+ ## [integrations/llama_cpp-v0.4.1] - 2024-08-08
+
+ ### 🐛 Bug Fixes
+
+ - Replace DynamicChatPromptBuilder with ChatPromptBuilder (#940)
+
+ ### ⚙️ CI
+
+ - Retry tests to reduce flakyness (#836)
+
+ ### 🧹 Chores
+
+ - Update ruff invocation to include check parameter (#853)
+ - Pin `llama-cpp-python>=0.2.87` (#955)
+
+ ### 🌀 Miscellaneous
+
+ - Ci: install `pytest-rerunfailures` where needed; add retry config to `test-cov` script (#845)
+ - Fix: pin llama-cpp-python to an older version (#943)
+ - Refactor: introduce `_convert_message_to_llamacpp_format` utility function (#939)
+
+ ## [integrations/llama_cpp-v0.4.0] - 2024-05-13
+
+ ### 🐛 Bug Fixes
+
+ - Llama.cpp: change wrong links and imports (#436)
+ - Fix order of API docs (#447)
+
+ ### 📚 Documentation
+
+ - Update category slug (#442)
+ - Small consistency improvements (#536)
+ - Disable-class-def (#556)
+
+ ### 🧹 Chores
+
+ - [**breaking**] Rename model_path to model in the Llama.cpp integration (#243)
+
+ ### 🌀 Miscellaneous
+
+ - Generate api docs (#353)
+ - Model_name_or_path > model (#418)
+ - Llama.cpp - review docstrings (#510)
+ - Llama.cpp - update examples (#511)
+ - Make tests show coverage (#566)
+ - Remove references to Python 3.7 (#601)
+ - Chore: add license classifiers (#680)
+ - Chore: change the pydoc renderer class (#718)
+ - Basic implementation of llama.cpp chat generation (#723)
+
+ ## [integrations/llama_cpp-v0.2.1] - 2024-01-18
+
+ ### 🌀 Miscellaneous
+
+ - Update import paths for beta5 (#233)
+
+ ## [integrations/llama_cpp-v0.2.0] - 2024-01-17
+
+ ### 🌀 Miscellaneous
+
+ - Mount llama_cpp in haystack_integrations (#217)
+
+ ## [integrations/llama_cpp-v0.1.0] - 2024-01-09
+
+ ### 🚀 Features
+
+ - Add Llama.cpp Generator (#179)
+
+ <!-- generated by git-cliff -->
PKG-INFO

@@ -1,13 +1,14 @@
- Metadata-Version: 2.3
+ Metadata-Version: 2.4
  Name: llama-cpp-haystack
- Version: 0.4.2
+ Version: 0.4.3
  Summary: An integration between the llama.cpp LLM framework and Haystack
  Project-URL: Documentation, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp#readme
  Project-URL: Issues, https://github.com/deepset-ai/haystack-core-integrations/issues
  Project-URL: Source, https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp
  Author: Ashwin Mathur
  Author-email: deepset GmbH <info@deepset.ai>
- License: Apache-2.0
+ License-Expression: Apache-2.0
+ License-File: LICENSE.txt
  Classifier: Development Status :: 4 - Beta
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python
src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py

@@ -2,7 +2,7 @@ import logging
  from typing import Any, Dict, List, Optional

  from haystack import component
- from haystack.dataclasses import ChatMessage, ChatRole
+ from haystack.dataclasses import ChatMessage
  from llama_cpp import Llama
  from llama_cpp.llama_tokenizer import LlamaHFTokenizer

@@ -21,6 +21,10 @@ def _convert_message_to_llamacpp_format(message: ChatMessage) -> Dict[str, str]:
      if message.name:
          formatted_msg["name"] = message.name

+     if formatted_msg["role"] == "tool":
+         formatted_msg["name"] = message.tool_call_result.origin.tool_name
+         formatted_msg["content"] = message.tool_call_result.result
+
      return formatted_msg

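The new branch above maps a Haystack tool-result message onto llama.cpp's `tool` role. A minimal sketch of that mapping, using hypothetical stand-in dataclasses in place of Haystack's real `ToolCall`/`ToolCallResult` types (only the two attributes the branch reads are modeled):

```python
from dataclasses import dataclass


# Hypothetical stand-ins for the Haystack tool-call dataclasses; only the
# attributes read by _convert_message_to_llamacpp_format are modeled here.
@dataclass
class ToolCall:
    tool_name: str


@dataclass
class ToolCallResult:
    origin: ToolCall  # the tool call this result answers
    result: str       # the raw tool output


def to_llamacpp_tool_message(tool_call_result: ToolCallResult) -> dict:
    # Same mapping as the new branch: the message carries the originating
    # tool's name and the tool output as its content.
    return {
        "role": "tool",
        "name": tool_call_result.origin.tool_name,
        "content": tool_call_result.result,
    }


result = ToolCallResult(origin=ToolCall(tool_name="get_weather"), result='{"temp_c": 21}')
print(to_llamacpp_tool_message(result))
# {'role': 'tool', 'name': 'get_weather', 'content': '{"temp_c": 21}'}
```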
@@ -114,26 +118,31 @@ class LlamaCppChatGenerator:
          formatted_messages = [_convert_message_to_llamacpp_format(msg) for msg in messages]

          response = self.model.create_chat_completion(messages=formatted_messages, **updated_generation_kwargs)
-         replies = [
-             ChatMessage(
-                 content=choice["message"]["content"],
-                 role=ChatRole[choice["message"]["role"].upper()],
-                 name=None,
-                 meta={
-                     "response_id": response["id"],
-                     "model": response["model"],
-                     "created": response["created"],
-                     "index": choice["index"],
-                     "finish_reason": choice["finish_reason"],
-                     "usage": response["usage"],
-                 },
-             )
-             for choice in response["choices"]
-         ]
-
-         for reply, choice in zip(replies, response["choices"]):
+
+         replies = []
+
+         for choice in response["choices"]:
+             meta = {
+                 "response_id": response["id"],
+                 "model": response["model"],
+                 "created": response["created"],
+                 "index": choice["index"],
+                 "finish_reason": choice["finish_reason"],
+                 "usage": response["usage"],
+             }
+
+             name = None
              tool_calls = choice.get("message", {}).get("tool_calls", [])
              if tool_calls:
-                 reply.meta["tool_calls"] = tool_calls
-                 reply.name = tool_calls[0]["function"]["name"] if tool_calls else None
+                 meta["tool_calls"] = tool_calls
+                 name = tool_calls[0]["function"]["name"]
+
+             reply = ChatMessage.from_assistant(choice["message"]["content"], meta=meta)
+             if name:
+                 if hasattr(reply, "_name"):
+                     reply._name = name  # new ChatMessage
+                 elif hasattr(reply, "name"):
+                     reply.name = name  # legacy ChatMessage
+             replies.append(reply)

          return {"replies": replies}
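The refactor above builds each reply with `ChatMessage.from_assistant` and sets the tool name through whichever attribute the installed Haystack version exposes, so the component's public API is unchanged. A hedged usage sketch (the GGUF path is a placeholder; `.text` assumes a recent Haystack `ChatMessage`):

```python
from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.llama_cpp import LlamaCppChatGenerator

# Placeholder path: any local chat-tuned GGUF model file works here.
generator = LlamaCppChatGenerator(model="models/model.gguf", n_ctx=2048)
generator.warm_up()  # loads the GGUF file into llama.cpp

result = generator.run(messages=[ChatMessage.from_user("What is the capital of France?")])
reply = result["replies"][0]
print(reply.text)           # assistant text on recent ChatMessage versions
print(reply.meta["usage"])  # token usage reported by llama.cpp
```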
tests/test_chat_generator.py

@@ -41,11 +41,11 @@ def test_convert_message_to_llamacpp_format():
      assert _convert_message_to_llamacpp_format(message) == {"role": "user", "content": "I have a question"}

      message = ChatMessage.from_function("Function call", "function_name")
-     assert _convert_message_to_llamacpp_format(message) == {
-         "role": "function",
-         "content": "Function call",
-         "name": "function_name",
-     }
+     converted_message = _convert_message_to_llamacpp_format(message)
+
+     assert converted_message["role"] in ("function", "tool")
+     assert converted_message["name"] == "function_name"
+     assert converted_message["content"] == "Function call"


  class TestLlamaCppChatGenerator:
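The relaxed assertion lets the test pass whether the installed Haystack version maps `ChatMessage.from_function` to the `function` role or to the `tool` role; the name and content checks are unchanged.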
llama_cpp_haystack-0.4.2/CHANGELOG.md (deleted)

@@ -1,50 +0,0 @@
- # Changelog
-
- ## [integrations/llama_cpp-v0.4.1] - 2024-08-08
-
- ### 🐛 Bug Fixes
-
- - Replace DynamicChatPromptBuilder with ChatPromptBuilder (#940)
-
- ### ⚙️ Miscellaneous Tasks
-
- - Retry tests to reduce flakyness (#836)
- - Update ruff invocation to include check parameter (#853)
- - Pin `llama-cpp-python>=0.2.87` (#955)
-
- ## [integrations/llama_cpp-v0.4.0] - 2024-05-13
-
- ### 🐛 Bug Fixes
-
- - Fix commit (#436)
-
-
- - Fix order of API docs (#447)
-
- This PR will also push the docs to Readme
-
- ### 📚 Documentation
-
- - Update category slug (#442)
- - Small consistency improvements (#536)
- - Disable-class-def (#556)
-
- ### ⚙️ Miscellaneous Tasks
-
- - [**breaking**] Rename model_path to model in the Llama.cpp integration (#243)
-
- ### Llama.cpp
-
- - Generate api docs (#353)
-
- ## [integrations/llama_cpp-v0.2.1] - 2024-01-18
-
- ## [integrations/llama_cpp-v0.2.0] - 2024-01-17
-
- ## [integrations/llama_cpp-v0.1.0] - 2024-01-09
-
- ### 🚀 Features
-
- - Add Llama.cpp Generator (#179)
-
- <!-- generated by git-cliff -->