langchain-ollama 0.3.4__tar.gz → 0.3.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/PKG-INFO +2 -2
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/__init__.py +10 -1
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/chat_models.py +77 -26
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/embeddings.py +8 -2
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/llms.py +4 -1
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/pyproject.toml +2 -2
- langchain_ollama-0.3.6/tests/unit_tests/test_chat_models.py +270 -0
- langchain_ollama-0.3.6/tests/unit_tests/test_embeddings.py +63 -0
- langchain_ollama-0.3.4/tests/unit_tests/test_chat_models.py +0 -85
- langchain_ollama-0.3.4/tests/unit_tests/test_embeddings.py +0 -30
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/LICENSE +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/README.md +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/_utils.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/py.typed +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/__init__.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/__init__.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/chat_models/cassettes/test_chat_models_standard/TestChatOllama.test_stream_time.yaml +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/chat_models/test_chat_models.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/test_compile.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/integration_tests/test_llms.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/unit_tests/__init__.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/unit_tests/test_imports.py +0 -0
- {langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/tests/unit_tests/test_llms.py +0 -0
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.4
+Version: 0.3.6
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: >=3.9
 Requires-Dist: ollama<1.0.0,>=0.5.1
-Requires-Dist: langchain-core<1.0.0,>=0.3.
+Requires-Dist: langchain-core<1.0.0,>=0.3.70
 Description-Content-Type: text/markdown
 
 # langchain-ollama
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/__init__.py
@@ -1,6 +1,15 @@
 """This is the langchain_ollama package.
 
-
+Provides infrastructure for interacting with the `Ollama <https://ollama.com/>`__
+service.
+
+.. note::
+    **Newly added in 0.3.4:** ``validate_model_on_init`` param on all models.
+    This parameter allows you to validate the model exists in Ollama locally on
+    initialization. If set to ``True``, it will raise an error if the model does not
+    exist locally. This is useful for ensuring that the model is available before
+    attempting to use it, especially in environments where models may not be
+    pre-downloaded.
 """
 
 from importlib import metadata
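The note above documents the ``validate_model_on_init`` flag. A minimal usage sketch (it assumes a running local Ollama server; ``llama3.1`` is just an example model name):

from langchain_ollama import ChatOllama

# With validate_model_on_init=True, construction checks that the model has
# already been pulled into the local Ollama instance and raises if it has not.
strict_llm = ChatOllama(model="llama3.1", validate_model_on_init=True)

# The default (False) skips the check; a missing model only surfaces later,
# at invocation time.
lazy_llm = ChatOllama(model="llama3.1")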
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/chat_models.py
@@ -2,7 +2,9 @@
 
 from __future__ import annotations
 
+import ast
 import json
+import logging
 from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
@@ -57,6 +59,8 @@ from typing_extensions import Self, is_typeddict
 
 from ._utils import validate_model
 
+log = logging.getLogger(__name__)
+
 
 def _get_usage_metadata_from_generation_info(
     generation_info: Optional[Mapping[str, Any]],
@@ -77,33 +81,45 @@ def _get_usage_metadata_from_generation_info(
 
 def _parse_json_string(
     json_string: str,
+    *,
     raw_tool_call: dict[str, Any],
-    skip: bool,
+    skip: bool,
 ) -> Any:
     """Attempt to parse a JSON string for tool calling.
 
+    It first tries to use the standard json.loads. If that fails, it falls
+    back to ast.literal_eval to safely parse Python literals, which is more
+    robust against models using single quotes or containing apostrophes.
+
     Args:
         json_string: JSON string to parse.
-        skip: Whether to ignore parsing errors and return the value anyways.
         raw_tool_call: Raw tool call to include in error message.
+        skip: Whether to ignore parsing errors and return the value anyways.
 
     Returns:
-        The parsed JSON string.
+        The parsed JSON string or Python literal.
 
     Raises:
-        OutputParserException: If the
+        OutputParserException: If the string is invalid and skip=False.
     """
     try:
         return json.loads(json_string)
-    except json.JSONDecodeError
-
-
-
-
-
-
-
-
+    except json.JSONDecodeError:
+        try:
+            # Use ast.literal_eval to safely parse Python-style dicts
+            # (e.g. with single quotes)
+            return ast.literal_eval(json_string)
+        except (SyntaxError, ValueError) as e:
+            # If both fail, and we're not skipping, raise an informative error.
+            if skip:
+                return json_string
+            msg = (
+                f"Function {raw_tool_call['function']['name']} arguments:\n\n"
+                f"{raw_tool_call['function']['arguments']}"
+                "\n\nare not valid JSON or a Python literal. "
+                f"Received error: {e}"
+            )
+            raise OutputParserException(msg) from e
     except TypeError as e:
         if skip:
             return json_string
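The hunk above replaces a plain ``json.loads`` call with a two-step parse: strict JSON first, then ``ast.literal_eval`` for Python-style dicts. A standalone sketch of that cascade (``parse_tool_arguments`` is a hypothetical helper name, not the library function):

import ast
import json


def parse_tool_arguments(raw: str) -> dict:
    """Parse tool-call arguments that may be JSON or a Python-literal dict."""
    try:
        return json.loads(raw)  # strict JSON first
    except json.JSONDecodeError:
        return ast.literal_eval(raw)  # tolerates single quotes and apostrophes


print(parse_tool_arguments('{"city": "Paris"}'))  # {'city': 'Paris'}
print(parse_tool_arguments("{'city': \"It's Paris\"}"))  # {'city': "It's Paris"}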
@@ -424,7 +440,10 @@ class ChatOllama(BaseChatModel):
     unless you set ``reasoning`` to ``True``."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in Ollama locally on initialization."""
+    """Whether to validate the model exists in Ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     mirostat: Optional[int] = None
     """Enable Mirostat sampling for controlling perplexity.
@@ -821,6 +840,28 @@ class ChatOllama(BaseChatModel):
         reasoning = kwargs.get("reasoning", self.reasoning)
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                content = (
+                    stream_resp["message"]["content"]
+                    if "message" in stream_resp and "content" in stream_resp["message"]
+                    else ""
+                )
+
+                # Warn and skip responses with done_reason: 'load' and empty content
+                # These indicate the model was loaded but no actual generation occurred
+                is_load_response_with_empty_content = (
+                    stream_resp.get("done") is True
+                    and stream_resp.get("done_reason") == "load"
+                    and not content.strip()
+                )
+
+                if is_load_response_with_empty_content:
+                    log.warning(
+                        "Ollama returned empty response with done_reason='load'."
+                        "This typically indicates the model was loaded but no content "
+                        "was generated. Skipping this response."
+                    )
+                    continue
+
                 if stream_resp.get("done") is True:
                     generation_info = dict(stream_resp)
                     if "model" in generation_info:
@@ -829,12 +870,6 @@ class ChatOllama(BaseChatModel):
                 else:
                     generation_info = None
 
-                content = (
-                    stream_resp["message"]["content"]
-                    if "message" in stream_resp and "content" in stream_resp["message"]
-                    else ""
-                )
-
                 additional_kwargs = {}
                 if (
                     reasoning
@@ -881,6 +916,28 @@ class ChatOllama(BaseChatModel):
         reasoning = kwargs.get("reasoning", self.reasoning)
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                content = (
+                    stream_resp["message"]["content"]
+                    if "message" in stream_resp and "content" in stream_resp["message"]
+                    else ""
+                )
+
+                # Warn and skip responses with done_reason: 'load' and empty content
+                # These indicate the model was loaded but no actual generation occurred
+                is_load_response_with_empty_content = (
+                    stream_resp.get("done") is True
+                    and stream_resp.get("done_reason") == "load"
+                    and not content.strip()
+                )
+
+                if is_load_response_with_empty_content:
+                    log.warning(
+                        "Ollama returned empty response with done_reason='load'. "
+                        "This typically indicates the model was loaded but no content "
+                        "was generated. Skipping this response."
+                    )
+                    continue
+
                 if stream_resp.get("done") is True:
                     generation_info = dict(stream_resp)
                     if "model" in generation_info:
@@ -889,12 +946,6 @@ class ChatOllama(BaseChatModel):
                 else:
                     generation_info = None
 
-                content = (
-                    stream_resp["message"]["content"]
-                    if "message" in stream_resp and "content" in stream_resp["message"]
-                    else ""
-                )
-
                 additional_kwargs = {}
                 if (
                     reasoning
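Taken together, the four hunks above move content extraction ahead of the done-check in both the sync and async stream paths, so that "model loaded" chunks carrying no text are skipped with a warning instead of being emitted. A standalone sketch of the skip predicate (the chunk dicts are illustrative, not captured Ollama output):

def is_empty_load_chunk(chunk: dict) -> bool:
    """True for stream chunks that only report the model being loaded."""
    content = chunk.get("message", {}).get("content", "")
    return (
        chunk.get("done") is True
        and chunk.get("done_reason") == "load"
        and not content.strip()
    )


chunks = [
    {"done": True, "done_reason": "load", "message": {"role": "assistant", "content": ""}},
    {"done": True, "done_reason": "stop", "message": {"role": "assistant", "content": "Hi!"}},
]
print([c["message"]["content"] for c in chunks if not is_empty_load_chunk(c)])  # ['Hi!']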
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/embeddings.py
@@ -128,7 +128,10 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """Model name to use."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in ollama locally on initialization."""
+    """Whether to validate the model exists in ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     base_url: Optional[str] = None
     """Base url the model is hosted under."""
@@ -296,7 +299,10 @@ class OllamaEmbeddings(BaseModel, Embeddings):
             raise ValueError(msg)
         return (
            await self._async_client.embed(
-                self.model, texts
+                self.model,
+                texts,
+                options=self._default_params,
+                keep_alive=self.keep_alive,
            )
        )["embeddings"]
 
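With this change the async embed call also forwards ``options`` (built from parameters such as ``num_gpu`` and ``temperature``) and ``keep_alive``, matching the sync path. A small usage sketch (parameter values are illustrative and assume a running local Ollama server):

import asyncio

from langchain_ollama import OllamaEmbeddings

# num_gpu is collected into the options dict passed to the Ollama client's
# embed call; keep_alive controls how long the model stays loaded afterwards.
embeddings = OllamaEmbeddings(model="llama3.1", num_gpu=1, keep_alive=1)

sync_vectors = embeddings.embed_documents(["hello", "world"])            # sync path
async_vectors = asyncio.run(embeddings.aembed_documents(["hello"]))      # async path now forwards options too
print(len(sync_vectors), len(async_vectors[0]))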
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/langchain_ollama/llms.py
@@ -53,7 +53,10 @@ class OllamaLLM(BaseLLM):
     be present directly within the main response content."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in ollama locally on initialization."""
+    """Whether to validate the model exists in ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     mirostat: Optional[int] = None
     """Enable Mirostat sampling for controlling perplexity.
{langchain_ollama-0.3.4 → langchain_ollama-0.3.6}/pyproject.toml
@@ -9,10 +9,10 @@ authors = []
 requires-python = ">=3.9"
 dependencies = [
     "ollama>=0.5.1,<1.0.0",
-    "langchain-core<1.0.0,>=0.3.
+    "langchain-core<1.0.0,>=0.3.70",
 ]
 name = "langchain-ollama"
-version = "0.3.4"
+version = "0.3.6"
 description = "An integration package connecting Ollama and LangChain"
 readme = "README.md"
langchain_ollama-0.3.6/tests/unit_tests/test_chat_models.py
@@ -0,0 +1,270 @@
+"""Test chat model integration."""
+
+import json
+import logging
+from collections.abc import Generator
+from contextlib import contextmanager
+from typing import Any
+from unittest.mock import MagicMock, patch
+
+import pytest
+from httpx import Client, Request, Response
+from langchain_core.exceptions import OutputParserException
+from langchain_core.messages import ChatMessage, HumanMessage
+from langchain_tests.unit_tests import ChatModelUnitTests
+
+from langchain_ollama.chat_models import (
+    ChatOllama,
+    _parse_arguments_from_tool_call,
+    _parse_json_string,
+)
+
+MODEL_NAME = "llama3.1"
+
+
+class TestChatOllama(ChatModelUnitTests):
+    @property
+    def chat_model_class(self) -> type[ChatOllama]:
+        return ChatOllama
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {"model": MODEL_NAME}
+
+
+def test__parse_arguments_from_tool_call() -> None:
+    raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}'  # noqa: E501
+    raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
+    response = _parse_arguments_from_tool_call(raw_tool_calls[0])
+    assert response is not None
+    assert isinstance(response["arg_1"], str)
+
+
+@contextmanager
+def _mock_httpx_client_stream(
+    *args: Any, **kwargs: Any
+) -> Generator[Response, Any, Any]:
+    yield Response(
+        status_code=200,
+        content='{"message": {"role": "assistant", "content": "The meaning ..."}}',
+        request=Request(method="POST", url="http://whocares:11434"),
+    )
+
+
+def test_arbitrary_roles_accepted_in_chatmessages(
+    monkeypatch: pytest.MonkeyPatch,
+) -> None:
+    monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
+    llm = ChatOllama(
+        model=MODEL_NAME,
+        verbose=True,
+        format=None,
+    )
+    messages = [
+        ChatMessage(
+            role="somerandomrole",
+            content="I'm ok with you adding any role message now!",
+        ),
+        ChatMessage(role="control", content="thinking"),
+        ChatMessage(role="user", content="What is the meaning of life?"),
+    ]
+    llm.invoke(messages)
+
+
+@patch("langchain_ollama.chat_models.validate_model")
+def test_validate_model_on_init(mock_validate_model: Any) -> None:
+    """Test that the model is validated on initialization when requested."""
+    # Test that validate_model is called when validate_model_on_init=True
+    ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
+    mock_validate_model.assert_called_once()
+    mock_validate_model.reset_mock()
+
+    # Test that validate_model is NOT called when validate_model_on_init=False
+    ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
+    mock_validate_model.assert_not_called()
+
+    # Test that validate_model is NOT called by default
+    ChatOllama(model=MODEL_NAME)
+    mock_validate_model.assert_not_called()
+
+
+# Define a dummy raw_tool_call for the function signature
+dummy_raw_tool_call = {
+    "function": {"name": "test_func", "arguments": ""},
+}
+
+
+# --- Regression tests for tool-call argument parsing (see #30910) ---
+
+
+@pytest.mark.parametrize(
+    "input_string, expected_output",
+    [
+        # Case 1: Standard double-quoted JSON
+        ('{"key": "value", "number": 123}', {"key": "value", "number": 123}),
+        # Case 2: Single-quoted string (the original bug)
+        ("{'key': 'value', 'number': 123}", {"key": "value", "number": 123}),
+        # Case 3: String with an internal apostrophe
+        ('{"text": "It\'s a great test!"}', {"text": "It's a great test!"}),
+        # Case 4: Mixed quotes that ast can handle
+        ("{'text': \"It's a great test!\"}", {"text": "It's a great test!"}),
+    ],
+)
+def test_parse_json_string_success_cases(
+    input_string: str, expected_output: Any
+) -> None:
+    """Tests that _parse_json_string correctly parses valid and fixable strings."""
+    raw_tool_call = {"function": {"name": "test_func", "arguments": input_string}}
+    result = _parse_json_string(input_string, raw_tool_call=raw_tool_call, skip=False)
+    assert result == expected_output
+
+
+def test_parse_json_string_failure_case_raises_exception() -> None:
+    """Tests that _parse_json_string raises an exception for truly malformed strings."""
+    malformed_string = "{'key': 'value',,}"
+    raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}}
+    with pytest.raises(OutputParserException):
+        _parse_json_string(
+            malformed_string,
+            raw_tool_call=raw_tool_call,
+            skip=False,
+        )
+
+
+def test_parse_json_string_skip_returns_input_on_failure() -> None:
+    """Tests that skip=True returns the original string on parse failure."""
+    malformed_string = "{'not': valid,,,}"
+    raw_tool_call = {"function": {"name": "test_func", "arguments": malformed_string}}
+    result = _parse_json_string(
+        malformed_string,
+        raw_tool_call=raw_tool_call,
+        skip=True,
+    )
+    assert result == malformed_string
+
+
+def test_load_response_with_empty_content_is_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test that load responses with empty content log a warning and are skipped."""
+    load_only_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_only_response
+
+        llm = ChatOllama(model="test-model")
+
+        with (
+            caplog.at_level(logging.WARNING),
+            pytest.raises(ValueError, match="No data received from Ollama stream"),
+        ):
+            llm.invoke([HumanMessage("Hello")])
+
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+
+
+def test_load_response_with_whitespace_content_is_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses w/ only whitespace content log a warning and are skipped."""
+    load_whitespace_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": " \n \t "},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_whitespace_response
+
+        llm = ChatOllama(model="test-model")
+
+        with (
+            caplog.at_level(logging.WARNING),
+            pytest.raises(ValueError, match="No data received from Ollama stream"),
+        ):
+            llm.invoke([HumanMessage("Hello")])
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+
+
+def test_load_followed_by_content_response(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses log a warning and are skipped when followed by content."""
+    load_then_content_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": ""},
+        },
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:01.000000000Z",
+            "done": True,
+            "done_reason": "stop",
+            "message": {
+                "role": "assistant",
+                "content": "Hello! How can I help you today?",
+            },
+        },
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_then_content_response
+
+        llm = ChatOllama(model="test-model")
+
+        with caplog.at_level(logging.WARNING):
+            result = llm.invoke([HumanMessage("Hello")])
+
+        assert "Ollama returned empty response with done_reason='load'" in caplog.text
+        assert result.content == "Hello! How can I help you today?"
+        assert result.response_metadata.get("done_reason") == "stop"
+
+
+def test_load_response_with_actual_content_is_not_skipped(
+    caplog: pytest.LogCaptureFixture,
+) -> None:
+    """Test load responses with actual content are NOT skipped and log no warning."""
+    load_with_content_response = [
+        {
+            "model": "test-model",
+            "created_at": "2025-01-01T00:00:00.000000000Z",
+            "done": True,
+            "done_reason": "load",
+            "message": {"role": "assistant", "content": "This is actual content"},
+        }
+    ]
+
+    with patch("langchain_ollama.chat_models.Client") as mock_client_class:
+        mock_client = MagicMock()
+        mock_client_class.return_value = mock_client
+        mock_client.chat.return_value = load_with_content_response
+
+        llm = ChatOllama(model="test-model")
+
+        with caplog.at_level(logging.WARNING):
+            result = llm.invoke([HumanMessage("Hello")])
+
+        assert result.content == "This is actual content"
+        assert result.response_metadata.get("done_reason") == "load"
+        assert not caplog.text
langchain_ollama-0.3.6/tests/unit_tests/test_embeddings.py
@@ -0,0 +1,63 @@
+"""Test embedding model integration."""
+
+from typing import Any
+from unittest.mock import Mock, patch
+
+from langchain_ollama.embeddings import OllamaEmbeddings
+
+MODEL_NAME = "llama3.1"
+
+
+def test_initialization() -> None:
+    """Test embedding model initialization."""
+    OllamaEmbeddings(model=MODEL_NAME, keep_alive=1)
+
+
+@patch("langchain_ollama.embeddings.validate_model")
+def test_validate_model_on_init(mock_validate_model: Any) -> None:
+    """Test that the model is validated on initialization when requested."""
+    # Test that validate_model is called when validate_model_on_init=True
+    OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=True)
+    mock_validate_model.assert_called_once()
+    mock_validate_model.reset_mock()
+
+    # Test that validate_model is NOT called when validate_model_on_init=False
+    OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=False)
+    mock_validate_model.assert_not_called()
+
+    # Test that validate_model is NOT called by default
+    OllamaEmbeddings(model=MODEL_NAME)
+    mock_validate_model.assert_not_called()
+
+
+@patch("langchain_ollama.embeddings.Client")
+def test_embed_documents_passes_options(mock_client_class: Any) -> None:
+    """Test that embed_documents method passes options including num_gpu."""
+    # Create a mock client instance
+    mock_client = Mock()
+    mock_client_class.return_value = mock_client
+
+    # Mock the embed method response
+    mock_client.embed.return_value = {"embeddings": [[0.1, 0.2, 0.3]]}
+
+    # Create embeddings with num_gpu parameter
+    embeddings = OllamaEmbeddings(model=MODEL_NAME, num_gpu=4, temperature=0.5)
+
+    # Call embed_documents
+    result = embeddings.embed_documents(["test text"])
+
+    # Verify the result
+    assert result == [[0.1, 0.2, 0.3]]
+
+    # Check that embed was called with correct arguments
+    mock_client.embed.assert_called_once()
+    call_args = mock_client.embed.call_args
+
+    # Verify the keyword arguments
+    assert "options" in call_args.kwargs
+    assert "keep_alive" in call_args.kwargs
+
+    # Verify options contain num_gpu and temperature
+    options = call_args.kwargs["options"]
+    assert options["num_gpu"] == 4
+    assert options["temperature"] == 0.5
langchain_ollama-0.3.4/tests/unit_tests/test_chat_models.py
@@ -1,85 +0,0 @@
-"""Test chat model integration."""
-
-import json
-from collections.abc import Generator
-from contextlib import contextmanager
-from typing import Any
-from unittest.mock import patch
-
-import pytest
-from httpx import Client, Request, Response
-from langchain_core.messages import ChatMessage
-from langchain_tests.unit_tests import ChatModelUnitTests
-
-from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
-
-MODEL_NAME = "llama3.1"
-
-
-class TestChatOllama(ChatModelUnitTests):
-    @property
-    def chat_model_class(self) -> type[ChatOllama]:
-        return ChatOllama
-
-    @property
-    def chat_model_params(self) -> dict:
-        return {"model": MODEL_NAME}
-
-
-def test__parse_arguments_from_tool_call() -> None:
-    raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}'  # noqa: E501
-    raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
-    response = _parse_arguments_from_tool_call(raw_tool_calls[0])
-    assert response is not None
-    assert isinstance(response["arg_1"], str)
-
-
-@contextmanager
-def _mock_httpx_client_stream(
-    *args: Any, **kwargs: Any
-) -> Generator[Response, Any, Any]:
-    yield Response(
-        status_code=200,
-        content='{"message": {"role": "assistant", "content": "The meaning ..."}}',
-        request=Request(method="POST", url="http://whocares:11434"),
-    )
-
-
-def test_arbitrary_roles_accepted_in_chatmessages(
-    monkeypatch: pytest.MonkeyPatch,
-) -> None:
-    monkeypatch.setattr(Client, "stream", _mock_httpx_client_stream)
-
-    llm = ChatOllama(
-        model=MODEL_NAME,
-        verbose=True,
-        format=None,
-    )
-
-    messages = [
-        ChatMessage(
-            role="somerandomrole",
-            content="I'm ok with you adding any role message now!",
-        ),
-        ChatMessage(role="control", content="thinking"),
-        ChatMessage(role="user", content="What is the meaning of life?"),
-    ]
-
-    llm.invoke(messages)
-
-
-@patch("langchain_ollama.chat_models.validate_model")
-def test_validate_model_on_init(mock_validate_model: Any) -> None:
-    """Test that the model is validated on initialization when requested."""
-    # Test that validate_model is called when validate_model_on_init=True
-    ChatOllama(model=MODEL_NAME, validate_model_on_init=True)
-    mock_validate_model.assert_called_once()
-    mock_validate_model.reset_mock()
-
-    # Test that validate_model is NOT called when validate_model_on_init=False
-    ChatOllama(model=MODEL_NAME, validate_model_on_init=False)
-    mock_validate_model.assert_not_called()
-
-    # Test that validate_model is NOT called by default
-    ChatOllama(model=MODEL_NAME)
-    mock_validate_model.assert_not_called()
langchain_ollama-0.3.4/tests/unit_tests/test_embeddings.py
@@ -1,30 +0,0 @@
-"""Test embedding model integration."""
-
-from typing import Any
-from unittest.mock import patch
-
-from langchain_ollama.embeddings import OllamaEmbeddings
-
-MODEL_NAME = "llama3.1"
-
-
-def test_initialization() -> None:
-    """Test embedding model initialization."""
-    OllamaEmbeddings(model=MODEL_NAME, keep_alive=1)
-
-
-@patch("langchain_ollama.embeddings.validate_model")
-def test_validate_model_on_init(mock_validate_model: Any) -> None:
-    """Test that the model is validated on initialization when requested."""
-    # Test that validate_model is called when validate_model_on_init=True
-    OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=True)
-    mock_validate_model.assert_called_once()
-    mock_validate_model.reset_mock()
-
-    # Test that validate_model is NOT called when validate_model_on_init=False
-    OllamaEmbeddings(model=MODEL_NAME, validate_model_on_init=False)
-    mock_validate_model.assert_not_called()
-
-    # Test that validate_model is NOT called by default
-    OllamaEmbeddings(model=MODEL_NAME)
-    mock_validate_model.assert_not_called()
All remaining files listed above (LICENSE, README.md, langchain_ollama/_utils.py, langchain_ollama/py.typed, and the integration and unit test modules marked +0 -0) are carried over with no content changes; only their paths were renamed from langchain_ollama-0.3.4 to langchain_ollama-0.3.6.