langchain-ollama 0.3.5__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain_ollama/__init__.py CHANGED
@@ -1,6 +1,15 @@
 """This is the langchain_ollama package.
 
-It provides infrastructure for interacting with the Ollama service.
+Provides infrastructure for interacting with the `Ollama <https://ollama.com/>`__
+service.
+
+.. note::
+    **Newly added in 0.3.4:** ``validate_model_on_init`` param on all models.
+    This parameter allows you to validate the model exists in Ollama locally on
+    initialization. If set to ``True``, it will raise an error if the model does not
+    exist locally. This is useful for ensuring that the model is available before
+    attempting to use it, especially in environments where models may not be
+    pre-downloaded.
 """
 
 from importlib import metadata
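
To illustrate the behavior described in the note above, a minimal usage sketch for the new parameter (the model name `llama3` is an assumed example; any locally pulled model works):

```python
from langchain_ollama import ChatOllama, OllamaEmbeddings, OllamaLLM

# With validation enabled, construction raises immediately if the model
# has not been pulled locally (e.g. via `ollama pull llama3`).
# "llama3" is an assumed example model name.
chat = ChatOllama(model="llama3", validate_model_on_init=True)
llm = OllamaLLM(model="llama3", validate_model_on_init=True)
embeddings = OllamaEmbeddings(model="llama3", validate_model_on_init=True)
```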
langchain_ollama/chat_models.py CHANGED
@@ -2,7 +2,9 @@
 
 from __future__ import annotations
 
+import ast
 import json
+import logging
 from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
@@ -57,6 +59,8 @@ from typing_extensions import Self, is_typeddict
 
 from ._utils import validate_model
 
+log = logging.getLogger(__name__)
+
 
 def _get_usage_metadata_from_generation_info(
     generation_info: Optional[Mapping[str, Any]],
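
Since the warnings introduced below are emitted through this module-level logger, a minimal sketch of making them visible in an application (standard-library logging configuration, nothing package-specific):

```python
import logging

# Route langchain-ollama's warnings (such as the done_reason='load'
# skip warning added in this release) to the console.
logging.basicConfig(level=logging.WARNING)
logging.getLogger("langchain_ollama.chat_models").setLevel(logging.WARNING)
```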
@@ -77,33 +81,45 @@ def _get_usage_metadata_from_generation_info(
 
 def _parse_json_string(
     json_string: str,
+    *,
     raw_tool_call: dict[str, Any],
-    skip: bool,  # noqa: FBT001
+    skip: bool,
 ) -> Any:
     """Attempt to parse a JSON string for tool calling.
 
+    It first tries to use the standard json.loads. If that fails, it falls
+    back to ast.literal_eval to safely parse Python literals, which is more
+    robust against models using single quotes or containing apostrophes.
+
     Args:
         json_string: JSON string to parse.
-        skip: Whether to ignore parsing errors and return the value anyways.
         raw_tool_call: Raw tool call to include in error message.
+        skip: Whether to ignore parsing errors and return the value anyways.
 
     Returns:
-        The parsed JSON string.
+        The parsed JSON string or Python literal.
 
     Raises:
-        OutputParserException: If the JSON string wrong invalid and skip=False.
+        OutputParserException: If the string is invalid and skip=False.
     """
     try:
         return json.loads(json_string)
-    except json.JSONDecodeError as e:
-        if skip:
-            return json_string
-        msg = (
-            f"Function {raw_tool_call['function']['name']} arguments:\n\n"
-            f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
-            f"Received JSONDecodeError {e}"
-        )
-        raise OutputParserException(msg) from e
+    except json.JSONDecodeError:
+        try:
+            # Use ast.literal_eval to safely parse Python-style dicts
+            # (e.g. with single quotes)
+            return ast.literal_eval(json_string)
+        except (SyntaxError, ValueError) as e:
+            # If both fail, and we're not skipping, raise an informative error.
+            if skip:
+                return json_string
+            msg = (
+                f"Function {raw_tool_call['function']['name']} arguments:\n\n"
+                f"{raw_tool_call['function']['arguments']}"
+                "\n\nare not valid JSON or a Python literal. "
+                f"Received error: {e}"
+            )
+            raise OutputParserException(msg) from e
     except TypeError as e:
         if skip:
             return json_string
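
The effect of the new fallback path can be seen in isolation with a sketch like the following (not the package's code, just the two standard-library calls it chains):

```python
import ast
import json

# A tool-call payload a model might emit: Python-style quoting,
# not valid JSON.
payload = "{'location': 'Paris', 'unit': 'celsius'}"

try:
    args = json.loads(payload)
except json.JSONDecodeError:
    # Fall back to parsing the string as a Python literal, as the
    # new code path above does.
    args = ast.literal_eval(payload)

print(args["location"])  # -> Paris
```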
@@ -424,7 +440,10 @@ class ChatOllama(BaseChatModel):
     unless you set ``reasoning`` to ``True``."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in Ollama locally on initialization."""
+    """Whether to validate the model exists in Ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     mirostat: Optional[int] = None
     """Enable Mirostat sampling for controlling perplexity.
@@ -821,6 +840,28 @@ class ChatOllama(BaseChatModel):
         reasoning = kwargs.get("reasoning", self.reasoning)
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                content = (
+                    stream_resp["message"]["content"]
+                    if "message" in stream_resp and "content" in stream_resp["message"]
+                    else ""
+                )
+
+                # Warn and skip responses with done_reason: 'load' and empty content
+                # These indicate the model was loaded but no actual generation occurred
+                is_load_response_with_empty_content = (
+                    stream_resp.get("done") is True
+                    and stream_resp.get("done_reason") == "load"
+                    and not content.strip()
+                )
+
+                if is_load_response_with_empty_content:
+                    log.warning(
+                        "Ollama returned empty response with done_reason='load'. "
+                        "This typically indicates the model was loaded but no content "
+                        "was generated. Skipping this response."
+                    )
+                    continue
+
                 if stream_resp.get("done") is True:
                     generation_info = dict(stream_resp)
                     if "model" in generation_info:
@@ -829,12 +870,6 @@ class ChatOllama(BaseChatModel):
                 else:
                     generation_info = None
 
-                content = (
-                    stream_resp["message"]["content"]
-                    if "message" in stream_resp and "content" in stream_resp["message"]
-                    else ""
-                )
-
                 additional_kwargs = {}
                 if (
                     reasoning
@@ -881,6 +916,28 @@ class ChatOllama(BaseChatModel):
         reasoning = kwargs.get("reasoning", self.reasoning)
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                content = (
+                    stream_resp["message"]["content"]
+                    if "message" in stream_resp and "content" in stream_resp["message"]
+                    else ""
+                )
+
+                # Warn and skip responses with done_reason: 'load' and empty content
+                # These indicate the model was loaded but no actual generation occurred
+                is_load_response_with_empty_content = (
+                    stream_resp.get("done") is True
+                    and stream_resp.get("done_reason") == "load"
+                    and not content.strip()
+                )
+
+                if is_load_response_with_empty_content:
+                    log.warning(
+                        "Ollama returned empty response with done_reason='load'. "
+                        "This typically indicates the model was loaded but no content "
+                        "was generated. Skipping this response."
+                    )
+                    continue
+
                 if stream_resp.get("done") is True:
                     generation_info = dict(stream_resp)
                     if "model" in generation_info:
@@ -889,12 +946,6 @@ class ChatOllama(BaseChatModel):
                 else:
                     generation_info = None
 
-                content = (
-                    stream_resp["message"]["content"]
-                    if "message" in stream_resp and "content" in stream_resp["message"]
-                    else ""
-                )
-
                 additional_kwargs = {}
                 if (
                     reasoning
langchain_ollama/embeddings.py CHANGED
@@ -128,7 +128,10 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """Model name to use."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in ollama locally on initialization."""
+    """Whether to validate the model exists in ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     base_url: Optional[str] = None
     """Base url the model is hosted under."""
langchain_ollama/llms.py CHANGED
@@ -53,7 +53,10 @@ class OllamaLLM(BaseLLM):
     be present directly within the main response content."""
 
     validate_model_on_init: bool = False
-    """Whether to validate the model exists in ollama locally on initialization."""
+    """Whether to validate the model exists in ollama locally on initialization.
+
+    .. versionadded:: 0.3.4
+    """
 
     mirostat: Optional[int] = None
     """Enable Mirostat sampling for controlling perplexity.
{langchain_ollama-0.3.5.dist-info → langchain_ollama-0.3.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.5
+Version: 0.3.6
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: >=3.9
 Requires-Dist: ollama<1.0.0,>=0.5.1
-Requires-Dist: langchain-core<1.0.0,>=0.3.69
+Requires-Dist: langchain-core<1.0.0,>=0.3.70
 Description-Content-Type: text/markdown
 
 # langchain-ollama
langchain_ollama-0.3.6.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+langchain_ollama-0.3.6.dist-info/METADATA,sha256=2JNyhPQ9VVfYxjEqSXCn45-MJaYz8yJp3I9wWBQMCU8,2072
+langchain_ollama-0.3.6.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
+langchain_ollama-0.3.6.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_ollama-0.3.6.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama/__init__.py,sha256=iuHf2OeT-vff3152jLaa4XQKuToLnyhdlCKOZiUv12E,1089
+langchain_ollama/_utils.py,sha256=dmFO4tSvDTeMALc89QnTBLNWPMZL0eNAq1EDwuMjRA8,1416
+langchain_ollama/chat_models.py,sha256=X3hYpX0BuXX0pfRW780VYszuPxm8MT1TMZAaS9dfUeA,57787
+langchain_ollama/embeddings.py,sha256=5e65w8EYSktEJGk1e7YYBhBQE-HJJWeLN-Apj01EmRg,10352
+langchain_ollama/llms.py,sha256=S7wYvKwan7fRCW_jOU5pHpsZ4hecBFqIn4sFtg1r8NQ,16812
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.3.6.dist-info/RECORD,,
langchain_ollama-0.3.5.dist-info/RECORD REMOVED
@@ -1,11 +0,0 @@
-langchain_ollama-0.3.5.dist-info/METADATA,sha256=15Fpg-jcUr1QRiPMxng3GPbvRHmAapo1s6qykTZvHk0,2072
-langchain_ollama-0.3.5.dist-info/WHEEL,sha256=9P2ygRxDrTJz3gsagc0Z96ukrxjr-LFBGOgv3AuKlCA,90
-langchain_ollama-0.3.5.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-langchain_ollama-0.3.5.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama/__init__.py,sha256=TI1gI0Wpg7mRXehGpxrJG2flF_t4Ev-aIJlLKV-CgL0,633
-langchain_ollama/_utils.py,sha256=dmFO4tSvDTeMALc89QnTBLNWPMZL0eNAq1EDwuMjRA8,1416
-langchain_ollama/chat_models.py,sha256=olz3KJeLG1vk47Xl38nN9bP4bcol5cBQnPnu5MyP8k8,55539
-langchain_ollama/embeddings.py,sha256=walU1vZq_YamLLPDSJLbMtOu6jFbiNnhJ5ni2sybCRs,10318
-langchain_ollama/llms.py,sha256=PSJ-VQMocp1nm-pgtnKnozidt66RKJiEnhdzftoLNNc,16778
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.3.5.dist-info/RECORD,,