langchain-ollama 0.3.0__tar.gz → 0.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/PKG-INFO +2 -2
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/langchain_ollama/chat_models.py +8 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/langchain_ollama/llms.py +6 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/pyproject.toml +2 -2
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/LICENSE +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/README.md +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/langchain_ollama/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/langchain_ollama/embeddings.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/langchain_ollama/py.typed +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/chat_models/test_chat_models.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/test_compile.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/test_llms.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/unit_tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/unit_tests/test_chat_models.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/unit_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/unit_tests/test_imports.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/unit_tests/test_llms.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: langchain-ollama
|
3
|
-
Version: 0.3.0
|
3
|
+
Version: 0.3.1
|
4
4
|
Summary: An integration package connecting Ollama and LangChain
|
5
5
|
License: MIT
|
6
6
|
Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
|
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
|
|
8
8
|
Project-URL: repository, https://github.com/langchain-ai/langchain
|
9
9
|
Requires-Python: <4.0,>=3.9
|
10
10
|
Requires-Dist: ollama<1,>=0.4.4
|
11
|
-
Requires-Dist: langchain-core<1.0.0,>=0.3.
|
11
|
+
Requires-Dist: langchain-core<1.0.0,>=0.3.51
|
12
12
|
Description-Content-Type: text/markdown
|
13
13
|
|
14
14
|
# langchain-ollama
|
@@ -743,6 +743,10 @@ class ChatOllama(BaseChatModel):
|
|
743
743
|
dict(stream_resp) if stream_resp.get("done") is True else None
|
744
744
|
),
|
745
745
|
)
|
746
|
+
if chunk.generation_info and (
|
747
|
+
model := chunk.generation_info.get("model")
|
748
|
+
):
|
749
|
+
chunk.generation_info["model_name"] = model # backwards compat
|
746
750
|
if self.extract_reasoning:
|
747
751
|
message, is_thinking = self._extract_reasoning(
|
748
752
|
chunk.message, is_thinking
|
@@ -791,6 +795,10 @@ class ChatOllama(BaseChatModel):
|
|
791
795
|
dict(stream_resp) if stream_resp.get("done") is True else None
|
792
796
|
),
|
793
797
|
)
|
798
|
+
if chunk.generation_info and (
|
799
|
+
model := chunk.generation_info.get("model")
|
800
|
+
):
|
801
|
+
chunk.generation_info["model_name"] = model # backwards compat
|
794
802
|
if self.extract_reasoning:
|
795
803
|
message, is_thinking = self._extract_reasoning(
|
796
804
|
chunk.message, is_thinking
|
@@ -84,6 +84,11 @@ class OllamaLLM(BaseLLM):
|
|
84
84
|
"""The temperature of the model. Increasing the temperature will
|
85
85
|
make the model answer more creatively. (Default: 0.8)"""
|
86
86
|
|
87
|
+
seed: Optional[int] = None
|
88
|
+
"""Sets the random number seed to use for generation. Setting this
|
89
|
+
to a specific number will make the model generate the same text for
|
90
|
+
the same prompt."""
|
91
|
+
|
87
92
|
stop: Optional[List[str]] = None
|
88
93
|
"""Sets the stop tokens to use."""
|
89
94
|
|
@@ -150,6 +155,7 @@ class OllamaLLM(BaseLLM):
|
|
150
155
|
"repeat_last_n": self.repeat_last_n,
|
151
156
|
"repeat_penalty": self.repeat_penalty,
|
152
157
|
"temperature": self.temperature,
|
158
|
+
"seed": self.seed,
|
153
159
|
"stop": self.stop if stop is None else stop,
|
154
160
|
"tfs_z": self.tfs_z,
|
155
161
|
"top_k": self.top_k,
|
@@ -9,10 +9,10 @@ authors = []
|
|
9
9
|
requires-python = "<4.0,>=3.9"
|
10
10
|
dependencies = [
|
11
11
|
"ollama<1,>=0.4.4",
|
12
|
-
"langchain-core<1.0.0,>=0.3.
|
12
|
+
"langchain-core<1.0.0,>=0.3.51",
|
13
13
|
]
|
14
14
|
name = "langchain-ollama"
|
15
|
-
version = "0.3.0"
|
15
|
+
version = "0.3.1"
|
16
16
|
description = "An integration package connecting Ollama and LangChain"
|
17
17
|
readme = "README.md"
|
18
18
|
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
{langchain_ollama-0.3.0 → langchain_ollama-0.3.1}/tests/integration_tests/test_embeddings.py
RENAMED
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|