langchain-ollama 0.3.7__tar.gz → 0.3.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/PKG-INFO +2 -2
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/chat_models.py +4 -2
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/llms.py +83 -5
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/pyproject.toml +2 -2
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/LICENSE +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/README.md +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/__init__.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/_utils.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/embeddings.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/py.typed +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/__init__.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/__init__.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/chat_models/cassettes/test_chat_models_standard/TestChatOllama.test_stream_time.yaml +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/chat_models/test_chat_models.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/test_compile.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/integration_tests/test_llms.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/unit_tests/__init__.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/unit_tests/test_chat_models.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/unit_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/unit_tests/test_imports.py +0 -0
- {langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/tests/unit_tests/test_llms.py +0 -0
{langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.7
+Version: 0.3.8
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: >=3.9
 Requires-Dist: ollama<1.0.0,>=0.5.3
-Requires-Dist: langchain-core<1.0.0,>=0.3.
+Requires-Dist: langchain-core<1.0.0,>=0.3.76
 Description-Content-Type: text/markdown

 # langchain-ollama
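The only runtime-dependency change in this release is the raised `langchain-core` floor. A minimal sketch for checking an existing environment against the new constraint, using only the standard library (the version parsing is naive and ignores pre-release suffixes):

```python
from importlib.metadata import PackageNotFoundError, version

# langchain-ollama 0.3.8 requires: langchain-core >=0.3.76,<1.0.0
try:
    core = version("langchain-core")
except PackageNotFoundError:
    core = None

if core is None:
    print("langchain-core is not installed")
else:
    # Naive parse: assumes a plain "X.Y.Z" version string
    major, minor, patch = (int(p) for p in core.split(".")[:3])
    ok = (0, 3, 76) <= (major, minor, patch) < (1, 0, 0)
    print(f"langchain-core {core}: {'satisfies' if ok else 'violates'} >=0.3.76,<1.0.0")
```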
{langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/chat_models.py

@@ -661,8 +661,10 @@ class ChatOllama(BaseChatModel):
             if isinstance(message.content, str):
                 content = message.content
             else:
-                for content_part in
-                if content_part
+                for content_part in message.content:
+                    if isinstance(content_part, str):
+                        content += f"\n{content_part}"
+                    elif content_part.get("type") == "text":
                         content += f"\n{content_part['text']}"
                     elif content_part.get("type") == "tool_use":
                         continue
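In effect, the new loop accepts both bare strings and typed dict parts when flattening a list-valued `message.content`. A standalone sketch of that behavior (the `flatten_content` helper name is hypothetical, not part of the package):

```python
from typing import Union


def flatten_content(parts: list[Union[str, dict]]) -> str:
    """Mirror the 0.3.8 loop: join str and {"type": "text"} parts."""
    content = ""
    for content_part in parts:
        if isinstance(content_part, str):
            # New in 0.3.8: bare string parts are appended directly
            content += f"\n{content_part}"
        elif content_part.get("type") == "text":
            content += f"\n{content_part['text']}"
        elif content_part.get("type") == "tool_use":
            continue  # tool_use parts are skipped in this loop
    return content


print(flatten_content(["hello", {"type": "text", "text": "world"}]))
# -> "\nhello\nworld"
```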
{langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/langchain_ollama/llms.py

@@ -24,15 +24,93 @@ from ._utils import validate_model


 class OllamaLLM(BaseLLM):
-    """
-
-
+    """Ollama large language models.
+
+    Setup:
+        Install ``langchain-ollama`` and install/run the Ollama server locally:
+
+        .. code-block:: bash
+
+            pip install -U langchain-ollama
+            # Visit https://ollama.com/download to download and install Ollama
+            # (Linux users): start the server with ``ollama serve``
+
+        Download a model to use:
+
+        .. code-block:: bash
+
+            ollama pull llama3.1
+
+    Key init args — generation params:
+        model: str
+            Name of the Ollama model to use (e.g. ``'llama4'``).
+        temperature: Optional[float]
+            Sampling temperature. Higher values make output more creative.
+        num_predict: Optional[int]
+            Maximum number of tokens to predict.
+        top_k: Optional[int]
+            Limits the next token selection to the K most probable tokens.
+        top_p: Optional[float]
+            Nucleus sampling parameter. Higher values lead to more diverse text.
+        mirostat: Optional[int]
+            Enable Mirostat sampling for controlling perplexity.
+        seed: Optional[int]
+            Random number seed for generation reproducibility.
+
+    Key init args — client params:
+        base_url: Optional[str]
+            Base URL where Ollama server is hosted.
+        keep_alive: Optional[Union[int, str]]
+            How long the model stays loaded into memory.
+        format: Literal["", "json"]
+            Specify the format of the output.
+
+    See full list of supported init args and their descriptions in the params section.
+
+    Instantiate:
         .. code-block:: python

             from langchain_ollama import OllamaLLM

-
-
+            llm = OllamaLLM(
+                model="llama3.1",
+                temperature=0.7,
+                num_predict=256,
+                # base_url="http://localhost:11434",
+                # other params...
+            )
+
+    Invoke:
+        .. code-block:: python
+
+            input_text = "The meaning of life is "
+            response = llm.invoke(input_text)
+            print(response)
+
+        .. code-block:: none
+
+            "a philosophical question that has been contemplated by humans for
+            centuries..."
+
+    Stream:
+        .. code-block:: python
+
+            for chunk in llm.stream(input_text):
+                print(chunk, end="")
+
+        .. code-block:: none
+
+            a philosophical question that has been contemplated by humans for
+            centuries...
+
+    Async:
+        .. code-block:: python
+
+            response = await llm.ainvoke(input_text)
+
+            # stream:
+            # async for chunk in llm.astream(input_text):
+            #     print(chunk, end="")

     """
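The new docstring's snippets assume a local Ollama server with `llama3.1` pulled; stitched together, they form a runnable script (parameter values taken from the docstring above, a sketch rather than a canonical example):

```python
from langchain_ollama import OllamaLLM

# Assumes a running Ollama server and a pulled model: `ollama pull llama3.1`
llm = OllamaLLM(
    model="llama3.1",
    temperature=0.7,
    num_predict=256,
)

input_text = "The meaning of life is "

# Single-shot completion (returns a string)
print(llm.invoke(input_text))

# Token-by-token streaming
for chunk in llm.stream(input_text):
    print(chunk, end="")
```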
{langchain_ollama-0.3.7 → langchain_ollama-0.3.8}/pyproject.toml

@@ -9,10 +9,10 @@ authors = []
 requires-python = ">=3.9"
 dependencies = [
     "ollama>=0.5.3,<1.0.0",
-    "langchain-core<1.0.0,>=0.3.
+    "langchain-core<1.0.0,>=0.3.76",
 ]
 name = "langchain-ollama"
-version = "0.3.7"
+version = "0.3.8"
 description = "An integration package connecting Ollama and LangChain"
 readme = "README.md"