langchain-ollama 0.3.3.tar.gz → 0.3.5.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (30)
  1. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/PKG-INFO +23 -12
  2. langchain_ollama-0.3.5/README.md +55 -0
  3. langchain_ollama-0.3.5/langchain_ollama/_utils.py +39 -0
  4. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/langchain_ollama/chat_models.py +190 -118
  5. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/langchain_ollama/embeddings.py +39 -16
  6. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/langchain_ollama/llms.py +99 -35
  7. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/pyproject.toml +60 -6
  8. langchain_ollama-0.3.5/tests/integration_tests/chat_models/cassettes/test_chat_models_standard/TestChatOllama.test_stream_time.yaml +32 -0
  9. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/integration_tests/chat_models/test_chat_models.py +9 -5
  10. langchain_ollama-0.3.5/tests/integration_tests/chat_models/test_chat_models_reasoning.py +252 -0
  11. langchain_ollama-0.3.5/tests/integration_tests/chat_models/test_chat_models_standard.py +180 -0
  12. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/integration_tests/test_compile.py +0 -1
  13. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/integration_tests/test_embeddings.py +3 -1
  14. langchain_ollama-0.3.5/tests/integration_tests/test_llms.py +152 -0
  15. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/unit_tests/test_chat_models.py +22 -3
  16. langchain_ollama-0.3.5/tests/unit_tests/test_embeddings.py +63 -0
  17. langchain_ollama-0.3.5/tests/unit_tests/test_llms.py +71 -0
  18. langchain_ollama-0.3.3/README.md +0 -44
  19. langchain_ollama-0.3.3/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -162
  20. langchain_ollama-0.3.3/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -42
  21. langchain_ollama-0.3.3/tests/integration_tests/test_llms.py +0 -66
  22. langchain_ollama-0.3.3/tests/unit_tests/test_embeddings.py +0 -8
  23. langchain_ollama-0.3.3/tests/unit_tests/test_llms.py +0 -28
  24. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/LICENSE +0 -0
  25. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/langchain_ollama/__init__.py +1 -1
  26. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/langchain_ollama/py.typed +0 -0
  27. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/__init__.py +0 -0
  28. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/integration_tests/__init__.py +0 -0
  29. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/unit_tests/__init__.py +0 -0
  30. {langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/tests/unit_tests/test_imports.py +0 -0
{langchain_ollama-0.3.3 → langchain_ollama-0.3.5}/PKG-INFO
@@ -1,14 +1,14 @@
  Metadata-Version: 2.1
  Name: langchain-ollama
- Version: 0.3.3
+ Version: 0.3.5
  Summary: An integration package connecting Ollama and LangChain
  License: MIT
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
  Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
  Project-URL: repository, https://github.com/langchain-ai/langchain
  Requires-Python: >=3.9
- Requires-Dist: ollama<1.0.0,>=0.4.8
- Requires-Dist: langchain-core<1.0.0,>=0.3.60
+ Requires-Dist: ollama<1.0.0,>=0.5.1
+ Requires-Dist: langchain-core<1.0.0,>=0.3.69
  Description-Content-Type: text/markdown

  # langchain-ollama
@@ -21,37 +21,48 @@ This package contains the LangChain integration with Ollama
  pip install -U langchain-ollama
  ```

- You will also need to run the Ollama server locally.
- You can download it [here](https://ollama.com/download).
+ For the package to work, you will need to install and run the Ollama server locally ([download](https://ollama.com/download)).

- ## Chat Models
+ To run integration tests (`make integration_tests`), you will need the following models installed in your Ollama server:
+
+ - `llama3.1`
+ - `deepseek-r1:1.5b`
+
+ Install these models by running:
+
+ ```bash
+ ollama pull <name-of-model>
+ ```
+
+ ## [Chat Models](https://python.langchain.com/api_reference/ollama/chat_models/langchain_ollama.chat_models.ChatOllama.html#chatollama)

  `ChatOllama` class exposes chat models from Ollama.

  ```python
  from langchain_ollama import ChatOllama

- llm = ChatOllama(model="llama3-groq-tool-use")
+ llm = ChatOllama(model="llama3.1")
  llm.invoke("Sing a ballad of LangChain.")
  ```

- ## Embeddings
+ ## [Embeddings](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html#ollamaembeddings)

  `OllamaEmbeddings` class exposes embeddings from Ollama.

  ```python
  from langchain_ollama import OllamaEmbeddings

- embeddings = OllamaEmbeddings(model="llama3")
+ embeddings = OllamaEmbeddings(model="llama3.1")
  embeddings.embed_query("What is the meaning of life?")
  ```

- ## LLMs
- `OllamaLLM` class exposes LLMs from Ollama.
+ ## [LLMs](https://python.langchain.com/api_reference/ollama/llms/langchain_ollama.llms.OllamaLLM.html#ollamallm)
+
+ `OllamaLLM` class exposes traditional LLMs from Ollama.

  ```python
  from langchain_ollama import OllamaLLM

- llm = OllamaLLM(model="llama3")
+ llm = OllamaLLM(model="llama3.1")
  llm.invoke("The meaning of life is")
  ```
langchain_ollama-0.3.5/README.md
@@ -0,0 +1,55 @@
+ # langchain-ollama
+
+ This package contains the LangChain integration with Ollama
+
+ ## Installation
+
+ ```bash
+ pip install -U langchain-ollama
+ ```
+
+ For the package to work, you will need to install and run the Ollama server locally ([download](https://ollama.com/download)).
+
+ To run integration tests (`make integration_tests`), you will need the following models installed in your Ollama server:
+
+ - `llama3.1`
+ - `deepseek-r1:1.5b`
+
+ Install these models by running:
+
+ ```bash
+ ollama pull <name-of-model>
+ ```
+
+ ## [Chat Models](https://python.langchain.com/api_reference/ollama/chat_models/langchain_ollama.chat_models.ChatOllama.html#chatollama)
+
+ `ChatOllama` class exposes chat models from Ollama.
+
+ ```python
+ from langchain_ollama import ChatOllama
+
+ llm = ChatOllama(model="llama3.1")
+ llm.invoke("Sing a ballad of LangChain.")
+ ```
+
+ ## [Embeddings](https://python.langchain.com/api_reference/ollama/embeddings/langchain_ollama.embeddings.OllamaEmbeddings.html#ollamaembeddings)
+
+ `OllamaEmbeddings` class exposes embeddings from Ollama.
+
+ ```python
+ from langchain_ollama import OllamaEmbeddings
+
+ embeddings = OllamaEmbeddings(model="llama3.1")
+ embeddings.embed_query("What is the meaning of life?")
+ ```
+
+ ## [LLMs](https://python.langchain.com/api_reference/ollama/llms/langchain_ollama.llms.OllamaLLM.html#ollamallm)
+
+ `OllamaLLM` class exposes traditional LLMs from Ollama.
+
+ ```python
+ from langchain_ollama import OllamaLLM
+
+ llm = OllamaLLM(model="llama3.1")
+ llm.invoke("The meaning of life is")
+ ```
langchain_ollama-0.3.5/langchain_ollama/_utils.py
@@ -0,0 +1,39 @@
+ """Utility functions for validating Ollama models."""
+
+ from httpx import ConnectError
+ from ollama import Client, ResponseError
+
+
+ def validate_model(client: Client, model_name: str) -> None:
+     """Validate that a model exists in the Ollama instance.
+
+     Args:
+         client: The Ollama client.
+         model_name: The name of the model to validate.
+
+     Raises:
+         ValueError: If the model is not found or if there's a connection issue.
+     """
+     try:
+         response = client.list()
+
+         model_names: list[str] = [model["model"] for model in response["models"]]
+
+         if not any(
+             model_name == m or m.startswith(f"{model_name}:") for m in model_names
+         ):
+             msg = (
+                 f"Model `{model_name}` not found in Ollama. Please pull the "
+                 f"model (using `ollama pull {model_name}`) or specify a valid "
+                 f"model name. Available local models: {', '.join(model_names)}"
+             )
+             raise ValueError(msg)
+     except ConnectError as e:
+         msg = "Failed to connect to Ollama. Please check that Ollama is downloaded, running and accessible. https://ollama.com/download"  # noqa: E501
+         raise ValueError(msg) from e
+     except ResponseError as e:
+         msg = (
+             "Received an error from the Ollama API. "
+             "Please check your Ollama server logs."
+         )
+         raise ValueError(msg) from e
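For context, the new `_utils.py` helper above is what this release uses to check that a requested model actually exists on the server. As a usage illustration only (this snippet is not part of the package), here is a minimal sketch that validates a model against a local Ollama server, assuming the default host `http://localhost:11434` and `llama3.1` as an example model name:

```python
from ollama import Client

from langchain_ollama._utils import validate_model

# Assumed default host for a local Ollama server; adjust if yours differs.
client = Client(host="http://localhost:11434")

try:
    # Raises ValueError if `llama3.1` has not been pulled, if the server
    # is unreachable, or if the Ollama API returns an error.
    validate_model(client, "llama3.1")
    print("llama3.1 is available locally.")
except ValueError as err:
    print(f"Model validation failed: {err}")
```

Note that the helper treats a bare name such as `llama3.1` as matching any local tag (`llama3.1:latest`, `llama3.1:8b`, and so on), since it accepts either an exact match or any `model_name:`-prefixed variant.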