langchain-ollama 0.3.0__tar.gz → 0.3.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/PKG-INFO +2 -2
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/__init__.py +0 -1
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py +74 -50
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/embeddings.py +7 -7
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/llms.py +21 -19
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/pyproject.toml +9 -2
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models.py +3 -3
- langchain_ollama-0.3.2/tests/integration_tests/chat_models/test_chat_models_standard.py +42 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/test_embeddings.py +1 -3
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/test_chat_models.py +3 -3
- langchain_ollama-0.3.0/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -29
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/LICENSE +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/README.md +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/py.typed +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/test_compile.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/test_llms.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/__init__.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/test_imports.py +0 -0
- {langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/test_llms.py +0 -0
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.0
+Version: 0.3.2
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: <4.0,>=3.9
 Requires-Dist: ollama<1,>=0.4.4
-Requires-Dist: langchain-core<1.0.0,>=0.3.
+Requires-Dist: langchain-core<1.0.0,>=0.3.52
 Description-Content-Type: text/markdown

 # langchain-ollama
```
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py

```diff
@@ -1,21 +1,14 @@
 """Ollama chat models."""

 import json
+from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Dict,
     Final,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
-    Sequence,
-    Tuple,
-    Type,
     Union,
     cast,
 )
@@ -37,6 +30,7 @@ from langchain_core.messages import (
     SystemMessage,
     ToolCall,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
@@ -153,7 +147,7 @@ def _parse_arguments_from_tool_call(

 def _get_tool_calls_from_response(
     response: Mapping[str, Any],
-) ->
+) -> list[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
@@ -180,6 +174,20 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
     }


+def _get_image_from_data_content_block(block: dict) -> str:
+    """Format standard data content block to format expected by Ollama."""
+    if block["type"] == "image":
+        if block["source_type"] == "base64":
+            return block["data"]
+        else:
+            error_message = "Image data only supported through in-line base64 format."
+            raise ValueError(error_message)
+
+    else:
+        error_message = f"Blocks of type {block['type']} not supported."
+        raise ValueError(error_message)
+
+
 def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)

```
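The new `_get_image_from_data_content_block` helper, together with the `is_data_content_block` branch added to `_convert_messages_to_ollama_messages` in the hunks that follow, lets `ChatOllama` accept LangChain's standard image data content blocks in addition to `image_url` parts. A minimal sketch of what that enables, assuming a locally running Ollama server with a vision-capable model pulled; the file name and prompt are illustrative, not taken from the diff:

```python
import base64

from langchain_core.messages import HumanMessage

from langchain_ollama import ChatOllama

# "gemma3:4b" is the vision-capable model used by the new integration test;
# any multimodal model available on the local Ollama server would do.
llm = ChatOllama(model="gemma3:4b")

with open("photo.png", "rb") as f:  # illustrative file name
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image."},
        {
            # Standard data content block: matched by is_data_content_block()
            # and reduced to the raw base64 string Ollama expects.
            "type": "image",
            "source_type": "base64",
            "data": image_b64,
        },
    ]
)
print(llm.invoke([message]).content)
```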
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py (continued)

```diff
@@ -341,7 +349,7 @@ class ChatOllama(BaseChatModel):
     model: str
     """Model name to use."""

-    extract_reasoning: Optional[Union[bool,
+    extract_reasoning: Optional[Union[bool, tuple[str, str]]] = False
    """Whether to extract the reasoning tokens in think blocks.
    Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
    If a tuple is supplied, they are assumed to be the (start, end) tokens.
@@ -399,7 +407,7 @@ class ChatOllama(BaseChatModel):
     to a specific number will make the model generate the same text for
     the same prompt."""

-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
@@ -443,10 +451,10 @@ class ChatOllama(BaseChatModel):

     def _chat_params(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) ->
+    ) -> dict[str, Any]:
         ollama_messages = self._convert_messages_to_ollama_messages(messages)

         if self.stop is not None and stop is not None:
@@ -499,13 +507,13 @@ class ChatOllama(BaseChatModel):
         return self

     def _convert_messages_to_ollama_messages(
-        self, messages:
+        self, messages: list[BaseMessage]
     ) -> Sequence[Message]:
-        ollama_messages:
+        ollama_messages: list = []
         for message in messages:
             role: Literal["user", "assistant", "system", "tool"]
             tool_call_id: Optional[str] = None
-            tool_calls: Optional[
+            tool_calls: Optional[list[dict[str, Any]]] = None
             if isinstance(message, HumanMessage):
                 role = "user"
             elif isinstance(message, AIMessage):
@@ -531,7 +539,7 @@ class ChatOllama(BaseChatModel):
             if isinstance(message.content, str):
                 content = message.content
             else:
-                for content_part in cast(
+                for content_part in cast(list[dict], message.content):
                     if content_part.get("type") == "text":
                         content += f"\n{content_part['text']}"
                     elif content_part.get("type") == "tool_use":
@@ -560,7 +568,9 @@ class ChatOllama(BaseChatModel):
                             images.append(image_url_components[1])
                         else:
                             images.append(image_url_components[0])
-
+                    elif is_data_content_block(content_part):
+                        image = _get_image_from_data_content_block(content_part)
+                        images.append(image)
                     else:
                         raise ValueError(
                             "Unsupported message content type. "
@@ -583,7 +593,7 @@ class ChatOllama(BaseChatModel):

     def _extract_reasoning(
         self, message_chunk: BaseMessageChunk, is_thinking: bool
-    ) ->
+    ) -> tuple[BaseMessageChunk, bool]:
         """Mutate a message chunk to extract reasoning content."""
         if not self.extract_reasoning:
             return message_chunk, is_thinking
@@ -605,8 +615,8 @@ class ChatOllama(BaseChatModel):

     async def _acreate_chat_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -619,8 +629,8 @@ class ChatOllama(BaseChatModel):

     def _create_chat_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -632,8 +642,8 @@ class ChatOllama(BaseChatModel):

     def _chat_stream_with_aggregation(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -657,8 +667,8 @@ class ChatOllama(BaseChatModel):

     async def _achat_stream_with_aggregation(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -681,7 +691,7 @@ class ChatOllama(BaseChatModel):
         return final_chunk

     def _get_ls_params(
-        self, stop: Optional[
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
@@ -697,8 +707,8 @@ class ChatOllama(BaseChatModel):

     def _generate(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -719,13 +729,18 @@ class ChatOllama(BaseChatModel):

     def _iterate_over_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -739,10 +754,12 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
+                if chunk.generation_info and (
+                    model := chunk.generation_info.get("model")
+                ):
+                    chunk.generation_info["model_name"] = model  # backwards compat
                 if self.extract_reasoning:
                     message, is_thinking = self._extract_reasoning(
                         chunk.message, is_thinking
```
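With this change the final streamed chunk's `generation_info` no longer carries the raw `message` payload and mirrors Ollama's `model` field into `model_name`, which is what the standard integration tests look for. A rough sketch of the caller-visible effect, assuming the usual langchain-core behaviour of merging `generation_info` into `response_metadata` and a locally pulled model (model name and prompt are illustrative):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1")  # assumes this model is pulled locally

last = None
for chunk in llm.stream("Say hello in one word."):
    last = chunk  # metadata arrives with the final ("done") chunk

# After 0.3.2 the final chunk's metadata should expose both keys:
print(last.response_metadata.get("model"))       # e.g. "llama3.1"
print(last.response_metadata.get("model_name"))  # same value, kept for compatibility
```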
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py (continued)

```diff
@@ -752,8 +769,8 @@ class ChatOllama(BaseChatModel):

     def _stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
@@ -767,13 +784,18 @@ class ChatOllama(BaseChatModel):

     async def _aiterate_over_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -787,10 +809,12 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
+                if chunk.generation_info and (
+                    model := chunk.generation_info.get("model")
+                ):
+                    chunk.generation_info["model_name"] = model  # backwards compat
                 if self.extract_reasoning:
                     message, is_thinking = self._extract_reasoning(
                         chunk.message, is_thinking
@@ -800,8 +824,8 @@ class ChatOllama(BaseChatModel):

     async def _astream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
@@ -815,8 +839,8 @@ class ChatOllama(BaseChatModel):

     async def _agenerate(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -842,7 +866,7 @@ class ChatOllama(BaseChatModel):

     def bind_tools(
         self,
-        tools: Sequence[Union[
+        tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
         *,
         tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
@@ -865,12 +889,12 @@ class ChatOllama(BaseChatModel):

     def with_structured_output(
         self,
-        schema: Union[
+        schema: Union[dict, type],
         *,
         method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, Union[
+    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         """Model wrapper that returns outputs formatted to match the given schema.

         Args:
```
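The `bind_tools` and `with_structured_output` hunks above only modernize the annotations; the call patterns are unchanged. For orientation, a minimal `with_structured_output` sketch using the default `method="json_schema"` shown in the context lines (model name and prompt are illustrative, not taken from the diff):

```python
from pydantic import BaseModel

from langchain_ollama import ChatOllama


class Person(BaseModel):
    name: str
    age: int


llm = ChatOllama(model="llama3.1")  # assumes the model is pulled locally
structured = llm.with_structured_output(Person)  # default method="json_schema"

result = structured.invoke("Alice is 30 years old.")
# result is a Person instance, e.g. Person(name='Alice', age=30)
print(result)
```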
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/embeddings.py

```diff
@@ -1,6 +1,6 @@
 """Ollama embeddings models."""

-from typing import Any,
+from typing import Any, Optional

 from langchain_core.embeddings import Embeddings
 from ollama import AsyncClient, Client
@@ -188,7 +188,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""

-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
@@ -211,7 +211,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     )

     @property
-    def _default_params(self) ->
+    def _default_params(self) -> dict[str, Any]:
         """Get the default parameters for calling Ollama."""
         return {
             "mirostat": self.mirostat,
@@ -237,18 +237,18 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
         return self

-    def embed_documents(self, texts:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = self._client.embed(
             self.model, texts, options=self._default_params, keep_alive=self.keep_alive
         )["embeddings"]
         return embedded_docs

-    def embed_query(self, text: str) ->
+    def embed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return self.embed_documents([text])[0]

-    async def aembed_documents(self, texts:
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = (
             await self._async_client.embed(
@@ -257,6 +257,6 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         )["embeddings"]
         return embedded_docs

-    async def aembed_query(self, text: str) ->
+    async def aembed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return (await self.aembed_documents([text]))[0]
```
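These hunks only switch the embedding signatures to builtin generics; the runtime behaviour is unchanged. A small usage sketch for orientation (the model name is an assumption, not part of the diff):

```python
from langchain_ollama import OllamaEmbeddings

embeddings = OllamaEmbeddings(model="nomic-embed-text")  # any local embedding model

query_vector: list[float] = embeddings.embed_query("hello world")
doc_vectors: list[list[float]] = embeddings.embed_documents(["doc one", "doc two"])
print(len(query_vector), len(doc_vectors))
```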
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/llms.py

```diff
@@ -1,13 +1,9 @@
 """Ollama large language models."""

+from collections.abc import AsyncIterator, Iterator, Mapping
 from typing import (
     Any,
-    AsyncIterator,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
     Union,
 )
@@ -84,7 +80,12 @@ class OllamaLLM(BaseLLM):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""

-
+    seed: Optional[int] = None
+    """Sets the random number seed to use for generation. Setting this
+    to a specific number will make the model generate the same text for
+    the same prompt."""
+
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
```
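Besides the typing cleanup, this release adds a `seed` field to `OllamaLLM` (the docstring context in the `ChatOllama` hunks above shows the chat model already had one) and forwards it to the request options, as the `"seed": self.seed` entry in the next hunk shows. A hedged sketch of the intended effect; model name and prompt are illustrative:

```python
from langchain_ollama import OllamaLLM

# seed is passed through to Ollama's generation options, so a fixed seed with
# otherwise identical parameters should reproduce the same completion for the
# same prompt.
llm = OllamaLLM(model="llama3.1", seed=42)  # assumes the model is pulled locally

first = llm.invoke("Write one sentence about the sea.")
second = llm.invoke("Write one sentence about the sea.")
print(first == second)  # typically True for a fixed seed
```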
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/langchain_ollama/llms.py (continued)

```diff
@@ -129,9 +130,9 @@ class OllamaLLM(BaseLLM):
     def _generate_params(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) ->
+    ) -> dict[str, Any]:
         if self.stop is not None and stop is not None:
             raise ValueError("`stop` found in both the input and default params.")
         elif self.stop is not None:
@@ -150,6 +151,7 @@ class OllamaLLM(BaseLLM):
             "repeat_last_n": self.repeat_last_n,
             "repeat_penalty": self.repeat_penalty,
             "temperature": self.temperature,
+            "seed": self.seed,
             "stop": self.stop if stop is None else stop,
             "tfs_z": self.tfs_z,
             "top_k": self.top_k,
@@ -175,7 +177,7 @@ class OllamaLLM(BaseLLM):
         return "ollama-llm"

     def _get_ls_params(
-        self, stop: Optional[
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = super()._get_ls_params(stop=stop, **kwargs)
@@ -194,7 +196,7 @@ class OllamaLLM(BaseLLM):
     async def _acreate_generate_stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         async for part in await self._async_client.generate(
@@ -205,7 +207,7 @@ class OllamaLLM(BaseLLM):
     def _create_generate_stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         yield from self._client.generate(
@@ -215,7 +217,7 @@ class OllamaLLM(BaseLLM):
     async def _astream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -247,7 +249,7 @@ class OllamaLLM(BaseLLM):
     def _stream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -278,8 +280,8 @@ class OllamaLLM(BaseLLM):

     def _generate(
         self,
-        prompts:
-        stop: Optional[
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -297,8 +299,8 @@ class OllamaLLM(BaseLLM):

     async def _agenerate(
         self,
-        prompts:
-        stop: Optional[
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -317,7 +319,7 @@ class OllamaLLM(BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
@@ -339,7 +341,7 @@ class OllamaLLM(BaseLLM):
     async def _astream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
```
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/pyproject.toml

```diff
@@ -9,10 +9,10 @@ authors = []
 requires-python = "<4.0,>=3.9"
 dependencies = [
     "ollama<1,>=0.4.4",
-    "langchain-core<1.0.0,>=0.3.
+    "langchain-core<1.0.0,>=0.3.52",
 ]
 name = "langchain-ollama"
-version = "0.3.0"
+version = "0.3.2"
 description = "An integration package connecting Ollama and LangChain"
 readme = "README.md"

@@ -60,6 +60,9 @@ editable = true
 [tool.mypy]
 disallow_untyped_defs = "True"

+[tool.ruff]
+target-version = "py39"
+
 [tool.ruff.lint]
 select = [
     "E",
@@ -67,6 +70,10 @@ select = [
     "I",
     "T201",
     "D",
+    "UP",
+]
+ignore = [
+    "UP007",
 ]

 [tool.ruff.lint.pydocstyle]
```
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models.py

```diff
@@ -1,10 +1,10 @@
 """Ollama specific chat model integration tests"""

-from typing import
+from typing import Annotated, Optional

 import pytest
 from pydantic import BaseModel, Field
-from typing_extensions import
+from typing_extensions import TypedDict

 from langchain_ollama import ChatOllama

@@ -78,7 +78,7 @@ def test_structured_output_deeply_nested(model: str) -> None:
     class Data(BaseModel):
         """Extracted data about people."""

-        people:
+        people: list[Person]

     chat = llm.with_structured_output(Data)  # type: ignore[arg-type]
     text = (
```
langchain_ollama-0.3.2/tests/integration_tests/chat_models/test_chat_models_standard.py

```diff
@@ -0,0 +1,42 @@
+"""Test chat model integration using standard integration tests."""
+
+from langchain_tests.integration_tests import ChatModelIntegrationTests
+
+from langchain_ollama.chat_models import ChatOllama
+
+
+class TestChatOllama(ChatModelIntegrationTests):
+    @property
+    def chat_model_class(self) -> type[ChatOllama]:
+        return ChatOllama
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {"model": "llama3.1"}
+
+    @property
+    def supports_json_mode(self) -> bool:
+        return True
+
+    @property
+    def has_tool_choice(self) -> bool:
+        return False
+
+
+def test_image_model() -> None:
+    class ImageModelTests(ChatModelIntegrationTests):
+        @property
+        def chat_model_class(self) -> type[ChatOllama]:
+            return ChatOllama
+
+        @property
+        def chat_model_params(self) -> dict:
+            return {"model": "gemma3:4b"}
+
+        @property
+        def supports_image_inputs(self) -> bool:
+            return True
+
+    test_instance = ImageModelTests()
+    model = test_instance.chat_model_class(**test_instance.chat_model_params)
+    ImageModelTests().test_image_inputs(model)
```
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/integration_tests/test_embeddings.py
RENAMED

```diff
@@ -1,7 +1,5 @@
 """Test Ollama embeddings."""

-from typing import Type
-
 from langchain_tests.integration_tests import EmbeddingsIntegrationTests

 from langchain_ollama.embeddings import OllamaEmbeddings
@@ -9,7 +7,7 @@ from langchain_ollama.embeddings import OllamaEmbeddings

 class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
     @property
-    def embeddings_class(self) ->
+    def embeddings_class(self) -> type[OllamaEmbeddings]:
         return OllamaEmbeddings

     @property
```
{langchain_ollama-0.3.0 → langchain_ollama-0.3.2}/tests/unit_tests/test_chat_models.py

```diff
@@ -1,6 +1,6 @@
 """Test chat model integration."""
+
 import json
-from typing import Dict, Type

 from langchain_tests.unit_tests import ChatModelUnitTests

@@ -9,11 +9,11 @@ from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_

 class TestChatOllama(ChatModelUnitTests):
     @property
-    def chat_model_class(self) ->
+    def chat_model_class(self) -> type[ChatOllama]:
         return ChatOllama

     @property
-    def chat_model_params(self) ->
+    def chat_model_params(self) -> dict:
         return {"model": "llama3-groq-tool-use"}


```
langchain_ollama-0.3.0/tests/integration_tests/chat_models/test_chat_models_standard.py

```diff
@@ -1,29 +0,0 @@
-"""Test chat model integration using standard integration tests."""
-
-from typing import Type
-
-from langchain_tests.integration_tests import ChatModelIntegrationTests
-
-from langchain_ollama.chat_models import ChatOllama
-
-
-class TestChatOllama(ChatModelIntegrationTests):
-    @property
-    def chat_model_class(self) -> Type[ChatOllama]:
-        return ChatOllama
-
-    @property
-    def chat_model_params(self) -> dict:
-        return {"model": "llama3.1"}
-
-    @property
-    def supports_image_inputs(self) -> bool:
-        return True
-
-    @property
-    def supports_json_mode(self) -> bool:
-        return True
-
-    @property
-    def has_tool_choice(self) -> bool:
-        return False
```