langchain-ollama 0.3.1__tar.gz → 0.3.2__tar.gz
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/PKG-INFO +2 -2
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/__init__.py +0 -1
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py +66 -50
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/embeddings.py +7 -7
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/llms.py +15 -19
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/pyproject.toml +9 -2
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models.py +3 -3
- langchain_ollama-0.3.2/tests/integration_tests/chat_models/test_chat_models_standard.py +42 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/test_embeddings.py +1 -3
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/test_chat_models.py +3 -3
- langchain_ollama-0.3.1/tests/integration_tests/chat_models/test_chat_models_standard.py +0 -29
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/LICENSE +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/README.md +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/py.typed +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/__init__.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/__init__.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models_reasoning.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/test_compile.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/test_llms.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/__init__.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/test_embeddings.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/test_imports.py +0 -0
- {langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/test_llms.py +0 -0
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.1
+Version: 0.3.2
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: <4.0,>=3.9
 Requires-Dist: ollama<1,>=0.4.4
-Requires-Dist: langchain-core<1.0.0,>=0.3.
+Requires-Dist: langchain-core<1.0.0,>=0.3.52
 Description-Content-Type: text/markdown

 # langchain-ollama
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/chat_models.py
RENAMED
@@ -1,21 +1,14 @@
 """Ollama chat models."""

 import json
+from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Dict,
     Final,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
-    Sequence,
-    Tuple,
-    Type,
     Union,
     cast,
 )
@@ -37,6 +30,7 @@ from langchain_core.messages import (
     SystemMessage,
     ToolCall,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
@@ -153,7 +147,7 @@ def _parse_arguments_from_tool_call(

 def _get_tool_calls_from_response(
     response: Mapping[str, Any],
-) ->
+) -> list[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
@@ -180,6 +174,20 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
     }


+def _get_image_from_data_content_block(block: dict) -> str:
+    """Format standard data content block to format expected by Ollama."""
+    if block["type"] == "image":
+        if block["source_type"] == "base64":
+            return block["data"]
+        else:
+            error_message = "Image data only supported through in-line base64 format."
+            raise ValueError(error_message)
+
+    else:
+        error_message = f"Blocks of type {block['type']} not supported."
+        raise ValueError(error_message)
+
+
 def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)

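For context on the new `_get_image_from_data_content_block` helper: it lets ChatOllama accept LangChain's standard image data content blocks (in-line base64 only) alongside the existing `image_url` parts. A minimal usage sketch, not taken from the package, assuming a local Ollama server with a vision-capable model such as gemma3:4b already pulled:

import base64

from langchain_core.messages import HumanMessage

from langchain_ollama import ChatOllama

# Assumes `ollama serve` is running locally and the model has been pulled.
llm = ChatOllama(model="gemma3:4b")

# Read an image and encode it as base64, the only source_type the helper accepts.
with open("photo.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this image in one sentence."},
        # Standard data content block; converted to an Ollama image by the new helper.
        {
            "type": "image",
            "source_type": "base64",
            "mime_type": "image/jpeg",
            "data": image_b64,
        },
    ]
)

response = llm.invoke([message])
print(response.content)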
@@ -341,7 +349,7 @@ class ChatOllama(BaseChatModel):
     model: str
     """Model name to use."""

-    extract_reasoning: Optional[Union[bool,
+    extract_reasoning: Optional[Union[bool, tuple[str, str]]] = False
     """Whether to extract the reasoning tokens in think blocks.
     Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
     If a tuple is supplied, they are assumed to be the (start, end) tokens.
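As an illustration of the `extract_reasoning` field, a sketch under the assumption that a reasoning model such as deepseek-r1 is available locally (not part of the diff):

from langchain_ollama import ChatOllama

# True uses the default <think>...</think> tokens; a (start, end) tuple overrides them.
llm = ChatOllama(model="deepseek-r1", extract_reasoning=True)

result = llm.invoke("What is 17 * 23?")
answer = result.content  # text outside the think block
reasoning = result.additional_kwargs.get("reasoning_content")  # extracted think-block text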
@@ -399,7 +407,7 @@ class ChatOllama(BaseChatModel):
     to a specific number will make the model generate the same text for
     the same prompt."""

-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
@@ -443,10 +451,10 @@ class ChatOllama(BaseChatModel):

     def _chat_params(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) ->
+    ) -> dict[str, Any]:
         ollama_messages = self._convert_messages_to_ollama_messages(messages)

         if self.stop is not None and stop is not None:
@@ -499,13 +507,13 @@ class ChatOllama(BaseChatModel):
         return self

     def _convert_messages_to_ollama_messages(
-        self, messages:
+        self, messages: list[BaseMessage]
     ) -> Sequence[Message]:
-        ollama_messages:
+        ollama_messages: list = []
         for message in messages:
             role: Literal["user", "assistant", "system", "tool"]
             tool_call_id: Optional[str] = None
-            tool_calls: Optional[
+            tool_calls: Optional[list[dict[str, Any]]] = None
             if isinstance(message, HumanMessage):
                 role = "user"
             elif isinstance(message, AIMessage):
@@ -531,7 +539,7 @@ class ChatOllama(BaseChatModel):
             if isinstance(message.content, str):
                 content = message.content
             else:
-                for content_part in cast(
+                for content_part in cast(list[dict], message.content):
                     if content_part.get("type") == "text":
                         content += f"\n{content_part['text']}"
                     elif content_part.get("type") == "tool_use":
@@ -560,7 +568,9 @@ class ChatOllama(BaseChatModel):
                             images.append(image_url_components[1])
                         else:
                             images.append(image_url_components[0])
-
+                    elif is_data_content_block(content_part):
+                        image = _get_image_from_data_content_block(content_part)
+                        images.append(image)
                     else:
                         raise ValueError(
                             "Unsupported message content type. "
@@ -583,7 +593,7 @@ class ChatOllama(BaseChatModel):

     def _extract_reasoning(
         self, message_chunk: BaseMessageChunk, is_thinking: bool
-    ) ->
+    ) -> tuple[BaseMessageChunk, bool]:
         """Mutate a message chunk to extract reasoning content."""
         if not self.extract_reasoning:
             return message_chunk, is_thinking
@@ -605,8 +615,8 @@ class ChatOllama(BaseChatModel):

     async def _acreate_chat_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -619,8 +629,8 @@ class ChatOllama(BaseChatModel):

     def _create_chat_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -632,8 +642,8 @@ class ChatOllama(BaseChatModel):

     def _chat_stream_with_aggregation(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -657,8 +667,8 @@ class ChatOllama(BaseChatModel):

     async def _achat_stream_with_aggregation(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -681,7 +691,7 @@ class ChatOllama(BaseChatModel):
         return final_chunk

     def _get_ls_params(
-        self, stop: Optional[
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
@@ -697,8 +707,8 @@ class ChatOllama(BaseChatModel):

     def _generate(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -719,13 +729,18 @@ class ChatOllama(BaseChatModel):

     def _iterate_over_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -739,9 +754,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
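The practical effect of this refactor is that the final streamed chunk still carries Ollama's closing metadata (done, total_duration, eval_count, and so on) but no longer duplicates the raw message payload inside generation_info. A hedged sketch of reading that metadata while streaming, assuming a local llama3.1:

from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1")

final_metadata = {}
for chunk in llm.stream("Say hi in one word."):
    # langchain-core merges each chunk's generation_info into response_metadata,
    # so the closing done=True payload (minus the popped "message" entry) should
    # surface on the last chunk only.
    final_metadata.update(chunk.response_metadata)

print(final_metadata.get("model"), final_metadata.get("total_duration"))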
@@ -756,8 +769,8 @@ class ChatOllama(BaseChatModel):

     def _stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
@@ -771,13 +784,18 @@ class ChatOllama(BaseChatModel):

     async def _aiterate_over_stream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -791,9 +809,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
@@ -808,8 +824,8 @@ class ChatOllama(BaseChatModel):

     async def _astream(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
@@ -823,8 +839,8 @@ class ChatOllama(BaseChatModel):

     async def _agenerate(
         self,
-        messages:
-        stop: Optional[
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -850,7 +866,7 @@ class ChatOllama(BaseChatModel):

     def bind_tools(
         self,
-        tools: Sequence[Union[
+        tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
         *,
         tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
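For reference, a short bind_tools sketch against the updated signature (illustrative only; assumes a tool-calling model such as llama3.1 is pulled locally):

from langchain_core.tools import tool

from langchain_ollama import ChatOllama


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


llm = ChatOllama(model="llama3.1")
# Tools may be dicts, Pydantic/TypedDict classes, plain callables, or BaseTool instances.
llm_with_tools = llm.bind_tools([add])

response = llm_with_tools.invoke("What is 2 + 40? Use the add tool.")
print(response.tool_calls)  # e.g. [{"name": "add", "args": {"a": 2, "b": 40}, ...}]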
@@ -873,12 +889,12 @@ class ChatOllama(BaseChatModel):

     def with_structured_output(
         self,
-        schema: Union[
+        schema: Union[dict, type],
         *,
         method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, Union[
+    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         """Model wrapper that returns outputs formatted to match the given schema.

         Args:
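And a brief with_structured_output sketch (illustrative, not from the diff); the default method="json_schema" asks Ollama for schema-constrained JSON and parses it back into the given type:

from pydantic import BaseModel, Field

from langchain_ollama import ChatOllama


class Joke(BaseModel):
    """A joke to tell the user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline of the joke")


llm = ChatOllama(model="llama3.1", temperature=0)
structured_llm = llm.with_structured_output(Joke)

joke = structured_llm.invoke("Tell me a joke about type hints.")
print(joke.setup, "-", joke.punchline)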
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/embeddings.py
RENAMED
@@ -1,6 +1,6 @@
 """Ollama embeddings models."""

-from typing import Any,
+from typing import Any, Optional

 from langchain_core.embeddings import Embeddings
 from ollama import AsyncClient, Client
@@ -188,7 +188,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""

-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
@@ -211,7 +211,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     )

     @property
-    def _default_params(self) ->
+    def _default_params(self) -> dict[str, Any]:
         """Get the default parameters for calling Ollama."""
         return {
             "mirostat": self.mirostat,
@@ -237,18 +237,18 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
         return self

-    def embed_documents(self, texts:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = self._client.embed(
             self.model, texts, options=self._default_params, keep_alive=self.keep_alive
         )["embeddings"]
         return embedded_docs

-    def embed_query(self, text: str) ->
+    def embed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return self.embed_documents([text])[0]

-    async def aembed_documents(self, texts:
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = (
             await self._async_client.embed(
@@ -257,6 +257,6 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         )["embeddings"]
         return embedded_docs

-    async def aembed_query(self, text: str) ->
+    async def aembed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return (await self.aembed_documents([text]))[0]
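For reference, a short OllamaEmbeddings usage sketch (not part of the diff; assumes a local Ollama server and an embedding model such as nomic-embed-text already pulled):

from langchain_ollama import OllamaEmbeddings

embeddings = OllamaEmbeddings(model="nomic-embed-text")

# embed_documents returns one vector per input text; embed_query returns a single vector.
vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("hello")
assert len(vectors) == 2 and len(query_vector) == len(vectors[0])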
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/langchain_ollama/llms.py
RENAMED
@@ -1,13 +1,9 @@
 """Ollama large language models."""

+from collections.abc import AsyncIterator, Iterator, Mapping
 from typing import (
     Any,
-    AsyncIterator,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
     Union,
 )
@@ -89,7 +85,7 @@ class OllamaLLM(BaseLLM):
     to a specific number will make the model generate the same text for
     the same prompt."""

-    stop: Optional[
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""

     tfs_z: Optional[float] = None
@@ -134,9 +130,9 @@ class OllamaLLM(BaseLLM):
     def _generate_params(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) ->
+    ) -> dict[str, Any]:
         if self.stop is not None and stop is not None:
             raise ValueError("`stop` found in both the input and default params.")
         elif self.stop is not None:
@@ -181,7 +177,7 @@ class OllamaLLM(BaseLLM):
         return "ollama-llm"

     def _get_ls_params(
-        self, stop: Optional[
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = super()._get_ls_params(stop=stop, **kwargs)
@@ -200,7 +196,7 @@ class OllamaLLM(BaseLLM):
     async def _acreate_generate_stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         async for part in await self._async_client.generate(
@@ -211,7 +207,7 @@ class OllamaLLM(BaseLLM):
     def _create_generate_stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         yield from self._client.generate(
@@ -221,7 +217,7 @@ class OllamaLLM(BaseLLM):
     async def _astream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -253,7 +249,7 @@ class OllamaLLM(BaseLLM):
     def _stream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -284,8 +280,8 @@ class OllamaLLM(BaseLLM):

     def _generate(
         self,
-        prompts:
-        stop: Optional[
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -303,8 +299,8 @@ class OllamaLLM(BaseLLM):

     async def _agenerate(
         self,
-        prompts:
-        stop: Optional[
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -323,7 +319,7 @@ class OllamaLLM(BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
@@ -345,7 +341,7 @@ class OllamaLLM(BaseLLM):
     async def _astream(
         self,
         prompt: str,
-        stop: Optional[
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
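For reference, a short usage sketch of the completion-style OllamaLLM class whose signatures are modernized above (illustrative; assumes llama3.1 is pulled locally):

from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3.1", temperature=0.2, stop=["\n\n"])

# invoke() returns the full completion; stream() yields text chunks as they arrive.
text = llm.invoke("Write a haiku about diffs.")
for chunk in llm.stream("Count to five:"):
    print(chunk, end="", flush=True)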
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/pyproject.toml
RENAMED
@@ -9,10 +9,10 @@ authors = []
 requires-python = "<4.0,>=3.9"
 dependencies = [
     "ollama<1,>=0.4.4",
-    "langchain-core<1.0.0,>=0.3.
+    "langchain-core<1.0.0,>=0.3.52",
 ]
 name = "langchain-ollama"
-version = "0.3.1"
+version = "0.3.2"
 description = "An integration package connecting Ollama and LangChain"
 readme = "README.md"

@@ -60,6 +60,9 @@ editable = true
 [tool.mypy]
 disallow_untyped_defs = "True"

+[tool.ruff]
+target-version = "py39"
+
 [tool.ruff.lint]
 select = [
     "E",
@@ -67,6 +70,10 @@ select = [
     "I",
     "T201",
     "D",
+    "UP",
+]
+ignore = [
+    "UP007",
 ]

 [tool.ruff.lint.pydocstyle]
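The new UP (pyupgrade) rule set with target-version = "py39" is what drives most of the annotation churn in this release: typing aliases like List and Dict become builtin generics, while UP007 is ignored so the Optional/Union spellings are kept. A small before/after illustration (not package code):

from typing import Dict, List, Optional  # pre-UP style imports


def old_style(texts: List[str], stop: Optional[List[str]] = None) -> Dict[str, int]:
    return {t: len(t) for t in texts}


# After applying the UP rules on Python 3.9+: builtin generics, Optional kept (UP007 ignored).
def new_style(texts: list[str], stop: Optional[list[str]] = None) -> dict[str, int]:
    return {t: len(t) for t in texts}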
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/chat_models/test_chat_models.py
RENAMED
@@ -1,10 +1,10 @@
 """Ollama specific chat model integration tests"""

-from typing import
+from typing import Annotated, Optional

 import pytest
 from pydantic import BaseModel, Field
-from typing_extensions import
+from typing_extensions import TypedDict

 from langchain_ollama import ChatOllama

@@ -78,7 +78,7 @@ def test_structured_output_deeply_nested(model: str) -> None:
     class Data(BaseModel):
         """Extracted data about people."""

-        people:
+        people: list[Person]

     chat = llm.with_structured_output(Data)  # type: ignore[arg-type]
     text = (
langchain_ollama-0.3.2/tests/integration_tests/chat_models/test_chat_models_standard.py
@@ -0,0 +1,42 @@
+"""Test chat model integration using standard integration tests."""
+
+from langchain_tests.integration_tests import ChatModelIntegrationTests
+
+from langchain_ollama.chat_models import ChatOllama
+
+
+class TestChatOllama(ChatModelIntegrationTests):
+    @property
+    def chat_model_class(self) -> type[ChatOllama]:
+        return ChatOllama
+
+    @property
+    def chat_model_params(self) -> dict:
+        return {"model": "llama3.1"}
+
+    @property
+    def supports_json_mode(self) -> bool:
+        return True
+
+    @property
+    def has_tool_choice(self) -> bool:
+        return False
+
+
+def test_image_model() -> None:
+    class ImageModelTests(ChatModelIntegrationTests):
+        @property
+        def chat_model_class(self) -> type[ChatOllama]:
+            return ChatOllama
+
+        @property
+        def chat_model_params(self) -> dict:
+            return {"model": "gemma3:4b"}
+
+        @property
+        def supports_image_inputs(self) -> bool:
+            return True
+
+    test_instance = ImageModelTests()
+    model = test_instance.chat_model_class(**test_instance.chat_model_params)
+    ImageModelTests().test_image_inputs(model)
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/integration_tests/test_embeddings.py
RENAMED
@@ -1,7 +1,5 @@
 """Test Ollama embeddings."""

-from typing import Type
-
 from langchain_tests.integration_tests import EmbeddingsIntegrationTests

 from langchain_ollama.embeddings import OllamaEmbeddings
@@ -9,7 +7,7 @@ from langchain_ollama.embeddings import OllamaEmbeddings

 class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
     @property
-    def embeddings_class(self) ->
+    def embeddings_class(self) -> type[OllamaEmbeddings]:
         return OllamaEmbeddings

     @property
{langchain_ollama-0.3.1 → langchain_ollama-0.3.2}/tests/unit_tests/test_chat_models.py
RENAMED
@@ -1,6 +1,6 @@
 """Test chat model integration."""
+
 import json
-from typing import Dict, Type

 from langchain_tests.unit_tests import ChatModelUnitTests

@@ -9,11 +9,11 @@ from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_

 class TestChatOllama(ChatModelUnitTests):
     @property
-    def chat_model_class(self) ->
+    def chat_model_class(self) -> type[ChatOllama]:
         return ChatOllama

     @property
-    def chat_model_params(self) ->
+    def chat_model_params(self) -> dict:
         return {"model": "llama3-groq-tool-use"}

langchain_ollama-0.3.1/tests/integration_tests/chat_models/test_chat_models_standard.py
@@ -1,29 +0,0 @@
-"""Test chat model integration using standard integration tests."""
-
-from typing import Type
-
-from langchain_tests.integration_tests import ChatModelIntegrationTests
-
-from langchain_ollama.chat_models import ChatOllama
-
-
-class TestChatOllama(ChatModelIntegrationTests):
-    @property
-    def chat_model_class(self) -> Type[ChatOllama]:
-        return ChatOllama
-
-    @property
-    def chat_model_params(self) -> dict:
-        return {"model": "llama3.1"}
-
-    @property
-    def supports_image_inputs(self) -> bool:
-        return True
-
-    @property
-    def supports_json_mode(self) -> bool:
-        return True
-
-    @property
-    def has_tool_choice(self) -> bool:
-        return False
The remaining files listed above (LICENSE, README.md, py.typed, the test package __init__ files, and the untouched test modules) are renamed with the new version prefix but contain no changes.