langchain-ollama 0.2.3__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff compares the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- langchain_ollama/chat_models.py +132 -92
- langchain_ollama/embeddings.py +11 -4
- {langchain_ollama-0.2.3.dist-info → langchain_ollama-0.3.0.dist-info}/METADATA +6 -15
- langchain_ollama-0.3.0.dist-info/RECORD +10 -0
- {langchain_ollama-0.2.3.dist-info → langchain_ollama-0.3.0.dist-info}/WHEEL +1 -1
- langchain_ollama-0.3.0.dist-info/entry_points.txt +4 -0
- langchain_ollama-0.2.3.dist-info/RECORD +0 -9
- {langchain_ollama-0.2.3.dist-info → langchain_ollama-0.3.0.dist-info/licenses}/LICENSE +0 -0
langchain_ollama/chat_models.py
CHANGED
```diff
@@ -7,12 +7,14 @@ from typing import (
     AsyncIterator,
     Callable,
     Dict,
+    Final,
     Iterator,
     List,
     Literal,
     Mapping,
     Optional,
     Sequence,
+    Tuple,
     Type,
     Union,
     cast,
```
```diff
@@ -30,6 +32,7 @@ from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
     BaseMessage,
+    BaseMessageChunk,
     HumanMessage,
     SystemMessage,
     ToolCall,
```
```diff
@@ -47,15 +50,19 @@ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResu
 from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import (
-
+    convert_to_json_schema,
+    convert_to_openai_tool,
 )
-from langchain_core.utils.function_calling import convert_to_openai_tool
 from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
 from ollama import AsyncClient, Client, Message, Options
 from pydantic import BaseModel, PrivateAttr, model_validator
 from pydantic.json_schema import JsonSchemaValue
+from pydantic.v1 import BaseModel as BaseModelV1
 from typing_extensions import Self, is_typeddict
 
+DEFAULT_THINK_TOKEN_START: Final[str] = "<think>"
+DEFAULT_THINK_TOKEN_END: Final[str] = "</think>"
+
 
 def _get_usage_metadata_from_generation_info(
     generation_info: Optional[Mapping[str, Any]],
```
```diff
@@ -124,13 +131,17 @@ def _parse_arguments_from_tool_call(
     if "function" not in raw_tool_call:
         return None
     arguments = raw_tool_call["function"]["arguments"]
-    parsed_arguments = {}
+    parsed_arguments: dict = {}
     if isinstance(arguments, dict):
         for key, value in arguments.items():
             if isinstance(value, str):
-
+                parsed_value = _parse_json_string(
                     value, skip=True, raw_tool_call=raw_tool_call
                 )
+                if isinstance(parsed_value, (dict, list)):
+                    parsed_arguments[key] = parsed_value
+                else:
+                    parsed_arguments[key] = value
             else:
                 parsed_arguments[key] = value
     else:
```
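The reworked branch above only swaps in the parsed value when it decodes to a dict or list; strings that decode to scalars are now kept verbatim. A standalone sketch of that rule, using plain `json.loads` as a stand-in for the package's `_parse_json_string` helper:

```python
import json

def parse_arguments(arguments: dict) -> dict:
    """Mirror of the new rule: keep a parsed string only if it is a dict/list."""
    parsed: dict = {}
    for key, value in arguments.items():
        if isinstance(value, str):
            try:
                candidate = json.loads(value)  # _parse_json_string stand-in
            except json.JSONDecodeError:
                candidate = value
            parsed[key] = candidate if isinstance(candidate, (dict, list)) else value
        else:
            parsed[key] = value
    return parsed

print(parse_arguments({"count": "2", "filters": '{"city": "Paris"}'}))
# {'count': '2', 'filters': {'city': 'Paris'}}
```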
```diff
@@ -228,7 +239,7 @@ class ChatOllama(BaseChatModel):
                 ("human", "Return the words Hello World!"),
             ]
             for chunk in llm.stream(messages):
-                print(chunk)
+                print(chunk.text(), end="")
 
 
         .. code-block:: python
```
```diff
@@ -330,6 +341,13 @@ class ChatOllama(BaseChatModel):
     model: str
     """Model name to use."""
 
+    extract_reasoning: Optional[Union[bool, Tuple[str, str]]] = False
+    """Whether to extract the reasoning tokens in think blocks.
+    Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
+    If a tuple is supplied, they are assumed to be the (start, end) tokens.
+    If `extract_reasoning=True`, the tokens will default to (<think>, </think>).
+    """
+
     mirostat: Optional[int] = None
     """Enable Mirostat sampling for controlling perplexity.
     (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
```
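A hedged usage sketch of the new field: only the `extract_reasoning` parameter, its defaults, and the `reasoning_content` key come from this diff; the model name and prompt below are illustrative assumptions.

```python
from langchain_ollama import ChatOllama

# Assumed example model; any Ollama model that emits <think>...</think> blocks.
llm = ChatOllama(model="deepseek-r1", extract_reasoning=True)
# Equivalent with explicit delimiters instead of the <think>/</think> defaults:
# llm = ChatOllama(model="deepseek-r1", extract_reasoning=("<think>", "</think>"))

msg = llm.invoke("How many r's are in 'strawberry'?")
print(msg.content)                                     # answer, think block removed
print(msg.additional_kwargs.get("reasoning_content"))  # extracted reasoning tokens
```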
```diff
@@ -563,6 +581,28 @@ class ChatOllama(BaseChatModel):
 
         return ollama_messages
 
+    def _extract_reasoning(
+        self, message_chunk: BaseMessageChunk, is_thinking: bool
+    ) -> Tuple[BaseMessageChunk, bool]:
+        """Mutate a message chunk to extract reasoning content."""
+        if not self.extract_reasoning:
+            return message_chunk, is_thinking
+        elif self.extract_reasoning is True:
+            start_token = DEFAULT_THINK_TOKEN_START
+            end_token = DEFAULT_THINK_TOKEN_END
+        else:
+            start_token, end_token = cast(tuple, self.extract_reasoning)
+        if start_token in message_chunk.content:
+            is_thinking = True
+        content = message_chunk.content
+        if is_thinking:
+            message_chunk.additional_kwargs["reasoning_content"] = content
+            message_chunk.content = ""
+        if end_token in content:
+            is_thinking = False
+
+        return message_chunk, is_thinking
+
     async def _acreate_chat_stream(
         self,
         messages: List[BaseMessage],
```
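The helper carries an `is_thinking` flag across chunks: once a chunk contains the start token, that chunk and every following chunk up to and including the one containing the end token is moved into `additional_kwargs["reasoning_content"]` and blanked from `content`. A standalone illustration of that state machine on plain strings (not library code):

```python
from typing import List, Tuple

def split_reasoning(
    chunks: List[str],
    start_token: str = "<think>",
    end_token: str = "</think>",
) -> Tuple[str, str]:
    """Route streamed chunks into (reasoning, answer) the way _extract_reasoning does."""
    reasoning, answer = [], []
    is_thinking = False
    for content in chunks:
        if start_token in content:
            is_thinking = True
        if is_thinking:
            reasoning.append(content)  # would land in additional_kwargs["reasoning_content"]
        else:
            answer.append(content)     # would stay in message.content
        if end_token in content:
            is_thinking = False
    return "".join(reasoning), "".join(answer)

print(split_reasoning(["<think>", "step 1", "</think>", "final answer"]))
# ('<think>step 1</think>', 'final answer')
```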
```diff
@@ -599,35 +639,17 @@ class ChatOllama(BaseChatModel):
         **kwargs: Any,
     ) -> ChatGenerationChunk:
         final_chunk = None
-        for
-            if
-
-
-
-
-
-
-
-
-                        usage_metadata=_get_usage_metadata_from_generation_info(
-                            stream_resp
-                        ),
-                        tool_calls=_get_tool_calls_from_response(stream_resp),
-                    ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+        for chunk in self._iterate_over_stream(messages, stop, **kwargs):
+            if final_chunk is None:
+                final_chunk = chunk
+            else:
+                final_chunk += chunk
+            if run_manager:
+                run_manager.on_llm_new_token(
+                    chunk.text,
+                    chunk=chunk,
+                    verbose=verbose,
                 )
-                if final_chunk is None:
-                    final_chunk = chunk
-                else:
-                    final_chunk += chunk
-                if run_manager:
-                    run_manager.on_llm_new_token(
-                        chunk.text,
-                        chunk=chunk,
-                        verbose=verbose,
-                    )
         if final_chunk is None:
             raise ValueError("No data received from Ollama stream.")
 
```
```diff
@@ -642,35 +664,17 @@ class ChatOllama(BaseChatModel):
         **kwargs: Any,
     ) -> ChatGenerationChunk:
         final_chunk = None
-        async for
-            if
-
-
-
-
-
-
-
-
-                        usage_metadata=_get_usage_metadata_from_generation_info(
-                            stream_resp
-                        ),
-                        tool_calls=_get_tool_calls_from_response(stream_resp),
-                    ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+        async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
+            if final_chunk is None:
+                final_chunk = chunk
+            else:
+                final_chunk += chunk
+            if run_manager:
+                await run_manager.on_llm_new_token(
+                    chunk.text,
+                    chunk=chunk,
+                    verbose=verbose,
                 )
-                if final_chunk is None:
-                    final_chunk = chunk
-                else:
-                    final_chunk += chunk
-                if run_manager:
-                    await run_manager.on_llm_new_token(
-                        chunk.text,
-                        chunk=chunk,
-                        verbose=verbose,
-                    )
         if final_chunk is None:
             raise ValueError("No data received from Ollama stream.")
 
```
```diff
@@ -707,18 +711,19 @@ class ChatOllama(BaseChatModel):
                 content=final_chunk.text,
                 usage_metadata=cast(AIMessageChunk, final_chunk.message).usage_metadata,
                 tool_calls=cast(AIMessageChunk, final_chunk.message).tool_calls,
+                additional_kwargs=final_chunk.message.additional_kwargs,
             ),
             generation_info=generation_info,
         )
         return ChatResult(generations=[chat_generation])
 
-    def
+    def _iterate_over_stream(
         self,
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
+        is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = ChatGenerationChunk(
```
```diff
@@ -738,20 +743,35 @@ class ChatOllama(BaseChatModel):
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
                 )
-                if
-
-                        chunk.
-                        verbose=self.verbose,
+                if self.extract_reasoning:
+                    message, is_thinking = self._extract_reasoning(
+                        chunk.message, is_thinking
                     )
+                    chunk.message = message
                 yield chunk
 
-
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        for chunk in self._iterate_over_stream(messages, stop, **kwargs):
+            if run_manager:
+                run_manager.on_llm_new_token(
+                    chunk.text,
+                    verbose=self.verbose,
+                )
+            yield chunk
+
+    async def _aiterate_over_stream(
         self,
         messages: List[BaseMessage],
         stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
+        is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = ChatGenerationChunk(
```
```diff
@@ -771,13 +791,28 @@ class ChatOllama(BaseChatModel):
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
                 )
-                if
-
-                        chunk.
-                        verbose=self.verbose,
+                if self.extract_reasoning:
+                    message, is_thinking = self._extract_reasoning(
+                        chunk.message, is_thinking
                     )
+                    chunk.message = message
                 yield chunk
 
+    async def _astream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> AsyncIterator[ChatGenerationChunk]:
+        async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
+            if run_manager:
+                await run_manager.on_llm_new_token(
+                    chunk.text,
+                    verbose=self.verbose,
+                )
+            yield chunk
+
     async def _agenerate(
         self,
         messages: List[BaseMessage],
```
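With this refactor, `_stream` and `_astream` are thin wrappers that replay `_iterate_over_stream` / `_aiterate_over_stream` and fire callbacks, so reasoning extraction also applies while streaming. A hedged streaming sketch (model name is an assumption):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="deepseek-r1", extract_reasoning=True)
for chunk in llm.stream("Briefly: why is the sky blue?"):
    reasoning = chunk.additional_kwargs.get("reasoning_content", "")
    if reasoning:
        print(f"[thinking] {reasoning}", end="")  # think-block tokens
    else:
        print(chunk.text(), end="")               # regular answer tokens
```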
```diff
@@ -794,6 +829,7 @@ class ChatOllama(BaseChatModel):
                 content=final_chunk.text,
                 usage_metadata=cast(AIMessageChunk, final_chunk.message).usage_metadata,
                 tool_calls=cast(AIMessageChunk, final_chunk.message).tool_calls,
+                additional_kwargs=final_chunk.message.additional_kwargs,
             ),
             generation_info=generation_info,
         )
```
```diff
@@ -831,9 +867,7 @@ class ChatOllama(BaseChatModel):
         self,
         schema: Union[Dict, type],
         *,
-        method: Literal[
-            "function_calling", "json_mode", "json_schema"
-        ] = "function_calling",
+        method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
```
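Because the default `method` is now `"json_schema"`, calling `with_structured_output` without a `method` argument uses Ollama's structured output API. A minimal sketch, mirroring the docstring example (assumes a local `llama3.1` model):

```python
from pydantic import BaseModel
from langchain_ollama import ChatOllama

class AnswerWithJustification(BaseModel):
    answer: str
    justification: str

llm = ChatOllama(model="llama3.1", temperature=0)
# No method= argument: as of 0.3.0 this defaults to "json_schema".
structured_llm = llm.with_structured_output(AnswerWithJustification)
result = structured_llm.invoke(
    "What weighs more, a pound of bricks or a pound of feathers?"
)
print(type(result).__name__)  # AnswerWithJustification
```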
```diff
@@ -857,10 +891,10 @@ class ChatOllama(BaseChatModel):
 
             method: The method for steering model generation, one of:
 
-                - "function_calling":
-                    Uses Ollama's tool-calling API
                 - "json_schema":
                     Uses Ollama's structured output API: https://ollama.com/blog/structured-outputs
+                - "function_calling":
+                    Uses Ollama's tool-calling API
                 - "json_mode":
                     Specifies ``format="json"``. Note that if using JSON mode then you
                     must include instructions for formatting the output into the
```
```diff
@@ -891,7 +925,11 @@ class ChatOllama(BaseChatModel):
 
             Added support for structured output API via ``format`` parameter.
 
-        ..
+        .. versionchanged:: 0.3.0
+
+            Updated default ``method`` to ``"json_schema"``.
+
+        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False
 
             .. code-block:: python
 
```
```diff
@@ -924,7 +962,7 @@ class ChatOllama(BaseChatModel):
                 # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                 # )
 
-        .. dropdown:: Example: schema=Pydantic class, method="
+        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True
 
             .. code-block:: python
 
```
```diff
@@ -953,7 +991,7 @@ class ChatOllama(BaseChatModel):
                 # 'parsing_error': None
                 # }
 
-        .. dropdown:: Example: schema=Pydantic class, method="
+        .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False
 
             .. code-block:: python
 
```
```diff
@@ -974,7 +1012,7 @@ class ChatOllama(BaseChatModel):
 
                 llm = ChatOllama(model="llama3.1", temperature=0)
                 structured_llm = llm.with_structured_output(
-                    AnswerWithJustification, method="
+                    AnswerWithJustification, method="function_calling"
                 )
 
                 structured_llm.invoke(
```
```diff
@@ -1076,6 +1114,7 @@ class ChatOllama(BaseChatModel):
                 # 'parsing_error': None
                 # }
         """ # noqa: E501, D301
+        _ = kwargs.pop("strict", None)
         if kwargs:
             raise ValueError(f"Received unsupported arguments {kwargs}")
         is_pydantic_schema = _is_pydantic_class(schema)
```
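The popped `strict` keyword is now accepted and silently ignored rather than tripping the `Received unsupported arguments` check below it; a small hedged sketch, reusing the classes from the example above:

```python
# In 0.2.3 this raised ValueError("Received unsupported arguments {'strict': True}");
# in 0.3.0 the keyword is discarded before the check.
structured_llm = llm.with_structured_output(AnswerWithJustification, strict=True)
```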
```diff
@@ -1090,7 +1129,7 @@ class ChatOllama(BaseChatModel):
             llm = self.bind_tools(
                 [schema],
                 tool_choice=tool_name,
-
+                ls_structured_output_format={
                     "kwargs": {"method": method},
                     "schema": formatted_tool,
                 },
```
```diff
@@ -1107,7 +1146,7 @@ class ChatOllama(BaseChatModel):
         elif method == "json_mode":
             llm = self.bind(
                 format="json",
-
+                ls_structured_output_format={
                     "kwargs": {"method": method},
                     "schema": schema,
                 },
```
```diff
@@ -1125,9 +1164,13 @@ class ChatOllama(BaseChatModel):
                 )
             if is_pydantic_schema:
                 schema = cast(TypeBaseModel, schema)
+                if issubclass(schema, BaseModelV1):
+                    response_format = schema.schema()
+                else:
+                    response_format = schema.model_json_schema()
                 llm = self.bind(
-                    format=
-
+                    format=response_format,
+                    ls_structured_output_format={
                         "kwargs": {"method": method},
                         "schema": schema,
                     },
```
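The new branch exists because the two pydantic generations expose different schema exporters (`.schema()` vs `.model_json_schema()`). A standalone illustration of the call each side of the branch selects (class names are examples):

```python
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1

class PersonV1(BaseModelV1):
    name: str

class PersonV2(BaseModel):
    name: str

def to_response_format(schema: type) -> dict:
    # Same selection as the diff: v1-style models only provide .schema().
    if issubclass(schema, BaseModelV1):
        return schema.schema()
    return schema.model_json_schema()

print(to_response_format(PersonV1)["title"], to_response_format(PersonV2)["title"])
# PersonV1 PersonV2
```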
```diff
@@ -1135,20 +1178,17 @@ class ChatOllama(BaseChatModel):
                 output_parser = PydanticOutputParser(pydantic_object=schema)
             else:
                 if is_typeddict(schema):
-
-                    response_format = convert_any_typed_dicts_to_pydantic(
-                        schema, visited={}
-                    ).schema() # type: ignore[attr-defined]
+                    response_format = convert_to_json_schema(schema)
                     if "required" not in response_format:
                         response_format["required"] = list(
                             response_format["properties"].keys()
                         )
                 else:
                     # is JSON schema
-                    response_format = schema
+                    response_format = cast(dict, schema)
                 llm = self.bind(
                     format=response_format,
-
+                    ls_structured_output_format={
                         "kwargs": {"method": method},
                         "schema": response_format,
                     },
```
langchain_ollama/embeddings.py
CHANGED
```diff
@@ -164,6 +164,11 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """The number of GPUs to use. On macOS it defaults to 1 to
     enable metal support, 0 to disable."""
 
+    keep_alive: Optional[int] = None
+    """controls how long the model will stay loaded into memory
+    following the request (default: 5m)
+    """
+
     num_thread: Optional[int] = None
     """Sets the number of threads to use during computation.
     By default, Ollama will detect this for optimal performance.
```
```diff
@@ -235,7 +240,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
         embedded_docs = self._client.embed(
-            self.model, texts, options=self._default_params
+            self.model, texts, options=self._default_params, keep_alive=self.keep_alive
         )["embeddings"]
         return embedded_docs
 
```
```diff
@@ -245,9 +250,11 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
     async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = (
-
-
+        embedded_docs = (
+            await self._async_client.embed(
+                self.model, texts, keep_alive=self.keep_alive
+            )
+        )["embeddings"]
         return embedded_docs
 
     async def aembed_query(self, text: str) -> List[float]:
```
{langchain_ollama-0.2.3.dist-info → langchain_ollama-0.3.0.dist-info}/METADATA
CHANGED
```diff
@@ -1,22 +1,14 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.3
+Version: 0.3.0
 Summary: An integration package connecting Ollama and LangChain
-Home-page: https://github.com/langchain-ai/langchain
 License: MIT
-Requires-Python: >=3.9,<4.0
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: langchain-core (>=0.3.33,<0.4.0)
-Requires-Dist: ollama (>=0.4.4,<1)
-Project-URL: Repository, https://github.com/langchain-ai/langchain
-Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
+Project-URL: repository, https://github.com/langchain-ai/langchain
+Requires-Python: <4.0,>=3.9
+Requires-Dist: ollama<1,>=0.4.4
+Requires-Dist: langchain-core<1.0.0,>=0.3.47
 Description-Content-Type: text/markdown
 
 # langchain-ollama
```
````diff
@@ -63,4 +55,3 @@ from langchain_ollama import OllamaLLM
 llm = OllamaLLM(model="llama3")
 llm.invoke("The meaning of life is")
 ```
-
````
langchain_ollama-0.3.0.dist-info/RECORD
ADDED
```diff
@@ -0,0 +1,10 @@
+langchain_ollama-0.3.0.dist-info/METADATA,sha256=VcLxoKw-32dqWPuJrjPGq2HwweTu_v3ZEtLNIRNUBRc,1463
+langchain_ollama-0.3.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
+langchain_ollama-0.3.0.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_ollama-0.3.0.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
+langchain_ollama/chat_models.py,sha256=VMk5GnKiyPQ5TERQDhdSe2uiBOKtCP0GmYlcJs4CC14,49328
+langchain_ollama/embeddings.py,sha256=d0jSB-T8Awv0razTUA_iD-ZvTma82Nw44YtiVu983u0,8633
+langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.3.0.dist-info/RECORD,,
```
langchain_ollama-0.2.3.dist-info/RECORD
REMOVED
```diff
@@ -1,9 +0,0 @@
-langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
-langchain_ollama/chat_models.py,sha256=YDaHyz5t4EfQrMIGJsNFdiPH9LJUOBdrBjlr0qAC8GM,48172
-langchain_ollama/embeddings.py,sha256=rZLgMvuEVqMRo1kPr9pPPrGVpEOes76cwzkXJRged_4,8397
-langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.2.3.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama-0.2.3.dist-info/METADATA,sha256=BZ3HPeJJiDPaEhUjJIC-3SmIhQuNs6r97LS7EOVoPsE,1876
-langchain_ollama-0.2.3.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-langchain_ollama-0.2.3.dist-info/RECORD,,
```
{langchain_ollama-0.2.3.dist-info → langchain_ollama-0.3.0.dist-info/licenses}/LICENSE
File without changes