langchain-ollama 0.3.1__py3-none-any.whl → 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langchain_ollama/__init__.py CHANGED
@@ -3,7 +3,6 @@
 It provides infrastructure for interacting with the Ollama service.
 """
 
-
 from importlib import metadata
 
 from langchain_ollama.chat_models import ChatOllama
langchain_ollama/chat_models.py CHANGED
@@ -1,21 +1,14 @@
 """Ollama chat models."""
 
 import json
+from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Dict,
     Final,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
-    Sequence,
-    Tuple,
-    Type,
     Union,
     cast,
 )
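
Throughout this release, the deprecated `typing` aliases (`List`, `Dict`, `Tuple`, `Type`) give way to builtin generics, and the ABCs (`AsyncIterator`, `Iterator`, `Mapping`, `Sequence`) now come from `collections.abc`. Both spellings work on Python 3.9+ (PEP 585), which matches the wheel's `Requires-Python: >=3.9`. A minimal sketch of the equivalence, with illustrative names:

```python
# Illustrative only: the two annotation styles are runtime-equivalent on 3.9+.
from collections.abc import Mapping  # instead of typing.Mapping


def count_tokens(chunks: list[str], weights: Mapping[str, float]) -> dict[str, int]:
    """Builtin generics (PEP 585) replace typing.List / typing.Dict."""
    return {chunk: int(weights.get(chunk, 1.0)) for chunk in chunks}
```
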
@@ -33,10 +26,12 @@ from langchain_core.messages import (
     AIMessageChunk,
     BaseMessage,
     BaseMessageChunk,
+    ChatMessage,
     HumanMessage,
     SystemMessage,
     ToolCall,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
@@ -153,7 +148,7 @@ def _parse_arguments_from_tool_call(
 
 def _get_tool_calls_from_response(
     response: Mapping[str, Any],
-) -> List[ToolCall]:
+) -> list[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
@@ -180,6 +175,20 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
     }
 
 
+def _get_image_from_data_content_block(block: dict) -> str:
+    """Format standard data content block to format expected by Ollama."""
+    if block["type"] == "image":
+        if block["source_type"] == "base64":
+            return block["data"]
+        else:
+            error_message = "Image data only supported through in-line base64 format."
+            raise ValueError(error_message)
+
+    else:
+        error_message = f"Blocks of type {block['type']} not supported."
+        raise ValueError(error_message)
+
+
 def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)
 
@@ -341,7 +350,7 @@ class ChatOllama(BaseChatModel):
     model: str
     """Model name to use."""
 
-    extract_reasoning: Optional[Union[bool, Tuple[str, str]]] = False
+    extract_reasoning: Optional[Union[bool, tuple[str, str]]] = False
     """Whether to extract the reasoning tokens in think blocks.
     Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
     If a tuple is supplied, they are assumed to be the (start, end) tokens.
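
Only the annotation changes here (`Tuple` → `tuple`); behavior is unchanged. For reference, a sketch of the tuple form, assuming DeepSeek-style think tags (the model name is illustrative):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(
    model="deepseek-r1:8b",                     # illustrative model name
    extract_reasoning=("<think>", "</think>"),  # (start, end) tokens; True uses defaults
)
# Reasoning text is moved out of chunk.content into
# chunk.additional_kwargs["reasoning_content"].
```
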
@@ -399,7 +408,7 @@ class ChatOllama(BaseChatModel):
     to a specific number will make the model generate the same text for
     the same prompt."""
 
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -427,8 +436,22 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client.
-    For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
+    """Additional kwargs to pass to the httpx clients.
+    These arguments are passed to both synchronous and async clients.
+    Use sync_client_kwargs and async_client_kwargs to pass different arguments
+    to synchronous and asynchronous clients.
+    """
+
+    async_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx AsyncClient.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#asyncclient)
+    """
+
+    sync_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx Client.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#client)
     """
 
     _client: Client = PrivateAttr(default=None)  # type: ignore
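
This is the headline change of the release: `client_kwargs` becomes a shared base, with `sync_client_kwargs` and `async_client_kwargs` layered on per client. A minimal sketch (all values illustrative; any httpx `Client`/`AsyncClient` keyword works):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(
    model="llama3.1",                      # illustrative model name
    client_kwargs={"timeout": 30},         # base kwargs for both httpx clients
    sync_client_kwargs={"verify": False},  # merged into httpx.Client only
    async_client_kwargs={"timeout": 120},  # overrides the shared timeout for AsyncClient
)
```
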
@@ -443,10 +466,10 @@ class ChatOllama(BaseChatModel):
 
     def _chat_params(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         ollama_messages = self._convert_messages_to_ollama_messages(messages)
 
         if self.stop is not None and stop is not None:
@@ -494,18 +517,27 @@ class ChatOllama(BaseChatModel):
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
         client_kwargs = self.client_kwargs or {}
-        self._client = Client(host=self.base_url, **client_kwargs)
-        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+
+        sync_client_kwargs = client_kwargs
+        if self.sync_client_kwargs:
+            sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs}
+
+        async_client_kwargs = client_kwargs
+        if self.async_client_kwargs:
+            async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs}
+
+        self._client = Client(host=self.base_url, **sync_client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
         return self
 
     def _convert_messages_to_ollama_messages(
-        self, messages: List[BaseMessage]
+        self, messages: list[BaseMessage]
     ) -> Sequence[Message]:
-        ollama_messages: List = []
+        ollama_messages: list = []
         for message in messages:
-            role: Literal["user", "assistant", "system", "tool"]
+            role: str
             tool_call_id: Optional[str] = None
-            tool_calls: Optional[List[Dict[str, Any]]] = None
+            tool_calls: Optional[list[dict[str, Any]]] = None
             if isinstance(message, HumanMessage):
                 role = "user"
             elif isinstance(message, AIMessage):
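
Note the merge order in `_set_clients`: the per-client dict is unpacked last, so on a key collision its value wins over `client_kwargs`. A tiny illustration of the same dict-unpacking semantics:

```python
# Same semantics as {**sync_client_kwargs, **self.sync_client_kwargs} above.
base = {"timeout": 30, "verify": True}
override = {"timeout": 120}
assert {**base, **override} == {"timeout": 120, "verify": True}
```
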
@@ -520,6 +552,8 @@ class ChatOllama(BaseChatModel):
                 )
             elif isinstance(message, SystemMessage):
                 role = "system"
+            elif isinstance(message, ChatMessage):
+                role = message.role
             elif isinstance(message, ToolMessage):
                 role = "tool"
                 tool_call_id = message.tool_call_id
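
Previously a `ChatMessage` fell through to the unsupported-message branch because `role` was typed as a closed `Literal`; with `role: str`, custom roles are now forwarded to Ollama verbatim. A hypothetical example:

```python
from langchain_core.messages import ChatMessage

# The "control" role is illustrative; any role string is now passed through as-is.
msg = ChatMessage(role="control", content="thinking")
```
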
@@ -531,7 +565,7 @@ class ChatOllama(BaseChatModel):
             if isinstance(message.content, str):
                 content = message.content
             else:
-                for content_part in cast(List[Dict], message.content):
+                for content_part in cast(list[dict], message.content):
                     if content_part.get("type") == "text":
                         content += f"\n{content_part['text']}"
                     elif content_part.get("type") == "tool_use":
@@ -560,7 +594,9 @@ class ChatOllama(BaseChatModel):
                             images.append(image_url_components[1])
                         else:
                             images.append(image_url_components[0])
-
+                    elif is_data_content_block(content_part):
+                        image = _get_image_from_data_content_block(content_part)
+                        images.append(image)
                     else:
                         raise ValueError(
                             "Unsupported message content type. "
@@ -583,7 +619,7 @@ class ChatOllama(BaseChatModel):
 
     def _extract_reasoning(
         self, message_chunk: BaseMessageChunk, is_thinking: bool
-    ) -> Tuple[BaseMessageChunk, bool]:
+    ) -> tuple[BaseMessageChunk, bool]:
         """Mutate a message chunk to extract reasoning content."""
         if not self.extract_reasoning:
             return message_chunk, is_thinking
@@ -605,8 +641,8 @@ class ChatOllama(BaseChatModel):
 
     async def _acreate_chat_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -619,8 +655,8 @@ class ChatOllama(BaseChatModel):
 
     def _create_chat_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -632,8 +668,8 @@ class ChatOllama(BaseChatModel):
 
     def _chat_stream_with_aggregation(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -657,8 +693,8 @@ class ChatOllama(BaseChatModel):
 
     async def _achat_stream_with_aggregation(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -681,7 +717,7 @@ class ChatOllama(BaseChatModel):
         return final_chunk
 
     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
@@ -697,8 +733,8 @@ class ChatOllama(BaseChatModel):
 
     def _generate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -719,13 +755,18 @@ class ChatOllama(BaseChatModel):
 
     def _iterate_over_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -739,9 +780,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
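
This refactor also changes behavior slightly: the final (`done: true`) response still populates `generation_info`, but its bulky `message` field is dropped first, since the same content already arrives through the chunks themselves. A sketch of reading that metadata, assuming `stream()` surfaces `generation_info` through `response_metadata` as in recent langchain-core:

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1")        # illustrative model name
final_metadata: dict = {}
for chunk in llm.stream("Why is the sky blue?"):
    if chunk.response_metadata:           # populated from the final done=True chunk
        final_metadata = chunk.response_metadata
# Keys follow Ollama's response format (done_reason, eval_count, total_duration, ...);
# the raw "message" payload is no longer duplicated here.
```
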
@@ -756,8 +795,8 @@ class ChatOllama(BaseChatModel):
 
     def _stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
@@ -771,13 +810,18 @@ class ChatOllama(BaseChatModel):
 
     async def _aiterate_over_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -791,9 +835,7 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
                 if chunk.generation_info and (
                     model := chunk.generation_info.get("model")
@@ -808,8 +850,8 @@ class ChatOllama(BaseChatModel):
 
     async def _astream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
@@ -823,8 +865,8 @@ class ChatOllama(BaseChatModel):
 
     async def _agenerate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -850,7 +892,7 @@ class ChatOllama(BaseChatModel):
 
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
         *,
         tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
@@ -873,12 +915,12 @@ class ChatOllama(BaseChatModel):
 
     def with_structured_output(
         self,
-        schema: Union[Dict, type],
+        schema: Union[dict, type],
         *,
         method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         """Model wrapper that returns outputs formatted to match the given schema.
 
         Args:
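
Only annotations change in `bind_tools` and `with_structured_output`; the default `method="json_schema"` stays. A usage sketch, with an illustrative schema and model name:

```python
from pydantic import BaseModel

from langchain_ollama import ChatOllama


class City(BaseModel):
    name: str
    population: int


structured_llm = ChatOllama(model="llama3.1").with_structured_output(
    City, method="json_schema"
)
city = structured_llm.invoke("Return Paris and its population as JSON.")
# -> City(name="Paris", population=...)
```
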
langchain_ollama/embeddings.py CHANGED
@@ -1,6 +1,6 @@
 """Ollama embeddings models."""
 
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core.embeddings import Embeddings
 from ollama import AsyncClient, Client
@@ -127,8 +127,22 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client.
-    For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
+    """Additional kwargs to pass to the httpx clients.
+    These arguments are passed to both synchronous and async clients.
+    Use sync_client_kwargs and async_client_kwargs to pass different arguments
+    to synchronous and asynchronous clients.
+    """
+
+    async_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx AsyncClient.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#asyncclient)
+    """
+
+    sync_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx Client.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#client)
     """
 
     _client: Client = PrivateAttr(default=None)  # type: ignore
@@ -188,7 +202,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""
 
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -211,7 +225,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     )
 
     @property
-    def _default_params(self) -> Dict[str, Any]:
+    def _default_params(self) -> dict[str, Any]:
         """Get the default parameters for calling Ollama."""
         return {
             "mirostat": self.mirostat,
@@ -233,22 +247,31 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
         client_kwargs = self.client_kwargs or {}
-        self._client = Client(host=self.base_url, **client_kwargs)
-        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+
+        sync_client_kwargs = client_kwargs
+        if self.sync_client_kwargs:
+            sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs}
+
+        async_client_kwargs = client_kwargs
+        if self.async_client_kwargs:
+            async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs}
+
+        self._client = Client(host=self.base_url, **sync_client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
         return self
 
-    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = self._client.embed(
             self.model, texts, options=self._default_params, keep_alive=self.keep_alive
         )["embeddings"]
         return embedded_docs
 
-    def embed_query(self, text: str) -> List[float]:
+    def embed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return self.embed_documents([text])[0]
 
-    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = (
             await self._async_client.embed(
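
`OllamaEmbeddings` gets the same client-kwargs split and typing cleanup. A minimal usage sketch (model name and values illustrative):

```python
from langchain_ollama import OllamaEmbeddings

embedder = OllamaEmbeddings(
    model="nomic-embed-text",              # illustrative model name
    client_kwargs={"timeout": 30},         # shared httpx kwargs
    async_client_kwargs={"timeout": 120},  # async-only override
)
doc_vectors = embedder.embed_documents(["first doc", "second doc"])  # list[list[float]]
query_vector = embedder.embed_query("first")                         # list[float]
```
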
@@ -257,6 +280,6 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         )["embeddings"]
         return embedded_docs
 
-    async def aembed_query(self, text: str) -> List[float]:
+    async def aembed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return (await self.aembed_documents([text]))[0]
langchain_ollama/llms.py CHANGED
@@ -1,13 +1,9 @@
 """Ollama large language models."""
 
+from collections.abc import AsyncIterator, Iterator, Mapping
 from typing import (
     Any,
-    AsyncIterator,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
     Union,
 )
@@ -89,7 +85,7 @@ class OllamaLLM(BaseLLM):
     to a specific number will make the model generate the same text for
     the same prompt."""
 
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -117,8 +113,22 @@ class OllamaLLM(BaseLLM):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client.
-    For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
+    """Additional kwargs to pass to the httpx clients.
+    These arguments are passed to both synchronous and async clients.
+    Use sync_client_kwargs and async_client_kwargs to pass different arguments
+    to synchronous and asynchronous clients.
+    """
+
+    async_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx AsyncClient.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#asyncclient)
+    """
+
+    sync_client_kwargs: Optional[dict] = {}
+    """Additional kwargs to merge with client_kwargs before
+    passing to the httpx Client.
+    For a full list of the params, see [this link](https://www.python-httpx.org/api/#client)
     """
 
     _client: Client = PrivateAttr(default=None)  # type: ignore
@@ -134,9 +144,9 @@ class OllamaLLM(BaseLLM):
     def _generate_params(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if self.stop is not None and stop is not None:
             raise ValueError("`stop` found in both the input and default params.")
         elif self.stop is not None:
@@ -181,7 +191,7 @@ class OllamaLLM(BaseLLM):
         return "ollama-llm"
 
     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = super()._get_ls_params(stop=stop, **kwargs)
@@ -193,14 +203,23 @@ class OllamaLLM(BaseLLM):
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
         client_kwargs = self.client_kwargs or {}
-        self._client = Client(host=self.base_url, **client_kwargs)
-        self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
+
+        sync_client_kwargs = client_kwargs
+        if self.sync_client_kwargs:
+            sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs}
+
+        async_client_kwargs = client_kwargs
+        if self.async_client_kwargs:
+            async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs}
+
+        self._client = Client(host=self.base_url, **sync_client_kwargs)
+        self._async_client = AsyncClient(host=self.base_url, **async_client_kwargs)
         return self
 
     async def _acreate_generate_stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         async for part in await self._async_client.generate(
@@ -211,7 +230,7 @@ class OllamaLLM(BaseLLM):
     def _create_generate_stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         yield from self._client.generate(
@@ -221,7 +240,7 @@ class OllamaLLM(BaseLLM):
     async def _astream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -253,7 +272,7 @@ class OllamaLLM(BaseLLM):
     def _stream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -284,8 +303,8 @@ class OllamaLLM(BaseLLM):
 
     def _generate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -303,8 +322,8 @@ class OllamaLLM(BaseLLM):
 
     async def _agenerate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -323,7 +342,7 @@ class OllamaLLM(BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
@@ -345,7 +364,7 @@ class OllamaLLM(BaseLLM):
     async def _astream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[GenerationChunk]:
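
`OllamaLLM` receives the identical client-kwargs and typing treatment. A short streaming sketch (model name and timeout illustrative):

```python
from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3.1", client_kwargs={"timeout": 60})
for token in llm.stream("Write a haiku about the sea."):
    print(token, end="", flush=True)  # BaseLLM.stream yields str tokens
```
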
langchain_ollama-0.3.1.dist-info/METADATA → langchain_ollama-0.3.3.dist-info/METADATA CHANGED
@@ -1,14 +1,14 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.1
+Version: 0.3.3
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
 Project-URL: repository, https://github.com/langchain-ai/langchain
-Requires-Python: <4.0,>=3.9
-Requires-Dist: ollama<1,>=0.4.4
-Requires-Dist: langchain-core<1.0.0,>=0.3.51
+Requires-Python: >=3.9
+Requires-Dist: ollama<1.0.0,>=0.4.8
+Requires-Dist: langchain-core<1.0.0,>=0.3.60
 Description-Content-Type: text/markdown
 
 # langchain-ollama
langchain_ollama-0.3.3.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+langchain_ollama-0.3.3.dist-info/METADATA,sha256=K2QhMD3eEMIMegVdXf6ZyQ7C5fbl2wQ1CvvqtUOmyug,1462
+langchain_ollama-0.3.3.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+langchain_ollama-0.3.3.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_ollama-0.3.3.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama/__init__.py,sha256=1f8Cyf1_bS0CT16U8-Os1P1Oa3erIDtIBTH4KVmBLvY,633
+langchain_ollama/chat_models.py,sha256=Z2wzR5R568aNyH1LKN84kUdNZFOvvgY-csE626_sBVc,51723
+langchain_ollama/embeddings.py,sha256=udL26XHdUMybQogY9Gj3vlJXxxkVAVZ-9He2U8wlJ3k,9547
+langchain_ollama/llms.py,sha256=Rin6HVZvrH1epRsjhojSmOBFWAaU0cfOU1gV6I0bqJE,13933
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.3.3.dist-info/RECORD,,
langchain_ollama-0.3.1.dist-info/RECORD DELETED
@@ -1,10 +0,0 @@
-langchain_ollama-0.3.1.dist-info/METADATA,sha256=ryQPLWBXudxvwl7tLwTVn-qIhUQhP5oqAF0u-u6zjzc,1463
-langchain_ollama-0.3.1.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
-langchain_ollama-0.3.1.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-langchain_ollama-0.3.1.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
-langchain_ollama/chat_models.py,sha256=UUZl-xQh6hjb6zCJOXet1fpe3C-Pc-WNKPAaadLgBaY,49756
-langchain_ollama/embeddings.py,sha256=d0jSB-T8Awv0razTUA_iD-ZvTma82Nw44YtiVu983u0,8633
-langchain_ollama/llms.py,sha256=DnCpLYL3kmUSVObgmZN0XZC0OB-upUjtRLjA3HfZFXY,13012
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.3.1.dist-info/RECORD,,