langchain-ollama 0.3.0-py3-none-any.whl → 0.3.2-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
langchain_ollama/__init__.py CHANGED
@@ -3,7 +3,6 @@
 It provides infrastructure for interacting with the Ollama service.
 """
 
-
 from importlib import metadata
 
 from langchain_ollama.chat_models import ChatOllama
langchain_ollama/chat_models.py CHANGED
@@ -1,21 +1,14 @@
 """Ollama chat models."""
 
 import json
+from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
 from operator import itemgetter
 from typing import (
     Any,
-    AsyncIterator,
     Callable,
-    Dict,
     Final,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
-    Sequence,
-    Tuple,
-    Type,
     Union,
     cast,
 )
@@ -37,6 +30,7 @@ from langchain_core.messages import (
     SystemMessage,
     ToolCall,
     ToolMessage,
+    is_data_content_block,
 )
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
@@ -153,7 +147,7 @@ def _parse_arguments_from_tool_call(
 
 def _get_tool_calls_from_response(
     response: Mapping[str, Any],
-) -> List[ToolCall]:
+) -> list[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
@@ -180,6 +174,20 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
     }
 
 
+def _get_image_from_data_content_block(block: dict) -> str:
+    """Format standard data content block to format expected by Ollama."""
+    if block["type"] == "image":
+        if block["source_type"] == "base64":
+            return block["data"]
+        else:
+            error_message = "Image data only supported through in-line base64 format."
+            raise ValueError(error_message)
+
+    else:
+        error_message = f"Blocks of type {block['type']} not supported."
+        raise ValueError(error_message)
+
+
 def _is_pydantic_class(obj: Any) -> bool:
     return isinstance(obj, type) and is_basemodel_subclass(obj)
 
@@ -341,7 +349,7 @@ class ChatOllama(BaseChatModel):
     model: str
     """Model name to use."""
 
-    extract_reasoning: Optional[Union[bool, Tuple[str, str]]] = False
+    extract_reasoning: Optional[Union[bool, tuple[str, str]]] = False
     """Whether to extract the reasoning tokens in think blocks.
     Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
     If a tuple is supplied, they are assumed to be the (start, end) tokens.
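Note: the hunk above only modernizes the annotation (Tuple -> builtin tuple); the `extract_reasoning` behaviour is unchanged. A hedged usage sketch of what the docstring describes, assuming a locally pulled model that emits think blocks (the model name is a placeholder):

    from langchain_ollama import ChatOllama

    llm = ChatOllama(model="deepseek-r1", extract_reasoning=True)  # placeholder model name
    msg = llm.invoke("What is 7 * 6?")
    print(msg.content)  # answer text, with the think block removed
    print(msg.additional_kwargs.get("reasoning_content"))  # extracted reasoning tokens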
@@ -399,7 +407,7 @@ class ChatOllama(BaseChatModel):
     to a specific number will make the model generate the same text for
     the same prompt."""
 
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -443,10 +451,10 @@ class ChatOllama(BaseChatModel):
 
     def _chat_params(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         ollama_messages = self._convert_messages_to_ollama_messages(messages)
 
         if self.stop is not None and stop is not None:
@@ -499,13 +507,13 @@ class ChatOllama(BaseChatModel):
         return self
 
     def _convert_messages_to_ollama_messages(
-        self, messages: List[BaseMessage]
+        self, messages: list[BaseMessage]
     ) -> Sequence[Message]:
-        ollama_messages: List = []
+        ollama_messages: list = []
         for message in messages:
            role: Literal["user", "assistant", "system", "tool"]
            tool_call_id: Optional[str] = None
-            tool_calls: Optional[List[Dict[str, Any]]] = None
+            tool_calls: Optional[list[dict[str, Any]]] = None
            if isinstance(message, HumanMessage):
                role = "user"
            elif isinstance(message, AIMessage):
@@ -531,7 +539,7 @@ class ChatOllama(BaseChatModel):
             if isinstance(message.content, str):
                 content = message.content
             else:
-                for content_part in cast(List[Dict], message.content):
+                for content_part in cast(list[dict], message.content):
                     if content_part.get("type") == "text":
                         content += f"\n{content_part['text']}"
                     elif content_part.get("type") == "tool_use":
@@ -560,7 +568,9 @@ class ChatOllama(BaseChatModel):
                             images.append(image_url_components[1])
                         else:
                             images.append(image_url_components[0])
-
+                    elif is_data_content_block(content_part):
+                        image = _get_image_from_data_content_block(content_part)
+                        images.append(image)
                     else:
                         raise ValueError(
                             "Unsupported message content type. "
@@ -583,7 +593,7 @@ class ChatOllama(BaseChatModel):
 
     def _extract_reasoning(
         self, message_chunk: BaseMessageChunk, is_thinking: bool
-    ) -> Tuple[BaseMessageChunk, bool]:
+    ) -> tuple[BaseMessageChunk, bool]:
         """Mutate a message chunk to extract reasoning content."""
         if not self.extract_reasoning:
             return message_chunk, is_thinking
@@ -605,8 +615,8 @@ class ChatOllama(BaseChatModel):
 
     async def _acreate_chat_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -619,8 +629,8 @@ class ChatOllama(BaseChatModel):
 
     def _create_chat_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         chat_params = self._chat_params(messages, stop, **kwargs)
@@ -632,8 +642,8 @@ class ChatOllama(BaseChatModel):
 
     def _chat_stream_with_aggregation(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -657,8 +667,8 @@ class ChatOllama(BaseChatModel):
 
     async def _achat_stream_with_aggregation(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -681,7 +691,7 @@ class ChatOllama(BaseChatModel):
         return final_chunk
 
     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = self._get_invocation_params(stop=stop, **kwargs)
@@ -697,8 +707,8 @@ class ChatOllama(BaseChatModel):
 
     def _generate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -719,13 +729,18 @@ class ChatOllama(BaseChatModel):
 
     def _iterate_over_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
         is_thinking = False
         for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -739,10 +754,12 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
+                if chunk.generation_info and (
+                    model := chunk.generation_info.get("model")
+                ):
+                    chunk.generation_info["model_name"] = model  # backwards compat
                 if self.extract_reasoning:
                     message, is_thinking = self._extract_reasoning(
                         chunk.message, is_thinking
@@ -752,8 +769,8 @@ class ChatOllama(BaseChatModel):
 
     def _stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[ChatGenerationChunk]:
@@ -767,13 +784,18 @@ class ChatOllama(BaseChatModel):
 
     async def _aiterate_over_stream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
         is_thinking = False
         async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
             if not isinstance(stream_resp, str):
+                if stream_resp.get("done") is True:
+                    generation_info = dict(stream_resp)
+                    _ = generation_info.pop("message", None)
+                else:
+                    generation_info = None
                 chunk = ChatGenerationChunk(
                     message=AIMessageChunk(
                         content=(
@@ -787,10 +809,12 @@ class ChatOllama(BaseChatModel):
                         ),
                         tool_calls=_get_tool_calls_from_response(stream_resp),
                     ),
-                    generation_info=(
-                        dict(stream_resp) if stream_resp.get("done") is True else None
-                    ),
+                    generation_info=generation_info,
                 )
+                if chunk.generation_info and (
+                    model := chunk.generation_info.get("model")
+                ):
+                    chunk.generation_info["model_name"] = model  # backwards compat
                 if self.extract_reasoning:
                     message, is_thinking = self._extract_reasoning(
                         chunk.message, is_thinking
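In both the sync and async stream paths above, the final `done` chunk's metadata is now copied into `generation_info` (with the raw `message` popped), and the `model` field is mirrored into `model_name` for backwards compatibility. A hedged sketch of where this surfaces for callers (field names come from Ollama's final response):

    # Assuming `llm` is a ChatOllama instance as in the earlier sketches.
    msg = llm.invoke("hello")
    print(msg.response_metadata.get("model_name"))      # mirrored from "model"
    print(msg.response_metadata.get("total_duration"))  # other fields from the final chunk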
@@ -800,8 +824,8 @@ class ChatOllama(BaseChatModel):
 
     async def _astream(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> AsyncIterator[ChatGenerationChunk]:
@@ -815,8 +839,8 @@ class ChatOllama(BaseChatModel):
 
     async def _agenerate(
         self,
-        messages: List[BaseMessage],
-        stop: Optional[List[str]] = None,
+        messages: list[BaseMessage],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> ChatResult:
@@ -842,7 +866,7 @@ class ChatOllama(BaseChatModel):
 
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        tools: Sequence[Union[dict[str, Any], type, Callable, BaseTool]],
         *,
         tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
@@ -865,12 +889,12 @@ class ChatOllama(BaseChatModel):
 
     def with_structured_output(
         self,
-        schema: Union[Dict, type],
+        schema: Union[dict, type],
         *,
         method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
         include_raw: bool = False,
         **kwargs: Any,
-    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
+    ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         """Model wrapper that returns outputs formatted to match the given schema.
 
         Args:
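Aside from the Dict -> dict annotation change, `with_structured_output` keeps its behaviour and still defaults to method="json_schema". A minimal sketch with an illustrative schema (assumes `llm` is a ChatOllama instance):

    from pydantic import BaseModel

    class Joke(BaseModel):
        setup: str
        punchline: str

    structured_llm = llm.with_structured_output(Joke)  # uses method="json_schema" by default
    structured_llm.invoke("Tell me a joke about parrots.")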
langchain_ollama/embeddings.py CHANGED
@@ -1,6 +1,6 @@
 """Ollama embeddings models."""
 
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from langchain_core.embeddings import Embeddings
 from ollama import AsyncClient, Client
@@ -188,7 +188,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""
 
-    stop: Optional[List[str]] = None
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -211,7 +211,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     )
 
     @property
-    def _default_params(self) -> Dict[str, Any]:
+    def _default_params(self) -> dict[str, Any]:
         """Get the default parameters for calling Ollama."""
         return {
             "mirostat": self.mirostat,
@@ -237,18 +237,18 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
         return self
 
-    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+    def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = self._client.embed(
             self.model, texts, options=self._default_params, keep_alive=self.keep_alive
         )["embeddings"]
         return embedded_docs
 
-    def embed_query(self, text: str) -> List[float]:
+    def embed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return self.embed_documents([text])[0]
 
-    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
+    async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
         """Embed search docs."""
         embedded_docs = (
             await self._async_client.embed(
@@ -257,6 +257,6 @@ class OllamaEmbeddings(BaseModel, Embeddings):
         )["embeddings"]
         return embedded_docs
 
-    async def aembed_query(self, text: str) -> List[float]:
+    async def aembed_query(self, text: str) -> list[float]:
         """Embed query text."""
         return (await self.aembed_documents([text]))[0]
langchain_ollama/llms.py CHANGED
@@ -1,13 +1,9 @@
 """Ollama large language models."""
 
+from collections.abc import AsyncIterator, Iterator, Mapping
 from typing import (
     Any,
-    AsyncIterator,
-    Dict,
-    Iterator,
-    List,
     Literal,
-    Mapping,
     Optional,
     Union,
 )
@@ -84,7 +80,12 @@ class OllamaLLM(BaseLLM):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""
 
-    stop: Optional[List[str]] = None
+    seed: Optional[int] = None
+    """Sets the random number seed to use for generation. Setting this
+    to a specific number will make the model generate the same text for
+    the same prompt."""
+
+    stop: Optional[list[str]] = None
     """Sets the stop tokens to use."""
 
     tfs_z: Optional[float] = None
@@ -129,9 +130,9 @@ class OllamaLLM(BaseLLM):
     def _generate_params(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         if self.stop is not None and stop is not None:
             raise ValueError("`stop` found in both the input and default params.")
         elif self.stop is not None:
@@ -150,6 +151,7 @@ class OllamaLLM(BaseLLM):
                "repeat_last_n": self.repeat_last_n,
                "repeat_penalty": self.repeat_penalty,
                "temperature": self.temperature,
+                "seed": self.seed,
                "stop": self.stop if stop is None else stop,
                "tfs_z": self.tfs_z,
                "top_k": self.top_k,
@@ -175,7 +177,7 @@ class OllamaLLM(BaseLLM):
         return "ollama-llm"
 
     def _get_ls_params(
-        self, stop: Optional[List[str]] = None, **kwargs: Any
+        self, stop: Optional[list[str]] = None, **kwargs: Any
     ) -> LangSmithParams:
         """Get standard params for tracing."""
         params = super()._get_ls_params(stop=stop, **kwargs)
@@ -194,7 +196,7 @@ class OllamaLLM(BaseLLM):
     async def _acreate_generate_stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
         async for part in await self._async_client.generate(
@@ -205,7 +207,7 @@ class OllamaLLM(BaseLLM):
     def _create_generate_stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
         yield from self._client.generate(
@@ -215,7 +217,7 @@ class OllamaLLM(BaseLLM):
     async def _astream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -247,7 +249,7 @@ class OllamaLLM(BaseLLM):
     def _stream_with_aggregation(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         verbose: bool = False,
         **kwargs: Any,
@@ -278,8 +280,8 @@ class OllamaLLM(BaseLLM):
 
     def _generate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -297,8 +299,8 @@ class OllamaLLM(BaseLLM):
 
     async def _agenerate(
         self,
-        prompts: List[str],
-        stop: Optional[List[str]] = None,
+        prompts: list[str],
+        stop: Optional[list[str]] = None,
         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> LLMResult:
@@ -317,7 +319,7 @@ class OllamaLLM(BaseLLM):
     def _stream(
         self,
         prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
         run_manager: Optional[CallbackManagerForLLMRun] = None,
         **kwargs: Any,
     ) -> Iterator[GenerationChunk]:
@@ -339,7 +341,7 @@ class OllamaLLM(BaseLLM):
    async def _astream(
        self,
        prompt: str,
-        stop: Optional[List[str]] = None,
+        stop: Optional[list[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
langchain_ollama-0.3.0.dist-info/METADATA → langchain_ollama-0.3.2.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.3.0
+Version: 0.3.2
 Summary: An integration package connecting Ollama and LangChain
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
@@ -8,7 +8,7 @@ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q
 Project-URL: repository, https://github.com/langchain-ai/langchain
 Requires-Python: <4.0,>=3.9
 Requires-Dist: ollama<1,>=0.4.4
-Requires-Dist: langchain-core<1.0.0,>=0.3.47
+Requires-Dist: langchain-core<1.0.0,>=0.3.52
 Description-Content-Type: text/markdown
 
 # langchain-ollama
langchain_ollama-0.3.2.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+langchain_ollama-0.3.2.dist-info/METADATA,sha256=58k8ADvokbZrjkTN5_-DRJWHYxZI6A1IbYO7rJ2DWc8,1463
+langchain_ollama-0.3.2.dist-info/WHEEL,sha256=tSfRZzRHthuv7vxpI4aehrdN9scLjk-dCJkPLzkHxGg,90
+langchain_ollama-0.3.2.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
+langchain_ollama-0.3.2.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama/__init__.py,sha256=1f8Cyf1_bS0CT16U8-Os1P1Oa3erIDtIBTH4KVmBLvY,633
+langchain_ollama/chat_models.py,sha256=3ZvSHz-14idWKykyQgMV2i84bFrXVRjpU9dbGTz4_hs,50735
+langchain_ollama/embeddings.py,sha256=2G0gfnUbPBpVv9oBzL7C3z3FI_VumQ2WCYCf_-LMz-Q,8621
+langchain_ollama/llms.py,sha256=DiCWKLX2JPZAoVoRTKKQ2yOuoXbVStg0wkS1p6IruQU,13007
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.3.2.dist-info/RECORD,,
langchain_ollama-0.3.0.dist-info/WHEEL → langchain_ollama-0.3.2.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: pdm-backend (2.4.3)
+Generator: pdm-backend (2.4.4)
 Root-Is-Purelib: true
 Tag: py3-none-any
langchain_ollama-0.3.0.dist-info/RECORD DELETED
@@ -1,10 +0,0 @@
-langchain_ollama-0.3.0.dist-info/METADATA,sha256=VcLxoKw-32dqWPuJrjPGq2HwweTu_v3ZEtLNIRNUBRc,1463
-langchain_ollama-0.3.0.dist-info/WHEEL,sha256=thaaA2w1JzcGC48WYufAs8nrYZjJm8LqNfnXFOFyCC4,90
-langchain_ollama-0.3.0.dist-info/entry_points.txt,sha256=6OYgBcLyFCUgeqLgnvMyOJxPCWzgy7se4rLPKtNonMs,34
-langchain_ollama-0.3.0.dist-info/licenses/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
-langchain_ollama/chat_models.py,sha256=VMk5GnKiyPQ5TERQDhdSe2uiBOKtCP0GmYlcJs4CC14,49328
-langchain_ollama/embeddings.py,sha256=d0jSB-T8Awv0razTUA_iD-ZvTma82Nw44YtiVu983u0,8633
-langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.3.0.dist-info/RECORD,,