langchain-ollama 0.2.0.dev1__py3-none-any.whl → 0.2.2rc1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the versions exactly as they appear in their respective public registries.
langchain_ollama/__init__.py CHANGED
@@ -1,3 +1,9 @@
+"""This is the langchain_ollama package.
+
+It provides infrastructure for interacting with the Ollama service.
+"""
+
+
 from importlib import metadata
 
 from langchain_ollama.chat_models import ChatOllama
langchain_ollama/chat_models.py CHANGED
@@ -66,8 +66,8 @@ def _get_tool_calls_from_response(
     """Get tool calls from ollama response."""
    tool_calls = []
    if "message" in response:
-        if "tool_calls" in response["message"]:
-            for tc in response["message"]["tool_calls"]:
+        if raw_tool_calls := response["message"].get("tool_calls"):
+            for tc in raw_tool_calls:
                tool_calls.append(
                    tool_call(
                        id=str(uuid4()),
@@ -90,9 +90,11 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
 
 
 class ChatOllama(BaseChatModel):
-    """Ollama chat model integration.
+    r"""Ollama chat model integration.
+
+    .. dropdown:: Setup
+        :open:
 
-    Setup:
         Install ``langchain-ollama`` and download any models you want to use from ollama.
 
         .. code-block:: bash
@@ -325,27 +327,36 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client.
+    """Additional kwargs to pass to the httpx Client. 
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _chat_params(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
@@ -357,14 +368,31 @@ class ChatOllama(BaseChatModel):
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
                 "seed": self.seed,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        tools = kwargs.get("tools")
+        default_stream = not bool(tools)
+
+        params = {
+            "messages": ollama_messages,
+            "stream": kwargs.pop("stream", default_stream),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        if tools:
+            params["tools"] = tools
+
+        return params
+
     @model_validator(mode="after")
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
@@ -462,37 +490,13 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )  # type:ignore
-        else:
-            async for part in await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            ):  # type:ignore
+        if chat_params["stream"]:
+            async for part in await self._async_client.chat(**chat_params):
                 yield part
+        else:
+            yield await self._async_client.chat(**chat_params)
 
     def _create_chat_stream(
         self,
@@ -500,36 +504,12 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )
+        if chat_params["stream"]:
+            yield from self._client.chat(**chat_params)
         else:
-            yield from self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            )
+            yield self._client.chat(**chat_params)
 
     def _chat_stream_with_aggregation(
         self,
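
With parameter assembly centralized, both `_acreate_chat_stream` and `_create_chat_stream` reduce to a single dispatch on `chat_params["stream"]`: per the ollama-python client, `chat()` returns an iterator of chunks when `stream=True` and a single mapping otherwise, so the sync version drains the iterator with `yield from` or wraps the one response in a bare `yield`. A self-contained sketch with a stub standing in for `ollama.Client`:

    from typing import Any, Iterator, Mapping

    class StubClient:
        """Stand-in for ollama.Client: an iterator of chunks when
        stream=True, a single mapping otherwise."""

        def chat(self, **params: Any):
            if params.get("stream"):
                return iter([{"message": {"content": "Hel"}},
                             {"message": {"content": "lo"}}])
            return {"message": {"content": "Hello"}}

    def create_chat_stream(client: StubClient,
                           **chat_params: Any) -> Iterator[Mapping[str, Any]]:
        # The refactored dispatch: callers always get an iterator,
        # whether the backend streamed or answered in one shot.
        if chat_params["stream"]:
            yield from client.chat(**chat_params)
        else:
            yield client.chat(**chat_params)

    print(list(create_chat_stream(StubClient(), stream=True)))   # two chunks
    print(list(create_chat_stream(StubClient(), stream=False)))  # one mapping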
@@ -748,6 +728,8 @@ class ChatOllama(BaseChatModel):
     def bind_tools(
         self,
         tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        *,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         """Bind tool-like objects to this chat model.
@@ -758,6 +740,8 @@ class ChatOllama(BaseChatModel):
             tools: A list of tool definitions to bind to this chat model.
                 Supports any tool definition handled by
                 :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            tool_choice: If provided, which tool for model to call. **This parameter
+                is currently ignored as it is not supported by Ollama.**
             kwargs: Any additional parameters are passed directly to
                 ``self.bind(**kwargs)``.
         """  # noqa: E501
langchain_ollama/embeddings.py CHANGED
@@ -1,3 +1,5 @@
+"""Ollama embeddings models."""
+
 from typing import (
     List,
     Optional,
@@ -132,12 +134,12 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
langchain_ollama/llms.py CHANGED
@@ -116,23 +116,30 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _generate_params(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
@@ -143,14 +150,25 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        params = {
+            "prompt": prompt,
+            "stream": kwargs.pop("stream", True),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        return params
+
     @property
     def _llm_type(self) -> str:
         """Return type of LLM."""
@@ -179,27 +197,10 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         async for part in await self._async_client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
+            **self._generate_params(prompt, stop=stop, **kwargs)
         ):  # type: ignore
-            yield part
+            yield part  # type: ignore
 
     def _create_generate_stream(
         self,
@@ -207,26 +208,9 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         yield from self._client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
-        )
+            **self._generate_params(prompt, stop=stop, **kwargs)
+        )  # type: ignore
 
     async def _astream_with_aggregation(
         self,
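
From the caller's side nothing changes: `OllamaLLM` still streams by default through these generators. A hedged usage example, assuming a running Ollama server and a pulled model (both the model name and the stop sequence are illustrative):

    from langchain_ollama import OllamaLLM

    llm = OllamaLLM(model="llama3", stop=["\n\n"])

    for chunk in llm.stream("Write one sentence about the ocean."):
        print(chunk, end="", flush=True)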
{langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.2rc1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.0.dev1
+Version: 0.2.2rc1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: langchain-core (>=0.3.0.dev4,<0.4.0)
+Requires-Dist: langchain-core (>=0.3.20,<0.4.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
langchain_ollama-0.2.2rc1.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
+langchain_ollama/chat_models.py,sha256=BS28WEnDBq0aUrlOyABbcMkvIk4C-oV_Zj6bnhQoJkM,29902
+langchain_ollama/embeddings.py,sha256=svqdPF44qX5qbFpZmLiXrzTC-AldmMlZRS5wBfY-EmA,5056
+langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.2.2rc1.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama-0.2.2rc1.dist-info/METADATA,sha256=E9wttWytUkVCrJtbUjYA0nMxIt8tTkZOQZDFCU6Z_nc,1828
+langchain_ollama-0.2.2rc1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain_ollama-0.2.2rc1.dist-info/RECORD,,
langchain_ollama-0.2.0.dev1.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
-langchain_ollama/__init__.py,sha256=HhQZqbCjhrbr2dC_9Dkw12pg4HPjnDXUoInROMNJKqA,518
-langchain_ollama/chat_models.py,sha256=q_URs_NzgY87XZ0RBDu-TY_seTh2lKXbtCXB7xY_utE,30423
-langchain_ollama/embeddings.py,sha256=46gmGxzK5Cm0GYesTSSgWupJYmJ2ywN7FQUAl0fzpxE,4991
-langchain_ollama/llms.py,sha256=uwQfKwDHXhWWVSAFzHpuv8SirBwKp0H4irnA8lqU0M4,13259
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.2.0.dev1.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama-0.2.0.dev1.dist-info/METADATA,sha256=qI7Sy504_I0CEJJNrXBZwHTz1b_f6QMKYdjEowTETh4,1834
-langchain_ollama-0.2.0.dev1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langchain_ollama-0.2.0.dev1.dist-info/RECORD,,