langchain-ollama 0.2.0__py3-none-any.whl → 0.2.2rc1__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
langchain_ollama/__init__.py CHANGED
@@ -1,3 +1,9 @@
+"""This is the langchain_ollama package.
+
+It provides infrastructure for interacting with the Ollama service.
+"""
+
+
 from importlib import metadata
 
 from langchain_ollama.chat_models import ChatOllama
langchain_ollama/chat_models.py CHANGED
@@ -66,8 +66,8 @@ def _get_tool_calls_from_response(
     """Get tool calls from ollama response."""
    tool_calls = []
    if "message" in response:
-        if "tool_calls" in response["message"]:
-            for tc in response["message"]["tool_calls"]:
+        if raw_tool_calls := response["message"].get("tool_calls"):
+            for tc in raw_tool_calls:
                tool_calls.append(
                    tool_call(
                        id=str(uuid4()),
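
The change above collapses a containment check plus a repeated dictionary lookup into a single assignment expression. A minimal standalone sketch of the equivalent behavior, using a made-up response payload:

    # Hypothetical Ollama-style response payload, for illustration only.
    response = {
        "message": {
            "tool_calls": [
                {"function": {"name": "add", "arguments": {"a": 1, "b": 2}}}
            ]
        }
    }

    # One .get() call bound with the walrus operator; a missing key and an
    # empty list are both falsy, so either skips the loop entirely.
    if raw_tool_calls := response["message"].get("tool_calls"):
        for tc in raw_tool_calls:
            print(tc["function"]["name"], tc["function"]["arguments"])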
@@ -90,7 +90,7 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
 
 
 class ChatOllama(BaseChatModel):
-    """Ollama chat model integration.
+    r"""Ollama chat model integration.
 
     .. dropdown:: Setup
        :open:
@@ -327,27 +327,36 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
    client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client. 
+    """Additional kwargs to pass to the httpx Client.
    For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
    """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
    """
    The client to use for making requests.
    """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
    """
    The async client to use for making requests.
    """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _chat_params(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                "mirostat": self.mirostat,
                "mirostat_eta": self.mirostat_eta,
                "mirostat_tau": self.mirostat_tau,
@@ -359,14 +368,31 @@ class ChatOllama(BaseChatModel):
                 "repeat_penalty": self.repeat_penalty,
                "temperature": self.temperature,
                "seed": self.seed,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                "tfs_z": self.tfs_z,
                "top_k": self.top_k,
                "top_p": self.top_p,
            },
-            "keep_alive": self.keep_alive,
+        )
+
+        tools = kwargs.get("tools")
+        default_stream = not bool(tools)
+
+        params = {
+            "messages": ollama_messages,
+            "stream": kwargs.pop("stream", default_stream),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
        }
 
+        if tools:
+            params["tools"] = tools
+
+        return params
+
    @model_validator(mode="after")
    def _set_clients(self) -> Self:
        """Set clients to use for ollama."""
@@ -464,37 +490,13 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )  # type:ignore
-        else:
-            async for part in await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            ):  # type:ignore
+        if chat_params["stream"]:
+            async for part in await self._async_client.chat(**chat_params):
                yield part
+        else:
+            yield await self._async_client.chat(**chat_params)
 
    def _create_chat_stream(
        self,
@@ -502,36 +504,12 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Iterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )
+        if chat_params["stream"]:
+            yield from self._client.chat(**chat_params)
        else:
-            yield from self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            )
+            yield self._client.chat(**chat_params)
 
    def _chat_stream_with_aggregation(
        self,
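
Both stream helpers now branch on the single `stream` flag computed in `_chat_params` instead of duplicating the full argument list of `chat(...)` in each branch. A toy sketch of the dispatch shape, with a stub standing in for `ollama.Client`:

    from typing import Any, Dict, Iterator, Mapping

    class StubClient:
        # Stand-in for ollama.Client, for illustration only.
        def chat(self, **params: Any) -> Any:
            if params["stream"]:
                return iter([{"message": {"content": "Hel"}},
                             {"message": {"content": "lo"}}])
            return {"message": {"content": "Hello"}}

    def create_chat_stream(client: StubClient,
                           chat_params: Dict[str, Any]) -> Iterator[Mapping[str, Any]]:
        # Streaming yields parts as they arrive; non-streaming (e.g. when
        # tools are bound) yields one complete response.
        if chat_params["stream"]:
            yield from client.chat(**chat_params)
        else:
            yield client.chat(**chat_params)

    for part in create_chat_stream(StubClient(), {"stream": True}):
        print(part["message"]["content"], end="")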
@@ -750,6 +728,8 @@ class ChatOllama(BaseChatModel):
     def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        *,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.
@@ -760,6 +740,8 @@ class ChatOllama(BaseChatModel):
             tools: A list of tool definitions to bind to this chat model.
                Supports any tool definition handled by
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            tool_choice: If provided, which tool for model to call. **This parameter
+                is currently ignored as it is not supported by Ollama.**
            kwargs: Any additional parameters are passed directly to
                ``self.bind(**kwargs)``.
        """  # noqa: E501
langchain_ollama/embeddings.py CHANGED
@@ -1,3 +1,5 @@
+"""Ollama embeddings models."""
+
 from typing import (
    List,
    Optional,
@@ -132,12 +134,12 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
    """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
    """
    The client to use for making requests.
    """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
    """
    The async client to use for making requests.
    """
langchain_ollama/llms.py CHANGED
@@ -116,23 +116,30 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
    """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
    """
    The client to use for making requests.
    """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
    """
    The async client to use for making requests.
    """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _generate_params(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                "mirostat": self.mirostat,
                "mirostat_eta": self.mirostat_eta,
                "mirostat_tau": self.mirostat_tau,
@@ -143,14 +150,25 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                "repeat_penalty": self.repeat_penalty,
                "temperature": self.temperature,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                "tfs_z": self.tfs_z,
                "top_k": self.top_k,
                "top_p": self.top_p,
            },
-            "keep_alive": self.keep_alive,
+        )
+
+        params = {
+            "prompt": prompt,
+            "stream": kwargs.pop("stream", True),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
        }
 
+        return params
+
    @property
    def _llm_type(self) -> str:
        """Return type of LLM."""
@@ -179,27 +197,10 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
        async for part in await self._async_client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
+            **self._generate_params(prompt, stop=stop, **kwargs)
        ):  # type: ignore
-            yield part
+            yield part  # type: ignore
 
    def _create_generate_stream(
        self,
@@ -207,26 +208,9 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> Iterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
        yield from self._client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
-        )
+            **self._generate_params(prompt, stop=stop, **kwargs)
+        )  # type: ignore
 
    async def _astream_with_aggregation(
        self,
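
Both generate-stream helpers now expand `**self._generate_params(...)` rather than repeating each argument, so streaming remains the default (`stream=True`) unless a caller overrides it. A hedged streaming example, under the same local-server assumption as above:

    from langchain_ollama import OllamaLLM

    llm = OllamaLLM(model="llama3.1")

    # _create_generate_stream drives this loop; chunks print as they arrive.
    for chunk in llm.stream("Count to three."):
        print(chunk, end="", flush=True)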
langchain_ollama-0.2.2rc1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.0
+Version: 0.2.2rc1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: langchain-core (>=0.3.0,<0.4.0)
+Requires-Dist: langchain-core (>=0.3.20,<0.4.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
langchain_ollama-0.2.2rc1.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
+langchain_ollama/chat_models.py,sha256=BS28WEnDBq0aUrlOyABbcMkvIk4C-oV_Zj6bnhQoJkM,29902
+langchain_ollama/embeddings.py,sha256=svqdPF44qX5qbFpZmLiXrzTC-AldmMlZRS5wBfY-EmA,5056
+langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.2.2rc1.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama-0.2.2rc1.dist-info/METADATA,sha256=E9wttWytUkVCrJtbUjYA0nMxIt8tTkZOQZDFCU6Z_nc,1828
+langchain_ollama-0.2.2rc1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langchain_ollama-0.2.2rc1.dist-info/RECORD,,
langchain_ollama-0.2.0.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
-langchain_ollama/__init__.py,sha256=HhQZqbCjhrbr2dC_9Dkw12pg4HPjnDXUoInROMNJKqA,518
-langchain_ollama/chat_models.py,sha256=lfpm1D4YM_VjGegHq6JJws9nIzIp-QtX57VZvT8GC4I,30452
-langchain_ollama/embeddings.py,sha256=46gmGxzK5Cm0GYesTSSgWupJYmJ2ywN7FQUAl0fzpxE,4991
-langchain_ollama/llms.py,sha256=uwQfKwDHXhWWVSAFzHpuv8SirBwKp0H4irnA8lqU0M4,13259
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.2.0.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama-0.2.0.dist-info/METADATA,sha256=slqxbRBWofN8p4ewKoKh7hljZqey_qhjr3zrYwDgD0g,1824
-langchain_ollama-0.2.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langchain_ollama-0.2.0.dist-info/RECORD,,