langchain-ollama 0.2.0.dev1__tar.gz → 0.2.2rc1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/PKG-INFO +2 -2
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/langchain_ollama/__init__.py +6 -0
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/langchain_ollama/chat_models.py +57 -73
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/langchain_ollama/embeddings.py +4 -2
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/langchain_ollama/llms.py +33 -49
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/pyproject.toml +18 -5
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/LICENSE +0 -0
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/README.md +0 -0
- {langchain_ollama-0.2.0.dev1 → langchain_ollama-0.2.2rc1}/langchain_ollama/py.typed +0 -0
```diff
--- langchain_ollama-0.2.0.dev1/PKG-INFO
+++ langchain_ollama-0.2.2rc1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.0.dev1
+Version: 0.2.2rc1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
```
```diff
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: langchain-core (>=0.3.…)
+Requires-Dist: langchain-core (>=0.3.20,<0.4.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
```
```diff
--- langchain_ollama-0.2.0.dev1/langchain_ollama/chat_models.py
+++ langchain_ollama-0.2.2rc1/langchain_ollama/chat_models.py
@@ -66,8 +66,8 @@ def _get_tool_calls_from_response(
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
-        if "tool_calls" in response["message"]:
-            for tc in response["message"]["tool_calls"]:
+        if raw_tool_calls := response["message"].get("tool_calls"):
+            for tc in raw_tool_calls:
                 tool_calls.append(
                     tool_call(
                         id=str(uuid4()),
```
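The rewritten guard uses the walrus operator so the key lookup and the truthiness check happen in one expression; a missing key and an empty tool-call list now take the same skip path. A minimal standalone sketch of the pattern (illustrative data, not package code):

```python
# ':=' binds the result of .get() and the `if` tests its truthiness,
# so a missing "tool_calls" key and an empty list are both skipped.
response = {"message": {"tool_calls": [{"function": {"name": "add"}}]}}

if raw_tool_calls := response["message"].get("tool_calls"):
    for tc in raw_tool_calls:
        print(tc["function"]["name"])  # -> add
```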
```diff
@@ -90,9 +90,11 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
 
 
 class ChatOllama(BaseChatModel):
-    """Ollama chat model integration.
+    r"""Ollama chat model integration.
+
+    .. dropdown:: Setup
+        :open:
 
-    Setup:
         Install ``langchain-ollama`` and download any models you want to use from ollama.
 
         .. code-block:: bash
```
```diff
@@ -325,27 +327,36 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client. 
+    """Additional kwargs to pass to the httpx Client.
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
    The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _chat_params(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
```
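The new `_chat_params` helper replaces the old `_default_params` property: per-call kwargs are drained with `kwargs.pop(key, default)`, so a caller-supplied value wins over the model-level field and anything left in `kwargs` is forwarded untouched. A hedged sketch of that pattern, using a hypothetical `build_options` helper:

```python
from typing import Any, Dict


def build_options(defaults: Dict[str, Any], **kwargs: Any) -> Dict[str, Any]:
    # kwargs.pop returns the caller's "options" when present (removing it
    # from kwargs); otherwise the model-level defaults are used.
    options = kwargs.pop("options", defaults)
    return {"options": options, **kwargs}


# Caller-supplied options win over the defaults:
print(build_options({"temperature": 0.8}, options={"temperature": 0.1}))
# {'options': {'temperature': 0.1}}
print(build_options({"temperature": 0.8}))
# {'options': {'temperature': 0.8}}
```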
```diff
@@ -357,14 +368,31 @@ class ChatOllama(BaseChatModel):
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
                 "seed": self.seed,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        tools = kwargs.get("tools")
+        default_stream = not bool(tools)
+
+        params = {
+            "messages": ollama_messages,
+            "stream": kwargs.pop("stream", default_stream),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        if tools:
+            params["tools"] = tools
+
+        return params
+
     @model_validator(mode="after")
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
```
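Two behaviors fall out of this hunk: requests now default to streaming only when no tools are passed (`default_stream = not bool(tools)`), and a `stop` list may come from the call or from the model default, but not both. A small sketch of the stop precedence, with a hypothetical `resolve_stop` helper mirroring the diff's logic:

```python
from typing import List, Optional


def resolve_stop(
    default_stop: Optional[List[str]], stop: Optional[List[str]]
) -> Optional[List[str]]:
    # Mirrors the diff: supplying stop both on the model and per call is an
    # error; otherwise the model-level default fills in when the call omits it.
    if default_stop is not None and stop is not None:
        raise ValueError("`stop` found in both the input and default params.")
    return default_stop if stop is None else stop


assert resolve_stop(None, ["\n"]) == ["\n"]
assert resolve_stop(["END"], None) == ["END"]
```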
```diff
@@ -462,37 +490,13 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )  # type:ignore
-        else:
-            async for part in await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            ):  # type:ignore
+        if chat_params["stream"]:
+            async for part in await self._async_client.chat(**chat_params):
                 yield part
+        else:
+            yield await self._async_client.chat(**chat_params)
 
     def _create_chat_stream(
         self,
```
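With the parameter assembly moved into `_chat_params`, the async path reduces to a dispatch on `chat_params["stream"]`. A usage sketch of the async surface (model name is illustrative and assumes a running Ollama server with the model pulled):

```python
import asyncio

from langchain_ollama import ChatOllama


async def main() -> None:
    llm = ChatOllama(model="llama3.1")  # illustrative model name
    # astream() drives _acreate_chat_stream, which now just unpacks
    # the dict returned by _chat_params.
    async for chunk in llm.astream("Why is the sky blue?"):
        print(chunk.content, end="", flush=True)


asyncio.run(main())
```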
```diff
@@ -500,36 +504,12 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )
+        if chat_params["stream"]:
+            yield from self._client.chat(**chat_params)
         else:
-            yield from self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            )
+            yield self._client.chat(**chat_params)
 
     def _chat_stream_with_aggregation(
         self,
```
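The sync path mirrors the async one, so both now consume the same parameter dict. A usage sketch under the same assumptions (local Ollama server, illustrative model name):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1")  # assumes `ollama pull llama3.1` was run

# stream() consumes _create_chat_stream; because no tools are bound,
# the request defaults to streaming.
for chunk in llm.stream("Why is the sky blue?"):
    print(chunk.content, end="", flush=True)
```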
```diff
@@ -748,6 +728,8 @@ class ChatOllama(BaseChatModel):
     def bind_tools(
         self,
         tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        *,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         """Bind tool-like objects to this chat model.
```
```diff
@@ -758,6 +740,8 @@ class ChatOllama(BaseChatModel):
             tools: A list of tool definitions to bind to this chat model.
                 Supports any tool definition handled by
                 :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            tool_choice: If provided, which tool for model to call. **This parameter
+                is currently ignored as it is not supported by Ollama.**
             kwargs: Any additional parameters are passed directly to
                 ``self.bind(**kwargs)``.
         """  # noqa: E501
```
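The new keyword-only `tool_choice` keeps the signature aligned with other providers even though, per the docstring, it is ignored. A usage sketch (tool body and model name are illustrative):

```python
from langchain_core.tools import tool
from langchain_ollama import ChatOllama


@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b


llm = ChatOllama(model="llama3.1")  # illustrative model name
# tool_choice is accepted for signature parity but currently has no effect.
llm_with_tools = llm.bind_tools([add], tool_choice="any")
msg = llm_with_tools.invoke("What is 2 + 3?")
print(msg.tool_calls)
```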
```diff
--- langchain_ollama-0.2.0.dev1/langchain_ollama/embeddings.py
+++ langchain_ollama-0.2.2rc1/langchain_ollama/embeddings.py
@@ -1,3 +1,5 @@
+"""Ollama embeddings models."""
+
 from typing import (
     List,
     Optional,
```
```diff
@@ -132,12 +134,12 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
```
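The repeated `# type: ignore` comments acknowledge that `PrivateAttr(default=None)` assigns `None` to attributes annotated with non-Optional client types; a validator populates them after init. A minimal sketch of the same pattern, with a stand-in client class (not the package's code):

```python
from pydantic import BaseModel, PrivateAttr, model_validator
from typing_extensions import Self


class FakeClient:
    """Stand-in for ollama.Client in this sketch."""


class Wrapper(BaseModel):
    # A None default on a non-Optional annotation is why the diff
    # adds `# type: ignore` for static type checkers.
    _client: FakeClient = PrivateAttr(default=None)  # type: ignore

    @model_validator(mode="after")
    def _set_clients(self) -> Self:
        self._client = FakeClient()
        return self


assert isinstance(Wrapper()._client, FakeClient)
```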
```diff
--- langchain_ollama-0.2.0.dev1/langchain_ollama/llms.py
+++ langchain_ollama-0.2.2rc1/langchain_ollama/llms.py
@@ -116,23 +116,30 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _generate_params(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
```
```diff
@@ -143,14 +150,25 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        params = {
+            "prompt": prompt,
+            "stream": kwargs.pop("stream", True),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        return params
+
     @property
     def _llm_type(self) -> str:
         """Return type of LLM."""
```
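`_generate_params` follows the same shape as `_chat_params`: per-call kwargs shadow the model-level defaults via `kwargs.pop(key, default)`, and whatever remains in `kwargs` is forwarded as-is. A hedged sketch of that assembly, with a hypothetical module-level `generate_params` standing in for the method:

```python
from typing import Any, Dict

DEFAULTS = {"model": "llama3.1", "format": "", "keep_alive": None}  # illustrative


def generate_params(prompt: str, **kwargs: Any) -> Dict[str, Any]:
    # Per-call kwargs shadow model-level defaults, exactly the kwargs.pop
    # shape used by the new _generate_params.
    return {
        "prompt": prompt,
        "stream": kwargs.pop("stream", True),
        "model": kwargs.pop("model", DEFAULTS["model"]),
        "format": kwargs.pop("format", DEFAULTS["format"]),
        "keep_alive": kwargs.pop("keep_alive", DEFAULTS["keep_alive"]),
        **kwargs,  # anything left over is forwarded untouched
    }


print(generate_params("hi", model="mistral")["model"])  # -> mistral
print(generate_params("hi")["stream"])  # -> True (generate defaults to streaming)
```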
```diff
@@ -179,27 +197,10 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         async for part in await self._async_client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
+            **self._generate_params(prompt, stop=stop, **kwargs)
         ):  # type: ignore
-            yield part
+            yield part  # type: ignore
 
     def _create_generate_stream(
         self,
```
```diff
@@ -207,26 +208,9 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         yield from self._client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
-        )
+            **self._generate_params(prompt, stop=stop, **kwargs)
+        )  # type: ignore
 
     async def _astream_with_aggregation(
         self,
```
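After this change both generate-stream helpers simply unpack `_generate_params` into the client call. A usage sketch (illustrative model name, assumes a local Ollama server):

```python
from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3.1")  # assumes the model has been pulled

# stream() walks _create_generate_stream, which forwards the dict built
# by _generate_params into client.generate().
for chunk in llm.stream("Tell me a one-line joke."):
    print(chunk, end="", flush=True)
```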
```diff
--- langchain_ollama-0.2.0.dev1/pyproject.toml
+++ langchain_ollama-0.2.2rc1/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "langchain-ollama"
-version = "0.2.0.dev1"
+version = "0.2.2rc1"
 description = "An integration package connecting Ollama and LangChain"
 authors = []
 readme = "README.md"
```
```diff
@@ -21,10 +21,23 @@ disallow_untyped_defs = "True"
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 ollama = ">=0.3.0,<1"
-langchain-core = …
+langchain-core = "^0.3.20"
 
 [tool.ruff.lint]
-select = ["E", "F", "I", "T201"]
+select = [
+    "E",  # pycodestyle
+    "F",  # pyflakes
+    "I",  # isort
+    "T201",  # print
+    "D",  # pydocstyle
+
+]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.per-file-ignores]
+"tests/**" = ["D"]  # ignore docstring checks for tests
 
 [tool.coverage.run]
 omit = ["tests/*"]
```
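The expanded `select` list turns on pydocstyle ("D") with the Google convention, which the new module docstring in embeddings.py satisfies and the per-file ignore exempts tests from. A sketch of a docstring shape that passes those checks (the function and body are hypothetical):

```python
"""Module summary line, as pydocstyle's D100/D400 checks expect."""


def embed_text(text: str) -> list[float]:
    """Return an embedding for ``text``.

    Args:
        text: The input string to embed.

    Returns:
        A fixed-size vector of floats (stub body for illustration).
    """
    return [0.0] * 8
```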
```diff
@@ -32,7 +45,7 @@ omit = ["tests/*"]
 [tool.pytest.ini_options]
 addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
 markers = [
-    …
+    "compile: mark placeholder test used to compile integration tests without running them",
 ]
 asyncio_mode = "auto"
 
```
```diff
@@ -73,7 +86,7 @@ mypy = "^1.7.1"
 path = "../../core"
 develop = true
 
-[tool.poetry.group.test.dependencies.langchain-standard-tests]
+[tool.poetry.group.test.dependencies.langchain-tests]
 path = "../../standard-tests"
 develop = true
 
```
LICENSE: file without changes
README.md: file without changes
langchain_ollama/py.typed: file without changes