langchain-ollama 0.2.0__tar.gz → 0.2.2rc1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/PKG-INFO +2 -2
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/langchain_ollama/__init__.py +6 -0
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/langchain_ollama/chat_models.py +54 -72
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/langchain_ollama/embeddings.py +4 -2
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/langchain_ollama/llms.py +33 -49
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/pyproject.toml +18 -5
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/LICENSE +0 -0
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/README.md +0 -0
- {langchain_ollama-0.2.0 → langchain_ollama-0.2.2rc1}/langchain_ollama/py.typed +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.0
+Version: 0.2.2rc1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: langchain-core (>=0.3.
+Requires-Dist: langchain-core (>=0.3.20,<0.4.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
langchain_ollama/chat_models.py

@@ -66,8 +66,8 @@ def _get_tool_calls_from_response(
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
-        if
-            for tc in
+        if raw_tool_calls := response["message"].get("tool_calls"):
+            for tc in raw_tool_calls:
                 tool_calls.append(
                     tool_call(
                         id=str(uuid4()),
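The rewritten parser above uses `.get()` plus the walrus operator, so a response whose `message` has no `tool_calls` key simply skips the loop. A minimal standalone sketch of the shape it expects, using a hand-written dict in place of a live Ollama response and plain dicts in place of `tool_call(...)`:

```python
# Illustration only: the response dict and field values are hypothetical.
from uuid import uuid4

response = {
    "message": {
        "tool_calls": [
            {"function": {"name": "get_weather", "arguments": {"city": "Paris"}}},
        ]
    }
}

tool_calls = []
if "message" in response:
    # Missing "tool_calls" yields None, so the walrus check skips the loop
    # instead of raising KeyError.
    if raw_tool_calls := response["message"].get("tool_calls"):
        for tc in raw_tool_calls:
            tool_calls.append(
                {
                    "id": str(uuid4()),
                    "name": tc["function"]["name"],
                    "args": tc["function"]["arguments"],
                }
            )

print(tool_calls)  # one parsed call; an empty "message" would leave []
```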
@@ -90,7 +90,7 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
 
 
 class ChatOllama(BaseChatModel):
-    """Ollama chat model integration.
+    r"""Ollama chat model integration.
 
     .. dropdown:: Setup
         :open:
@@ -327,27 +327,36 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client.
+    """Additional kwargs to pass to the httpx Client.
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
    The client to use for making requests.
    """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
    """
    The async client to use for making requests.
    """
 
-
-
-
-
-
-
-
+    def _chat_params(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,

@@ -359,14 +368,31 @@ class ChatOllama(BaseChatModel):
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
                 "seed": self.seed,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-
+        )
+
+        tools = kwargs.get("tools")
+        default_stream = not bool(tools)
+
+        params = {
+            "messages": ollama_messages,
+            "stream": kwargs.pop("stream", default_stream),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        if tools:
+            params["tools"] = tools
+
+        return params
+
     @model_validator(mode="after")
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
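The new `_chat_params` helper centralizes two behaviors that matter to callers: `stop` may come from the call or from the model default but not both, and streaming defaults off as soon as tools are present. A standalone restatement of that logic with plain values in place of the model's fields (an illustration, not the package's code):

```python
from typing import Any, Dict, List, Optional


def chat_params_sketch(
    default_stop: Optional[List[str]],
    stop: Optional[List[str]] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    # stop precedence, as in the diff: both set -> error, else the default wins.
    if default_stop is not None and stop is not None:
        raise ValueError("`stop` found in both the input and default params.")
    elif default_stop is not None:
        stop = default_stop

    tools = kwargs.get("tools")
    # streaming defaults to False whenever tools are supplied
    default_stream = not bool(tools)

    return {
        "stream": kwargs.pop("stream", default_stream),
        "options": {"stop": stop},
        **kwargs,
    }


print(chat_params_sketch(None)["stream"])                          # True
print(chat_params_sketch(None, tools=[{"name": "t"}])["stream"])   # False
print(chat_params_sketch(["###"], stream=True)["options"])         # {'stop': ['###']}
```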
@@ -464,37 +490,13 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )  # type:ignore
-        else:
-            async for part in await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            ):  # type:ignore
+        if chat_params["stream"]:
+            async for part in await self._async_client.chat(**chat_params):
                 yield part
+        else:
+            yield await self._async_client.chat(**chat_params)
 
     def _create_chat_stream(
         self,
@@ -502,36 +504,12 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )
+        if chat_params["stream"]:
+            yield from self._client.chat(**chat_params)
         else:
-            yield
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            )
+            yield self._client.chat(**chat_params)
 
     def _chat_stream_with_aggregation(
         self,
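After this refactor, both the sync and async chat streams are driven entirely by the `stream` flag computed in `_chat_params` rather than by duplicated branches at each call site. A hedged usage sketch from the caller's side (assumes a running local Ollama server; the `llama3.1` model name is a placeholder):

```python
import asyncio

from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3.1", temperature=0)  # model name is a placeholder

# Sync path: with no tools bound, _chat_params leaves stream=True,
# so _create_chat_stream yields parts as they arrive.
for chunk in llm.stream("Name three prime numbers."):
    print(chunk.content, end="", flush=True)


# Async path: _acreate_chat_stream follows the same flag.
async def main() -> None:
    async for chunk in llm.astream("Name three more."):
        print(chunk.content, end="", flush=True)


asyncio.run(main())
```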
@@ -750,6 +728,8 @@ class ChatOllama(BaseChatModel):
     def bind_tools(
         self,
         tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        *,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         """Bind tool-like objects to this chat model.
@@ -760,6 +740,8 @@ class ChatOllama(BaseChatModel):
             tools: A list of tool definitions to bind to this chat model.
                 Supports any tool definition handled by
                 :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            tool_choice: If provided, which tool for model to call. **This parameter
+                is currently ignored as it is not supported by Ollama.**
             kwargs: Any additional parameters are passed directly to
                 ``self.bind(**kwargs)``.
         """  # noqa: E501
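The new keyword-only `tool_choice` argument keeps the signature compatible with other providers, but per the docstring it is ignored by Ollama. A hedged usage sketch (hypothetical tool, local Ollama server and a tool-capable model assumed):

```python
from langchain_core.tools import tool
from langchain_ollama import ChatOllama


@tool
def get_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."


llm = ChatOllama(model="llama3.1")  # model name is a placeholder

# tool_choice is accepted so code written against other providers does not
# break, but the model itself still decides whether to call the tool.
bound = llm.bind_tools([get_weather], tool_choice="any")
print(bound.invoke("Weather in Oslo?").tool_calls)
```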
langchain_ollama/embeddings.py

@@ -1,3 +1,5 @@
+"""Ollama embeddings models."""
+
 from typing import (
     List,
     Optional,
@@ -132,12 +134,12 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
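The embeddings class gets the same `# type: ignore` treatment on its private clients; usage is unchanged. For reference, a minimal hedged sketch (assumes a local Ollama server with a pulled embedding model; the model name is a placeholder):

```python
from langchain_ollama import OllamaEmbeddings

embeddings = OllamaEmbeddings(model="nomic-embed-text")  # placeholder model name

# embed_documents and embed_query go through the private clients set up
# by the model validator, same as before this release.
vectors = embeddings.embed_documents(["first document", "second document"])
query_vec = embeddings.embed_query("a search query")
print(len(vectors), len(query_vec))
```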
langchain_ollama/llms.py

@@ -116,23 +116,30 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-
-
-
-
-
-
-
+    def _generate_params(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
@@ -143,14 +150,25 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-
+        )
+
+        params = {
+            "prompt": prompt,
+            "stream": kwargs.pop("stream", True),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        return params
+
     @property
     def _llm_type(self) -> str:
         """Return type of LLM."""
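`OllamaLLM._generate_params` applies the same `stop` rule as the chat model: a stop list supplied both on the instance and per call raises immediately. A small sketch of what a caller sees (local Ollama server assumed; model name is a placeholder):

```python
from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3.1", stop=["\n\n"])  # placeholder model name

# A per-call stop on top of an instance-level stop is rejected up front.
try:
    llm.invoke("Write a haiku.", stop=["END"])
except ValueError as err:
    print(err)  # `stop` found in both the input and default params.

# Either source alone is fine; the chosen list ends up in options["stop"].
print(llm.invoke("Write a haiku."))
```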
@@ -179,27 +197,10 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         async for part in await self._async_client.generate(
-
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
+            **self._generate_params(prompt, stop=stop, **kwargs)
         ):  # type: ignore
-            yield part
+            yield part  # type: ignore
 
     def _create_generate_stream(
         self,
@@ -207,26 +208,9 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         yield from self._client.generate(
-
-
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
-        )
+            **self._generate_params(prompt, stop=stop, **kwargs)
+        )  # type: ignore
 
     async def _astream_with_aggregation(
         self,
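One consequence of both `_generate_params` and `_chat_params` reading `options` via `kwargs.pop("options", {...})`: an `options` dict passed at call time replaces the default option set wholesale rather than merging with it. A standalone illustration of that pop-with-default pattern (plain dicts, no Ollama call):

```python
from typing import Any, Dict


def build_params(**kwargs: Any) -> Dict[str, Any]:
    # Same pattern as the diff: a caller-supplied "options" wins outright,
    # otherwise the model-level defaults are used.
    options = kwargs.pop(
        "options",
        {"temperature": 0.8, "num_ctx": 2048, "stop": None},
    )
    return {"options": options, **kwargs}


print(build_params())                              # full default option set
print(build_params(options={"temperature": 0.1}))  # defaults are NOT merged in
print(build_params(keep_alive="5m")["options"])    # other kwargs pass through
```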
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "langchain-ollama"
-version = "0.2.0"
+version = "0.2.2rc1"
 description = "An integration package connecting Ollama and LangChain"
 authors = []
 readme = "README.md"
@@ -21,10 +21,23 @@ disallow_untyped_defs = "True"
 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
 ollama = ">=0.3.0,<1"
-langchain-core = "^0.3.
+langchain-core = "^0.3.20"
 
 [tool.ruff.lint]
-select = [
+select = [
+    "E",    # pycodestyle
+    "F",    # pyflakes
+    "I",    # isort
+    "T201", # print
+    "D",    # pydocstyle
+
+]
+
+[tool.ruff.lint.pydocstyle]
+convention = "google"
+
+[tool.ruff.lint.per-file-ignores]
+"tests/**" = ["D"]  # ignore docstring checks for tests
 
 [tool.coverage.run]
 omit = ["tests/*"]
@@ -32,7 +45,7 @@ omit = ["tests/*"]
 [tool.pytest.ini_options]
 addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
 markers = [
-
+    "compile: mark placeholder test used to compile integration tests without running them",
 ]
 asyncio_mode = "auto"
 
@@ -73,7 +86,7 @@ mypy = "^1.7.1"
 path = "../../core"
 develop = true
 
-[tool.poetry.group.test.dependencies.langchain-
+[tool.poetry.group.test.dependencies.langchain-tests]
 path = "../../standard-tests"
 develop = true
 
LICENSE: File without changes
README.md: File without changes
langchain_ollama/py.typed: File without changes