langchain-ollama 0.1.0rc0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/PKG-INFO +5 -4
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/README.md +2 -2
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/chat_models.py +84 -37
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/embeddings.py +4 -10
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/llms.py +15 -19
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/pyproject.toml +3 -2
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/LICENSE +0 -0
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/__init__.py +0 -0
- {langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/py.typed +0 -0
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.1.0rc0
+Version: 0.1.1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -12,8 +12,9 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: langchain-core (>=0.2.20,<0.3.0)
-Requires-Dist: ollama (>=0.
+Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
 Description-Content-Type: text/markdown
 
@@ -37,7 +38,7 @@ You can download it [here](https://ollama.com/download).
 ```python
 from langchain_ollama import ChatOllama
 
-llm = ChatOllama(model="llama3")
+llm = ChatOllama(model="llama3-groq-tool-use")
 llm.invoke("Sing a ballad of LangChain.")
 ```
 
@@ -58,7 +59,7 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from langchain_ollama import OllamaLLM
 
-llm = OllamaLLM()
+llm = OllamaLLM(model="llama3")
 llm.invoke("The meaning of life is")
 ```
 
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/README.md
@@ -18,7 +18,7 @@ You can download it [here](https://ollama.com/download).
 ```python
 from langchain_ollama import ChatOllama
 
-llm = ChatOllama(model="llama3")
+llm = ChatOllama(model="llama3-groq-tool-use")
 llm.invoke("Sing a ballad of LangChain.")
 ```
 
@@ -39,6 +39,6 @@ embeddings.embed_query("What is the meaning of life?")
 ```python
 from langchain_ollama import OllamaLLM
 
-llm = OllamaLLM()
+llm = OllamaLLM(model="llama3")
 llm.invoke("The meaning of life is")
 ```
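The README and PKG-INFO changes above all point the same way: `model` is a required field with no default on any class in this package, so every documented snippet now constructs its object with an explicit model name. A compact restatement, assuming a locally running Ollama server with the named models already pulled:

```python
from langchain_ollama import ChatOllama, OllamaEmbeddings, OllamaLLM

# `model` has no default on any of these classes, so it must always be passed.
chat = ChatOllama(model="llama3-groq-tool-use")   # tool-capable chat model
llm = OllamaLLM(model="llama3")                   # completion-style LLM
embeddings = OllamaEmbeddings(model="llama3")     # embedding interface
```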
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/chat_models.py
@@ -17,7 +17,6 @@ from typing import (
 )
 from uuid import uuid4
 
-import ollama
 from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
@@ -36,11 +35,10 @@ from langchain_core.messages import (
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
-from ollama import AsyncClient, Message, Options
+from ollama import AsyncClient, Client, Message, Options
 
 
 def _get_usage_metadata_from_generation_info(
@@ -218,9 +216,32 @@ class ChatOllama(BaseChatModel):
         .. code-block:: python
 
             '{"location": "Pune, India", "time_of_day": "morning"}'
+
+    Tool Calling:
+        .. warning::
+            Ollama currently does not support streaming for tools
+
+        .. code-block:: python
+
+            from langchain_ollama import ChatOllama
+            from langchain_core.pydantic_v1 import BaseModel, Field
+
+            class Multiply(BaseModel):
+                a: int = Field(..., description="First integer")
+                b: int = Field(..., description="Second integer")
+
+            ans = await chat.invoke("What is 45*67")
+            ans.tool_calls
+
+        .. code-block:: python
+
+            [{'name': 'Multiply',
+              'args': {'a': 45, 'b': 67},
+              'id': '420c3f3b-df10-4188-945f-eb3abdb40622',
+              'type': 'tool_call'}]
     """  # noqa: E501
 
-    model: str
+    model: str
     """Model name to use."""
 
     mirostat: Optional[int] = None
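The docstring example added above is abbreviated: `chat` is never constructed, the `Multiply` tool is never bound, and `await` is used with the synchronous `invoke`. A self-contained sketch of the same flow, assuming a locally running Ollama server and a tool-capable model such as `llama3-groq-tool-use`:

```python
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_ollama import ChatOllama


class Multiply(BaseModel):
    """Multiply two integers."""

    a: int = Field(..., description="First integer")
    b: int = Field(..., description="Second integer")


# Bind the tool first, then invoke synchronously (use `ainvoke` in async code).
chat = ChatOllama(model="llama3-groq-tool-use").bind_tools([Multiply])
ans = chat.invoke("What is 45*67")
print(ans.tool_calls)
# Expected shape, per the docstring:
# [{'name': 'Multiply', 'args': {'a': 45, 'b': 67}, 'id': '...', 'type': 'tool_call'}]
```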
@@ -269,6 +290,11 @@
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""
 
+    seed: Optional[int] = None
+    """Sets the random number seed to use for generation. Setting this
+    to a specific number will make the model generate the same text for
+    the same prompt."""
+
     stop: Optional[List[str]] = None
     """Sets the stop tokens to use."""
 
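Per the new field docstring, fixing `seed` should make the model produce the same text for the same prompt. A minimal sketch; the model name and local server are assumptions, not part of the diff:

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="llama3", seed=42)

# With the seed pinned, repeated calls with the same prompt are expected to match.
first = llm.invoke("Pick a random animal.")
second = llm.invoke("Pick a random animal.")
print(first.content == second.content)
```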
@@ -293,6 +319,9 @@
     keep_alive: Optional[Union[int, str]] = None
     """How long the model will stay loaded into memory."""
 
+    base_url: Optional[str] = None
+    """Base url the model is hosted under."""
+
     @property
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling Ollama."""
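`base_url` is passed to `ollama.Client`/`AsyncClient` as the `host` argument in the request code further down, so it is how the integration is pointed at a non-default Ollama server. A rough sketch; the URL shown is simply Ollama's default local address:

```python
from langchain_ollama import ChatOllama

# Without base_url the ollama client falls back to its own default host;
# set it explicitly when the server runs elsewhere (address below is illustrative).
llm = ChatOllama(model="llama3", base_url="http://localhost:11434")
print(llm.invoke("Say hello.").content)
```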
@@ -310,6 +339,7 @@
             "repeat_last_n": self.repeat_last_n,
             "repeat_penalty": self.repeat_penalty,
             "temperature": self.temperature,
+            "seed": self.seed,
             "stop": self.stop,
             "tfs_z": self.tfs_z,
             "top_k": self.top_k,
@@ -323,7 +353,7 @@
     ) -> Sequence[Message]:
         ollama_messages: List = []
         for message in messages:
-            role
+            role: Literal["user", "assistant", "system", "tool"]
             tool_call_id: Optional[str] = None
             tool_calls: Optional[List[Dict[str, Any]]] = None
             if isinstance(message, HumanMessage):
@@ -360,11 +390,13 @@
                         image_url = None
                         temp_image_url = content_part.get("image_url")
                         if isinstance(temp_image_url, str):
-                            image_url =
+                            image_url = temp_image_url
                         elif (
-                            isinstance(temp_image_url, dict)
+                            isinstance(temp_image_url, dict)
+                            and "url" in temp_image_url
+                            and isinstance(temp_image_url["url"], str)
                         ):
-                            image_url = temp_image_url
+                            image_url = temp_image_url["url"]
                         else:
                             raise ValueError(
                                 "Only string image_url or dict with string 'url' "
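After this change, an `image_url` content part may be either a plain string or a dict whose `url` value is a string; anything else raises the `ValueError` shown. A hedged sketch of the dict form; the model name and the truncated base64 payload are placeholders:

```python
from langchain_core.messages import HumanMessage
from langchain_ollama import ChatOllama

image_b64 = "iVBORw0KGgoAAAANSUhEUg..."  # placeholder, not a real image

message = HumanMessage(
    content=[
        {"type": "text", "text": "What is in this image?"},
        # A dict with a string "url" is now unwrapped to that string internally.
        {"type": "image_url", "image_url": {"url": image_b64}},
    ]
)
ChatOllama(model="llava").invoke([message])
```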
@@ -385,15 +417,16 @@
                                 "Must either have type 'text' or type 'image_url' "
                                 "with a string 'image_url' field."
                             )
-            msg = {
+            # Should convert to ollama.Message once role includes tool, and tool_call_id is in Message # noqa: E501
+            msg: dict = {
                 "role": role,
                 "content": content,
                 "images": images,
             }
+            if tool_calls:
+                msg["tool_calls"] = tool_calls # type: ignore
             if tool_call_id:
                 msg["tool_call_id"] = tool_call_id
-            if tool_calls:
-                msg["tool_calls"] = tool_calls
             ollama_messages.append(msg)
 
         return ollama_messages
@@ -415,15 +448,26 @@
                 params[key] = kwargs[key]
 
         params["options"]["stop"] = stop
-
-
-
-
-
-
-
-
-
+        if "tools" in kwargs:
+            yield await AsyncClient(host=self.base_url).chat(
+                model=params["model"],
+                messages=ollama_messages,
+                stream=False,
+                options=Options(**params["options"]),
+                keep_alive=params["keep_alive"],
+                format=params["format"],
+                tools=kwargs["tools"],
+            ) # type:ignore
+        else:
+            async for part in await AsyncClient(host=self.base_url).chat(
+                model=params["model"],
+                messages=ollama_messages,
+                stream=True,
+                options=Options(**params["options"]),
+                keep_alive=params["keep_alive"],
+                format=params["format"],
+            ): # type:ignore
+                yield part
 
     def _create_chat_stream(
         self,
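The rewritten `_achat_stream` above routes everything through `AsyncClient(host=self.base_url).chat(...)`: a single non-streaming request when `tools` are passed (matching the docstring warning), and a streamed request otherwise. From the caller's side, plain async streaming still looks like the sketch below; the model name and URL are placeholders:

```python
import asyncio

from langchain_ollama import ChatOllama


async def main() -> None:
    llm = ChatOllama(model="llama3", base_url="http://localhost:11434")
    # Without bound tools this goes through the stream=True branch above.
    async for chunk in llm.astream("Tell me a short joke."):
        print(chunk.content, end="", flush=True)
    print()


asyncio.run(main())
```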
@@ -443,25 +487,17 @@ class ChatOllama(BaseChatModel):
 
         params["options"]["stop"] = stop
         if "tools" in kwargs:
-
-
-
-                "messages": ollama_messages,
-                "stream": False,
-                "format": params["format"],
-                "options": Options(**params["options"]),
-                "keep_alive": params["keep_alive"],
-                "tools": kwargs["tools"],
-            }
-            it = ollama._client._request_stream(
-                "POST",
-                "/api/chat",
-                json=req,
+            yield Client(host=self.base_url).chat(
+                model=params["model"],
+                messages=ollama_messages,
                 stream=False,
+                options=Options(**params["options"]),
+                keep_alive=params["keep_alive"],
+                format=params["format"],
+                tools=kwargs["tools"],
             )
-            yield cast(Mapping[str, Any], it)
         else:
-            yield from
+            yield from Client(host=self.base_url).chat(
                 model=params["model"],
                 messages=ollama_messages,
                 stream=True,
@@ -686,8 +722,19 @@ class ChatOllama(BaseChatModel):
 
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
+        """Bind tool-like objects to this chat model.
+
+        Assumes model is compatible with OpenAI tool-calling API.
+
+        Args:
+            tools: A list of tool definitions to bind to this chat model.
+                Supports any tool definition handled by
+                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            kwargs: Any additional parameters are passed directly to
+                ``self.bind(**kwargs)``.
+        """  # noqa: E501
         formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
         return super().bind(tools=formatted_tools, **kwargs)
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/embeddings.py
@@ -14,11 +14,11 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
             from langchain_ollama import OllamaEmbeddings
 
-
+            embedder = OllamaEmbeddings(model="llama3")
             embedder.embed_query("what is the place that jonathan worked at?")
     """
 
-    model: str
+    model: str
     """Model name to use."""
 
     class Config:
@@ -28,9 +28,7 @@
 
     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = []
-        for doc in texts:
-            embedded_docs.append(list(ollama.embeddings(self.model, doc)["embedding"]))
+        embedded_docs = ollama.embed(self.model, texts)["embeddings"]
         return embedded_docs
 
     def embed_query(self, text: str) -> List[float]:
@@ -39,11 +37,7 @@
 
     async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = []
-        for doc in texts:
-            embedded_docs.append(
-                list((await AsyncClient().embeddings(self.model, doc))["embedding"])
-            )
+        embedded_docs = (await AsyncClient().embed(self.model, texts))["embeddings"]
         return embedded_docs
 
     async def aembed_query(self, text: str) -> List[float]:
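Both the sync and async embedding paths now make one batched call (`ollama.embed` / `AsyncClient().embed`) over the whole list instead of one `embeddings` call per text, which is also why the ollama dependency moves to `>=0.3.0`. Usage is unchanged; a quick sketch with an assumed embedding model:

```python
from langchain_ollama import OllamaEmbeddings

embedder = OllamaEmbeddings(model="nomic-embed-text")  # model name is an assumption

# One request now covers the whole batch; the result is one vector per input text.
vectors = embedder.embed_documents(["hello world", "goodbye world"])
print(len(vectors), len(vectors[0]))
```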
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/langchain_ollama/llms.py
@@ -34,7 +34,7 @@ class OllamaLLM(BaseLLM):
             model.invoke("Come up with 10 names for a song about parrots")
     """
 
-    model: str
+    model: str
     """Model name to use."""
 
     mirostat: Optional[int] = None
@@ -205,9 +205,9 @@
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
                     text=stream_resp["response"] if "response" in stream_resp else "",
-                    generation_info=
-
-
+                    generation_info=(
+                        dict(stream_resp) if stream_resp.get("done") is True else None
+                    ),
                 )
                 if final_chunk is None:
                     final_chunk = chunk
@@ -237,9 +237,9 @@
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
                     text=stream_resp["response"] if "response" in stream_resp else "",
-                    generation_info=
-
-
+                    generation_info=(
+                        dict(stream_resp) if stream_resp.get("done") is True else None
+                    ),
                 )
                 if final_chunk is None:
                     final_chunk = chunk
@@ -304,12 +304,10 @@
         for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=stream_resp
-
-
-
-                    if stream_resp.get("done") is True
-                    else None,
+                    text=(stream_resp.get("response", "")),
+                    generation_info=(
+                        dict(stream_resp) if stream_resp.get("done") is True else None
+                    ),
                 )
                 if run_manager:
                     run_manager.on_llm_new_token(
@@ -328,12 +326,10 @@
         async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=stream_resp
-
-
-
-                    if stream_resp.get("done") is True
-                    else None,
+                    text=(stream_resp.get("response", "")),
+                    generation_info=(
+                        dict(stream_resp) if stream_resp.get("done") is True else None
+                    ),
                 )
                 if run_manager:
                     await run_manager.on_llm_new_token(
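The llms.py hunks above only reshape how each streamed chunk's `text` and `generation_info` are assembled (the final `done` chunk carries the full response dict, earlier chunks carry `None`); caller-facing streaming is unchanged. A minimal sketch, assuming a local server and the `llama3` model:

```python
from langchain_ollama import OllamaLLM

llm = OllamaLLM(model="llama3")

# BaseLLM.stream yields plain text chunks; generation metadata is attached
# internally to the final chunk of the underlying generation.
for token in llm.stream("The meaning of life is"):
    print(token, end="", flush=True)
print()
```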
{langchain_ollama-0.1.0rc0 → langchain_ollama-0.1.1}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-ollama"
-version = "0.1.0rc0"
+version = "0.1.1"
 description = "An integration package connecting Ollama and LangChain"
 authors = []
 readme = "README.md"
@@ -9,10 +9,11 @@ license = "MIT"
 
 [tool.poetry.urls]
 "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama"
+"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"
-ollama = ">=0.
+ollama = ">=0.3.0,<1"
 langchain-core = "^0.2.20"
 
 [tool.poetry.group.test]