langchain-ollama 0.1.0__tar.gz → 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/PKG-INFO +2 -1
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/chat_models.py +36 -15
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/embeddings.py +3 -9
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/llms.py +2 -10
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/pyproject.toml +2 -1
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/LICENSE +0 -0
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/README.md +0 -0
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/__init__.py +0 -0
- {langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/py.typed +0 -0
{langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.1.0
+Version: 0.1.1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -14,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: langchain-core (>=0.2.20,<0.3.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
 Description-Content-Type: text/markdown
 
{langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/chat_models.py

@@ -17,7 +17,6 @@ from typing import (
 )
 from uuid import uuid4
 
-import ollama
 from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
@@ -36,11 +35,10 @@ from langchain_core.messages import (
 from langchain_core.messages.ai import UsageMetadata
 from langchain_core.messages.tool import tool_call
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
 from langchain_core.utils.function_calling import convert_to_openai_tool
-from ollama import AsyncClient, Message, Options
+from ollama import AsyncClient, Client, Message, Options
 
 
 def _get_usage_metadata_from_generation_info(
@@ -292,6 +290,11 @@ class ChatOllama(BaseChatModel):
     """The temperature of the model. Increasing the temperature will
     make the model answer more creatively. (Default: 0.8)"""
 
+    seed: Optional[int] = None
+    """Sets the random number seed to use for generation. Setting this
+    to a specific number will make the model generate the same text for
+    the same prompt."""
+
     stop: Optional[List[str]] = None
     """Sets the stop tokens to use."""
 
@@ -316,6 +319,9 @@ class ChatOllama(BaseChatModel):
     keep_alive: Optional[Union[int, str]] = None
     """How long the model will stay loaded into memory."""
 
+    base_url: Optional[str] = None
+    """Base url the model is hosted under."""
+
     @property
     def _default_params(self) -> Dict[str, Any]:
         """Get the default parameters for calling Ollama."""
@@ -333,6 +339,7 @@ class ChatOllama(BaseChatModel):
             "repeat_last_n": self.repeat_last_n,
             "repeat_penalty": self.repeat_penalty,
             "temperature": self.temperature,
+            "seed": self.seed,
             "stop": self.stop,
             "tfs_z": self.tfs_z,
             "top_k": self.top_k,
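Taken together, the new `seed` and `base_url` fields let a caller pin generation output and point the client at a non-default Ollama server. A minimal usage sketch under assumed values (the model name and host below are placeholders, and a reachable Ollama instance with the model pulled is required):

from langchain_ollama import ChatOllama

# Placeholder model/host; base_url is forwarded to Client/AsyncClient as `host`.
llm = ChatOllama(
    model="llama3",
    base_url="http://192.168.1.10:11434",
    seed=42,  # fixed seed: same prompt should yield the same completion
)

first = llm.invoke("Name one prime number.")
second = llm.invoke("Name one prime number.")
# With a fixed seed, Ollama should generate identical text for both calls.
assert first.content == second.content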
@@ -346,7 +353,7 @@ class ChatOllama(BaseChatModel):
     ) -> Sequence[Message]:
         ollama_messages: List = []
         for message in messages:
-            role = ""
+            role: Literal["user", "assistant", "system", "tool"]
             tool_call_id: Optional[str] = None
             tool_calls: Optional[List[Dict[str, Any]]] = None
             if isinstance(message, HumanMessage):
@@ -383,11 +390,13 @@ class ChatOllama(BaseChatModel):
                     image_url = None
                     temp_image_url = content_part.get("image_url")
                     if isinstance(temp_image_url, str):
-                        image_url = content_part["image_url"]
+                        image_url = temp_image_url
                     elif (
-                        isinstance(temp_image_url, dict) and "url" in temp_image_url
+                        isinstance(temp_image_url, dict)
+                        and "url" in temp_image_url
+                        and isinstance(temp_image_url["url"], str)
                     ):
-                        image_url = temp_image_url
+                        image_url = temp_image_url["url"]
                     else:
                         raise ValueError(
                             "Only string image_url or dict with string 'url' "
@@ -408,15 +417,16 @@ class ChatOllama(BaseChatModel):
                     "Must either have type 'text' or type 'image_url' "
                     "with a string 'image_url' field."
                 )
-            msg = {
+            # Should convert to ollama.Message once role includes tool, and tool_call_id is in Message  # noqa: E501
+            msg: dict = {
                 "role": role,
                 "content": content,
                 "images": images,
             }
+            if tool_calls:
+                msg["tool_calls"] = tool_calls  # type: ignore
             if tool_call_id:
                 msg["tool_call_id"] = tool_call_id
-            if tool_calls:
-                msg["tool_calls"] = tool_calls
             ollama_messages.append(msg)
 
         return ollama_messages
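For context, the reworked image handling above means both `image_url` shapes below normalize to the same Ollama image attachment, while anything else raises the ValueError shown in the hunk. A sketch of the accepted inputs (the base64 payload is an elided placeholder):

from langchain_core.messages import HumanMessage

# String form: the whole value is taken as the image URL / data URI.
msg_str = HumanMessage(
    content=[
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": "data:image/png;base64,..."},
    ]
)

# Dict form: after this change it must carry a string under "url".
msg_dict = HumanMessage(
    content=[
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
    ]
)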
@@ -439,7 +449,7 @@ class ChatOllama(BaseChatModel):
 
         params["options"]["stop"] = stop
         if "tools" in kwargs:
-            yield await AsyncClient().chat(
+            yield await AsyncClient(host=self.base_url).chat(
                 model=params["model"],
                 messages=ollama_messages,
                 stream=False,
@@ -449,7 +459,7 @@ class ChatOllama(BaseChatModel):
                 tools=kwargs["tools"],
             ) # type:ignore
         else:
-            async for part in await AsyncClient().chat(
+            async for part in await AsyncClient(host=self.base_url).chat(
                 model=params["model"],
                 messages=ollama_messages,
                 stream=True,
@@ -477,7 +487,7 @@ class ChatOllama(BaseChatModel):
 
         params["options"]["stop"] = stop
         if "tools" in kwargs:
-            yield ollama.chat(
+            yield Client(host=self.base_url).chat(
                 model=params["model"],
                 messages=ollama_messages,
                 stream=False,
@@ -487,7 +497,7 @@ class ChatOllama(BaseChatModel):
                 tools=kwargs["tools"],
             )
         else:
-            yield from ollama.chat(
+            yield from Client(host=self.base_url).chat(
                 model=params["model"],
                 messages=ollama_messages,
                 stream=True,
@@ -712,8 +722,19 @@ class ChatOllama(BaseChatModel):
 
     def bind_tools(
         self,
-        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
+        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
+        """Bind tool-like objects to this chat model.
+
+        Assumes model is compatible with OpenAI tool-calling API.
+
+        Args:
+            tools: A list of tool definitions to bind to this chat model.
+                Supports any tool definition handled by
+                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            kwargs: Any additional parameters are passed directly to
+                ``self.bind(**kwargs)``.
+        """  # noqa: E501
         formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
         return super().bind(tools=formatted_tools, **kwargs)
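As the new docstring notes, anything `convert_to_openai_tool` understands can be bound. A sketch with a made-up tool function (the model name is a placeholder, and actually receiving tool calls still depends on the model supporting OpenAI-style tool calling):

from langchain_ollama import ChatOllama

def get_weather(city: str) -> str:
    """Return the current weather for a city."""
    return f"It is sunny in {city}."

# Plain callables, dicts, Pydantic classes, and BaseTool instances are all
# converted to OpenAI-style tool schemas before being bound.
llm = ChatOllama(model="llama3.1").bind_tools([get_weather])
response = llm.invoke("What is the weather in Paris?")
print(response.tool_calls)  # populated when the model elects to call the tool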
{langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/embeddings.py

@@ -14,7 +14,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
             from langchain_ollama import OllamaEmbeddings
 
-
+            embedder = OllamaEmbeddings(model="llama3")
             embedder.embed_query("what is the place that jonathan worked at?")
     """
 
@@ -28,9 +28,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = []
-        for doc in texts:
-            embedded_docs.append(list(ollama.embeddings(self.model, doc)["embedding"]))
+        embedded_docs = ollama.embed(self.model, texts)["embeddings"]
         return embedded_docs
 
     def embed_query(self, text: str) -> List[float]:
@@ -39,11 +37,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
     async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = []
-        for doc in texts:
-            embedded_docs.append(
-                list((await AsyncClient().embeddings(self.model, doc))["embedding"])
-            )
+        embedded_docs = (await AsyncClient().embed(self.model, texts))["embeddings"]
         return embedded_docs
 
     async def aembed_query(self, text: str) -> List[float]:
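Both embedding paths now make a single batched request per list of texts instead of one request per document. A sketch of the batched endpoint the new code relies on, mirroring the call in the hunks above (the model name is a placeholder; requires ollama >= 0.3.0 and a running server):

import ollama

texts = ["first document", "second document"]

# One round trip; "embeddings" is a list of vectors aligned with the input order.
vectors = ollama.embed("llama3", texts)["embeddings"]
assert len(vectors) == len(texts)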
{langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/langchain_ollama/llms.py

@@ -304,11 +304,7 @@ class OllamaLLM(BaseLLM):
         for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=(
-                        stream_resp["message"]["content"]
-                        if "message" in stream_resp
-                        else ""
-                    ),
+                    text=(stream_resp.get("response", "")),
                     generation_info=(
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
@@ -330,11 +326,7 @@ class OllamaLLM(BaseLLM):
         async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
             if not isinstance(stream_resp, str):
                 chunk = GenerationChunk(
-                    text=(
-                        stream_resp["message"]["content"]
-                        if "message" in stream_resp
-                        else ""
-                    ),
+                    text=(stream_resp.get("response", "")),
                     generation_info=(
                         dict(stream_resp) if stream_resp.get("done") is True else None
                     ),
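The replaced branch read the chat endpoint's `message.content` key, but OllamaLLM streams from the generate endpoint, whose chunks carry text under `response`; the old lookup therefore fell through to the empty string. A sketch of the raw stream shape the fix matches (the model name is a placeholder; assumes a local Ollama server with the model pulled):

import ollama

# /api/generate stream chunks look like {"response": "...", "done": False, ...};
# there is no "message" key on this endpoint.
for part in ollama.generate(model="llama3", prompt="Say hi", stream=True):
    print(part.get("response", ""), end="")
    if part.get("done"):
        break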
{langchain_ollama-0.1.0 → langchain_ollama-0.1.1}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain-ollama"
-version = "0.1.0"
+version = "0.1.1"
 description = "An integration package connecting Ollama and LangChain"
 authors = []
 readme = "README.md"
@@ -9,6 +9,7 @@ license = "MIT"
 
 [tool.poetry.urls]
 "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama"
+"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"
 
 [tool.poetry.dependencies]
 python = ">=3.8.1,<4.0"