langchain-ollama 0.2.0.dev1__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_ollama/__init__.py +6 -0
- langchain_ollama/chat_models.py +126 -74
- langchain_ollama/embeddings.py +4 -2
- langchain_ollama/llms.py +33 -49
- {langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.1.dist-info}/METADATA +3 -2
- langchain_ollama-0.2.1.dist-info/RECORD +9 -0
- {langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.1.dist-info}/WHEEL +1 -1
- langchain_ollama-0.2.0.dev1.dist-info/RECORD +0 -9
- {langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.1.dist-info}/LICENSE +0 -0
langchain_ollama/__init__.py
CHANGED
langchain_ollama/chat_models.py
CHANGED
@@ -1,5 +1,6 @@
 """Ollama chat models."""
 
+import json
 from typing import (
     Any,
     AsyncIterator,
@@ -21,6 +22,7 @@ from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
 from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
+from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import LanguageModelInput
 from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
 from langchain_core.messages import (
@@ -60,19 +62,85 @@ def _get_usage_metadata_from_generation_info(
     return None
 
 
+def _parse_json_string(
+    json_string: str, raw_tool_call: dict[str, Any], skip: bool
+) -> Any:
+    """Attempt to parse a JSON string for tool calling.
+
+    Args:
+        json_string: JSON string to parse.
+        skip: Whether to ignore parsing errors and return the value anyways.
+        raw_tool_call: Raw tool call to include in error message.
+
+    Returns:
+        The parsed JSON string.
+
+    Raises:
+        OutputParserException: If the JSON string is invalid and skip=False.
+    """
+    try:
+        return json.loads(json_string)
+    except json.JSONDecodeError as e:
+        if skip:
+            return json_string
+        msg = (
+            f"Function {raw_tool_call['function']['name']} arguments:\n\n"
+            f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "
+            f"Received JSONDecodeError {e}"
+        )
+        raise OutputParserException(msg) from e
+    except TypeError as e:
+        if skip:
+            return json_string
+        msg = (
+            f"Function {raw_tool_call['function']['name']} arguments:\n\n"
+            f"{raw_tool_call['function']['arguments']}\n\nare not a string or a "
+            f"dictionary. Received TypeError {e}"
+        )
+        raise OutputParserException(msg) from e
+
+
+def _parse_arguments_from_tool_call(
+    raw_tool_call: dict[str, Any],
+) -> Optional[dict[str, Any]]:
+    """Parse arguments by trying to parse any shallowly nested string-encoded JSON.
+
+    Band-aid fix for issue in Ollama with inconsistent tool call argument structure.
+    Should be removed/changed if fixed upstream.
+    See https://github.com/ollama/ollama/issues/6155
+    """
+    if "function" not in raw_tool_call:
+        return None
+    arguments = raw_tool_call["function"]["arguments"]
+    parsed_arguments = {}
+    if isinstance(arguments, dict):
+        for key, value in arguments.items():
+            if isinstance(value, str):
+                parsed_arguments[key] = _parse_json_string(
+                    value, skip=True, raw_tool_call=raw_tool_call
+                )
+            else:
+                parsed_arguments[key] = value
+    else:
+        parsed_arguments = _parse_json_string(
+            arguments, skip=False, raw_tool_call=raw_tool_call
+        )
+    return parsed_arguments
+
+
 def _get_tool_calls_from_response(
     response: Mapping[str, Any],
 ) -> List[ToolCall]:
     """Get tool calls from ollama response."""
     tool_calls = []
     if "message" in response:
-        if "tool_calls" in response["message"]:
-            for tc in response["message"]["tool_calls"]:
+        if raw_tool_calls := response["message"].get("tool_calls"):
+            for tc in raw_tool_calls:
                 tool_calls.append(
                     tool_call(
                         id=str(uuid4()),
                         name=tc["function"]["name"],
-                        args=tc["function"]["arguments"],
+                        args=_parse_arguments_from_tool_call(tc) or {},
                     )
                 )
     return tool_calls
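
Editor's note: the point of the two new helpers is that Ollama sometimes returns tool-call arguments as a dict whose values are themselves string-encoded JSON, and sometimes as one JSON-encoded string (see ollama/ollama#6155 above). Below is a standalone sketch of that shallow normalization, not the package's exact code (which raises OutputParserException via langchain-core instead of falling through):

    import json
    from typing import Any, Optional


    def parse_arguments(raw_tool_call: dict[str, Any]) -> Optional[dict[str, Any]]:
        """Mirror of _parse_arguments_from_tool_call: decode shallowly nested JSON."""
        if "function" not in raw_tool_call:
            return None
        arguments = raw_tool_call["function"]["arguments"]
        if isinstance(arguments, dict):
            parsed = {}
            for key, value in arguments.items():
                if isinstance(value, str):
                    try:
                        parsed[key] = json.loads(value)  # skip=True: fall back silently
                    except (json.JSONDecodeError, TypeError):
                        parsed[key] = value
                else:
                    parsed[key] = value
            return parsed
        return json.loads(arguments)  # skip=False: errors propagate to the caller

    # Dict arguments with a string-encoded list are unwrapped one level:
    print(parse_arguments({"function": {"name": "f", "arguments": {"ids": "[1, 2]"}}}))
    # -> {'ids': [1, 2]}

    # A string-encoded argument object is decoded wholesale:
    print(parse_arguments({"function": {"name": "f", "arguments": '{"x": 1}'}}))
    # -> {'x': 1}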
@@ -90,9 +158,11 @@ def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
 
 
 class ChatOllama(BaseChatModel):
-    """Ollama chat model integration.
+    r"""Ollama chat model integration.
+
+    .. dropdown:: Setup
+        :open:
 
-    Setup:
     Install ``langchain-ollama`` and download any models you want to use from ollama.
 
     .. code-block:: bash
@@ -325,27 +395,36 @@ class ChatOllama(BaseChatModel):
     """Base url the model is hosted under."""
 
     client_kwargs: Optional[dict] = {}
-    """Additional kwargs to pass to the httpx Client. 
+    """Additional kwargs to pass to the httpx Client.
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _chat_params(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
@@ -357,14 +436,31 @@ class ChatOllama(BaseChatModel):
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
                 "seed": self.seed,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        tools = kwargs.get("tools")
+        default_stream = not bool(tools)
+
+        params = {
+            "messages": ollama_messages,
+            "stream": kwargs.pop("stream", default_stream),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        if tools:
+            params["tools"] = tools
+
+        return params
+
     @model_validator(mode="after")
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
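
Editor's note: `_chat_params` encodes two behaviors worth calling out. When tools are bound, streaming now defaults to off (`default_stream = not bool(tools)`), and any caller-supplied `stream`, `model`, `format`, `options`, or `keep_alive` kwarg wins over the instance defaults via `kwargs.pop`. A minimal sketch of that precedence logic in isolation (function and parameter names are illustrative, not from the package):

    from typing import Any, Dict


    def build_params(instance_model: str, **kwargs: Any) -> Dict[str, Any]:
        # Same pattern as _chat_params: per-call kwargs win, instance values
        # are only fallbacks, and the presence of tools flips the stream default.
        tools = kwargs.get("tools")
        default_stream = not bool(tools)
        return {
            "stream": kwargs.pop("stream", default_stream),
            "model": kwargs.pop("model", instance_model),
            **kwargs,
        }


    assert build_params("llama3.1")["stream"] is True
    assert build_params("llama3.1", tools=[{"type": "function"}])["stream"] is False
    # An explicit stream=True still wins even when tools are present:
    assert build_params("llama3.1", tools=[{}], stream=True)["stream"] is True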
@@ -462,37 +558,13 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        stop = stop if stop is not None else self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )  # type:ignore
-        else:
-            async for part in await self._async_client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            ):  # type:ignore
+        if chat_params["stream"]:
+            async for part in await self._async_client.chat(**chat_params):
                 yield part
+        else:
+            yield await self._async_client.chat(**chat_params)
 
     def _create_chat_stream(
         self,
@@ -500,36 +572,12 @@ class ChatOllama(BaseChatModel):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        ollama_messages = self._convert_messages_to_ollama_messages(messages)
-
-        stop = stop if stop is not None else self.stop
+        chat_params = self._chat_params(messages, stop, **kwargs)
 
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
-        if "tools" in kwargs:
-            yield self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=False,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-                tools=kwargs["tools"],
-            )
+        if chat_params["stream"]:
+            yield from self._client.chat(**chat_params)
         else:
-            yield from self._client.chat(
-                model=params["model"],
-                messages=ollama_messages,
-                stream=True,
-                options=Options(**params["options"]),
-                keep_alive=params["keep_alive"],
-                format=params["format"],
-            )
+            yield self._client.chat(**chat_params)
 
     def _chat_stream_with_aggregation(
         self,
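
Editor's note: both `_acreate_chat_stream` and `_create_chat_stream` now collapse to a single dispatch on the computed `stream` flag: iterate over the client's chunks when streaming, otherwise yield the one response object. A toy illustration of the sync pattern with a stubbed client (the client class and response payloads are hypothetical stand-ins for `ollama.Client`):

    from typing import Any, Dict, Iterator, Mapping, Union


    class FakeClient:
        """Stand-in for ollama.Client: returns an iterator when stream=True."""

        def chat(self, **params: Any) -> Union[Iterator[Mapping], Mapping]:
            if params["stream"]:
                return iter([{"message": {"content": "Hel"}},
                             {"message": {"content": "lo"}}])
            return {"message": {"content": "Hello"}}


    def create_chat_stream(client: FakeClient,
                           chat_params: Dict[str, Any]) -> Iterator[Mapping]:
        # Mirrors the refactored _create_chat_stream body.
        if chat_params["stream"]:
            yield from client.chat(**chat_params)
        else:
            yield client.chat(**chat_params)


    # Streaming yields each chunk; non-streaming yields exactly one response dict.
    print(list(create_chat_stream(FakeClient(), {"stream": True})))
    print(list(create_chat_stream(FakeClient(), {"stream": False})))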
@@ -748,6 +796,8 @@ class ChatOllama(BaseChatModel):
     def bind_tools(
         self,
         tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
+        *,
+        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
         **kwargs: Any,
     ) -> Runnable[LanguageModelInput, BaseMessage]:
         """Bind tool-like objects to this chat model.
@@ -758,6 +808,8 @@ class ChatOllama(BaseChatModel):
             tools: A list of tool definitions to bind to this chat model.
                 Supports any tool definition handled by
                 :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
+            tool_choice: If provided, which tool for the model to call. **This parameter
+                is currently ignored as it is not supported by Ollama.**
             kwargs: Any additional parameters are passed directly to
                 ``self.bind(**kwargs)``.
         """  # noqa: E501
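
Editor's note: the new keyword exists for signature compatibility with other providers but has no effect on the request. A usage sketch, assuming a local Ollama server is running and the model name is illustrative:

    from langchain_ollama import ChatOllama


    def get_weather(city: str) -> str:
        """Get the current weather for a city."""
        return f"Sunny in {city}"


    llm = ChatOllama(model="llama3.1")  # any tool-capable local model
    # tool_choice is accepted but currently ignored by the Ollama backend.
    llm_with_tools = llm.bind_tools([get_weather], tool_choice="any")

    msg = llm_with_tools.invoke("What is the weather in Paris?")
    print(msg.tool_calls)  # e.g. [{'name': 'get_weather', 'args': {'city': 'Paris'}, ...}]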
langchain_ollama/embeddings.py
CHANGED
@@ -1,3 +1,5 @@
+"""Ollama embeddings models."""
+
 from typing import (
     List,
     Optional,
@@ -132,12 +134,12 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
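
Editor's note: the `# type: ignore` comments added here and on the matching `PrivateAttr` lines in chat_models.py and llms.py all silence the same static-typing complaint: the attribute is annotated as a concrete `Client`/`AsyncClient` but defaults to `None` until the post-validation hook constructs the real client. A minimal pydantic v2 sketch of the pattern (class and field names hypothetical, with `str` standing in for the client type):

    from pydantic import BaseModel, PrivateAttr, model_validator
    from typing_extensions import Self


    class HttpService(BaseModel):
        host: str

        # Annotated as str but defaulted to None: mypy flags the mismatch,
        # hence the `# type: ignore` used in the real package.
        _client: str = PrivateAttr(default=None)  # type: ignore

        @model_validator(mode="after")
        def _set_clients(self) -> Self:
            # The validator replaces the None default with a real value,
            # just as ChatOllama builds its Client/AsyncClient here.
            self._client = f"connected to {self.host}"
            return self


    svc = HttpService(host="localhost:11434")
    print(svc._client)  # connected to localhost:11434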
langchain_ollama/llms.py
CHANGED
@@ -116,23 +116,30 @@ class OllamaLLM(BaseLLM):
     For a full list of the params, see [this link](https://pydoc.dev/httpx/latest/httpx.Client.html)
     """
 
-    _client: Client = PrivateAttr(default=None)
+    _client: Client = PrivateAttr(default=None)  # type: ignore
     """
     The client to use for making requests.
     """
 
-    _async_client: AsyncClient = PrivateAttr(default=None)
+    _async_client: AsyncClient = PrivateAttr(default=None)  # type: ignore
     """
     The async client to use for making requests.
     """
 
-    @property
-    def _default_params(self) -> Dict[str, Any]:
-        """Get the default parameters for calling Ollama."""
-        return {
-            "model": self.model,
-            "format": self.format,
-            "options": {
+    def _generate_params(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> Dict[str, Any]:
+        if self.stop is not None and stop is not None:
+            raise ValueError("`stop` found in both the input and default params.")
+        elif self.stop is not None:
+            stop = self.stop
+
+        options_dict = kwargs.pop(
+            "options",
+            {
                 "mirostat": self.mirostat,
                 "mirostat_eta": self.mirostat_eta,
                 "mirostat_tau": self.mirostat_tau,
@@ -143,14 +150,25 @@ class OllamaLLM(BaseLLM):
                 "repeat_last_n": self.repeat_last_n,
                 "repeat_penalty": self.repeat_penalty,
                 "temperature": self.temperature,
-                "stop": self.stop,
+                "stop": self.stop if stop is None else stop,
                 "tfs_z": self.tfs_z,
                 "top_k": self.top_k,
                 "top_p": self.top_p,
             },
-            "keep_alive": self.keep_alive,
+        )
+
+        params = {
+            "prompt": prompt,
+            "stream": kwargs.pop("stream", True),
+            "model": kwargs.pop("model", self.model),
+            "format": kwargs.pop("format", self.format),
+            "options": Options(**options_dict),
+            "keep_alive": kwargs.pop("keep_alive", self.keep_alive),
+            **kwargs,
         }
 
+        return params
+
     @property
     def _llm_type(self) -> str:
         """Return type of LLM."""
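
Editor's note: one subtlety of the `kwargs.pop("options", {...})` pattern in `_generate_params` (and `_chat_params` above): a per-call `options` dict replaces the defaults wholesale rather than being merged into them, so any key the caller omits falls back to Ollama's server-side defaults, not to the instance attributes. A small sketch of the difference (function name and values illustrative):

    from typing import Any, Dict


    def generate_options(instance_temperature: float, **kwargs: Any) -> Dict[str, Any]:
        # Same shape as _generate_params: the caller's dict, if any, wins outright.
        return kwargs.pop("options", {"temperature": instance_temperature, "top_k": 40})


    # No per-call options: instance defaults are used.
    print(generate_options(0.8))                       # {'temperature': 0.8, 'top_k': 40}
    # Per-call options replace the whole dict; top_k is no longer set at all.
    print(generate_options(0.8, options={"seed": 7}))  # {'seed': 7}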
@@ -179,27 +197,10 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> AsyncIterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         async for part in await self._async_client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
+            **self._generate_params(prompt, stop=stop, **kwargs)
         ):  # type: ignore
-            yield part
+            yield part  # type: ignore
 
     def _create_generate_stream(
         self,
@@ -207,26 +208,9 @@ class OllamaLLM(BaseLLM):
         stop: Optional[List[str]] = None,
         **kwargs: Any,
     ) -> Iterator[Union[Mapping[str, Any], str]]:
-        if self.stop is not None and stop is not None:
-            raise ValueError("`stop` found in both the input and default params.")
-        elif self.stop is not None:
-            stop = self.stop
-
-        params = self._default_params
-
-        for key in self._default_params:
-            if key in kwargs:
-                params[key] = kwargs[key]
-
-        params["options"]["stop"] = stop
         yield from self._client.generate(
-            model=params["model"],
-            prompt=prompt,
-            stream=True,
-            options=Options(**params["options"]),
-            keep_alive=params["keep_alive"],
-            format=params["format"],
-        )
+            **self._generate_params(prompt, stop=stop, **kwargs)
+        )  # type: ignore
 
     async def _astream_with_aggregation(
         self,
{langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.0.dev1
+Version: 0.2.1
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -11,7 +11,8 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: langchain-core (…)
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: langchain-core (>=0.3.20,<0.4.0)
 Requires-Dist: ollama (>=0.3.0,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
langchain_ollama-0.2.1.dist-info/RECORD
ADDED
@@ -0,0 +1,9 @@
+langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
+langchain_ollama/chat_models.py,sha256=YRPNR6ZKpQ6sF5045KwYbiGRA0YyHYXB8_7hPkJhAQE,32263
+langchain_ollama/embeddings.py,sha256=svqdPF44qX5qbFpZmLiXrzTC-AldmMlZRS5wBfY-EmA,5056
+langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.2.1.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama-0.2.1.dist-info/METADATA,sha256=SWUTgk7s9ouJwEBXiCPJj4YFLzVKZcM-YSHgWj8KDLQ,1876
+langchain_ollama-0.2.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langchain_ollama-0.2.1.dist-info/RECORD,,
langchain_ollama-0.2.0.dev1.dist-info/RECORD
DELETED
@@ -1,9 +0,0 @@
-langchain_ollama/__init__.py,sha256=HhQZqbCjhrbr2dC_9Dkw12pg4HPjnDXUoInROMNJKqA,518
-langchain_ollama/chat_models.py,sha256=q_URs_NzgY87XZ0RBDu-TY_seTh2lKXbtCXB7xY_utE,30423
-langchain_ollama/embeddings.py,sha256=46gmGxzK5Cm0GYesTSSgWupJYmJ2ywN7FQUAl0fzpxE,4991
-langchain_ollama/llms.py,sha256=uwQfKwDHXhWWVSAFzHpuv8SirBwKp0H4irnA8lqU0M4,13259
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.2.0.dev1.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama-0.2.0.dev1.dist-info/METADATA,sha256=qI7Sy504_I0CEJJNrXBZwHTz1b_f6QMKYdjEowTETh4,1834
-langchain_ollama-0.2.0.dev1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langchain_ollama-0.2.0.dev1.dist-info/RECORD,,
{langchain_ollama-0.2.0.dev1.dist-info → langchain_ollama-0.2.1.dist-info}/LICENSE
File without changes