langchain-ollama 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff shows the content of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
langchain_ollama/chat_models.py

@@ -1085,8 +1085,16 @@ class ChatOllama(BaseChatModel):
                     "schema must be specified when method is not 'json_mode'. "
                     "Received None."
                 )
-            tool_name = convert_to_openai_tool(schema)["function"]["name"]
-            llm = self.bind_tools([schema], tool_choice=tool_name)
+            formatted_tool = convert_to_openai_tool(schema)
+            tool_name = formatted_tool["function"]["name"]
+            llm = self.bind_tools(
+                [schema],
+                tool_choice=tool_name,
+                structured_output_format={
+                    "kwargs": {"method": method},
+                    "schema": formatted_tool,
+                },
+            )
             if is_pydantic_schema:
                 output_parser: Runnable = PydanticToolsParser(
                     tools=[schema],  # type: ignore[list-item]
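For reference, `convert_to_openai_tool` (from `langchain_core.utils.function_calling`) normalizes a Pydantic class, TypedDict, or plain JSON schema into the OpenAI tool format, so the new `structured_output_format` metadata carries the whole formatted tool rather than just its name. A minimal sketch of the shape involved, using a hypothetical `Answer` schema:

```python
from langchain_core.utils.function_calling import convert_to_openai_tool
from pydantic import BaseModel


class Answer(BaseModel):
    """An answer with its justification."""

    answer: str
    justification: str


formatted_tool = convert_to_openai_tool(Answer)
# -> {"type": "function",
#     "function": {"name": "Answer",
#                  "description": "An answer with its justification.",
#                  "parameters": {...JSON schema of the two fields...}}}
tool_name = formatted_tool["function"]["name"]  # "Answer"
```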
@@ -1097,7 +1105,13 @@ class ChatOllama(BaseChatModel):
                     key_name=tool_name, first_tool_only=True
                 )
         elif method == "json_mode":
-            llm = self.bind(format="json")
+            llm = self.bind(
+                format="json",
+                structured_output_format={
+                    "kwargs": {"method": method},
+                    "schema": schema,
+                },
+            )
             output_parser = (
                 PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
                 if is_pydantic_schema
@@ -1111,7 +1125,13 @@ class ChatOllama(BaseChatModel):
                 )
             if is_pydantic_schema:
                 schema = cast(TypeBaseModel, schema)
-                llm = self.bind(format=schema.model_json_schema())
+                llm = self.bind(
+                    format=schema.model_json_schema(),
+                    structured_output_format={
+                        "kwargs": {"method": method},
+                        "schema": schema,
+                    },
+                )
                 output_parser = PydanticOutputParser(pydantic_object=schema)
             else:
                 if is_typeddict(schema):
@@ -1126,7 +1146,13 @@ class ChatOllama(BaseChatModel):
                 else:
                     # is JSON schema
                     response_format = schema
-                llm = self.bind(format=response_format)
+                llm = self.bind(
+                    format=response_format,
+                    structured_output_format={
+                        "kwargs": {"method": method},
+                        "schema": response_format,
+                    },
+                )
                 output_parser = JsonOutputParser()
         else:
             raise ValueError(
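Taken together, the four hunks above make every branch of `with_structured_output` (function calling, JSON mode, and JSON schema, for both Pydantic and plain-dict schemas) record the same `structured_output_format` metadata: the method that was requested plus the schema it was requested with. A minimal usage sketch, assuming a local Ollama server with a pulled model (the model name and the `Answer` schema are illustrative):

```python
from langchain_ollama import ChatOllama
from pydantic import BaseModel


class Answer(BaseModel):
    answer: str
    justification: str


llm = ChatOllama(model="llama3.1")  # assumes `ollama pull llama3.1` was run

# Any of "function_calling", "json_mode", or "json_schema" now attaches
# {"kwargs": {"method": ...}, "schema": ...} to the underlying bind.
structured_llm = llm.with_structured_output(Answer, method="json_schema")
result = structured_llm.invoke("Which weighs more, a pound of bricks or of feathers?")
print(result.justification)
```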
langchain_ollama/embeddings.py

@@ -1,9 +1,6 @@
 """Ollama embeddings models."""
 
-from typing import (
-    List,
-    Optional,
-)
+from typing import Any, Dict, List, Optional
 
 from langchain_core.embeddings import Embeddings
 from ollama import AsyncClient, Client
@@ -144,10 +141,89 @@ class OllamaEmbeddings(BaseModel, Embeddings):
     The async client to use for making requests.
     """
 
+    mirostat: Optional[int] = None
+    """Enable Mirostat sampling for controlling perplexity.
+    (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
+
+    mirostat_eta: Optional[float] = None
+    """Influences how quickly the algorithm responds to feedback
+    from the generated text. A lower learning rate will result in
+    slower adjustments, while a higher learning rate will make
+    the algorithm more responsive. (Default: 0.1)"""
+
+    mirostat_tau: Optional[float] = None
+    """Controls the balance between coherence and diversity
+    of the output. A lower value will result in more focused and
+    coherent text. (Default: 5.0)"""
+
+    num_ctx: Optional[int] = None
+    """Sets the size of the context window used to generate the
+    next token. (Default: 2048)"""
+
+    num_gpu: Optional[int] = None
+    """The number of GPUs to use. On macOS it defaults to 1 to
+    enable metal support, 0 to disable."""
+
+    num_thread: Optional[int] = None
+    """Sets the number of threads to use during computation.
+    By default, Ollama will detect this for optimal performance.
+    It is recommended to set this value to the number of physical
+    CPU cores your system has (as opposed to the logical number of cores)."""
+
+    repeat_last_n: Optional[int] = None
+    """Sets how far back for the model to look back to prevent
+    repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
+
+    repeat_penalty: Optional[float] = None
+    """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
+    will penalize repetitions more strongly, while a lower value (e.g., 0.9)
+    will be more lenient. (Default: 1.1)"""
+
+    temperature: Optional[float] = None
+    """The temperature of the model. Increasing the temperature will
+    make the model answer more creatively. (Default: 0.8)"""
+
+    stop: Optional[List[str]] = None
+    """Sets the stop tokens to use."""
+
+    tfs_z: Optional[float] = None
+    """Tail free sampling is used to reduce the impact of less probable
+    tokens from the output. A higher value (e.g., 2.0) will reduce the
+    impact more, while a value of 1.0 disables this setting. (default: 1)"""
+
+    top_k: Optional[int] = None
+    """Reduces the probability of generating nonsense. A higher value (e.g. 100)
+    will give more diverse answers, while a lower value (e.g. 10)
+    will be more conservative. (Default: 40)"""
+
+    top_p: Optional[float] = None
+    """Works together with top-k. A higher value (e.g., 0.95) will lead
+    to more diverse text, while a lower value (e.g., 0.5) will
+    generate more focused and conservative text. (Default: 0.9)"""
+
     model_config = ConfigDict(
         extra="forbid",
     )
 
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        """Get the default parameters for calling Ollama."""
+        return {
+            "mirostat": self.mirostat,
+            "mirostat_eta": self.mirostat_eta,
+            "mirostat_tau": self.mirostat_tau,
+            "num_ctx": self.num_ctx,
+            "num_gpu": self.num_gpu,
+            "num_thread": self.num_thread,
+            "repeat_last_n": self.repeat_last_n,
+            "repeat_penalty": self.repeat_penalty,
+            "temperature": self.temperature,
+            "stop": self.stop,
+            "tfs_z": self.tfs_z,
+            "top_k": self.top_k,
+            "top_p": self.top_p,
+        }
+
     @model_validator(mode="after")
     def _set_clients(self) -> Self:
         """Set clients to use for ollama."""
@@ -158,7 +234,9 @@ class OllamaEmbeddings(BaseModel, Embeddings):
 
     def embed_documents(self, texts: List[str]) -> List[List[float]]:
         """Embed search docs."""
-        embedded_docs = self._client.embed(self.model, texts)["embeddings"]
+        embedded_docs = self._client.embed(
+            self.model, texts, options=self._default_params
+        )["embeddings"]
         return embedded_docs
 
     def embed_query(self, text: str) -> List[float]:
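With this change, every embedding request forwards the configured options to the server. It is equivalent to calling the raw `ollama` client with an explicit `options` mapping, roughly as sketched below (model name again illustrative):

```python
from ollama import Client

client = Client()

# What OllamaEmbeddings.embed_documents now does under the hood:
response = client.embed(
    "nomic-embed-text",
    ["first document", "second document"],
    options={"num_ctx": 4096, "temperature": 0.0},
)
vectors = response["embeddings"]  # one vector per input text
```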
langchain_ollama-0.2.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langchain-ollama
-Version: 0.2.2
+Version: 0.2.3
 Summary: An integration package connecting Ollama and LangChain
 Home-page: https://github.com/langchain-ai/langchain
 License: MIT
@@ -12,7 +12,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
-Requires-Dist: langchain-core (>=0.3.27,<0.4.0)
+Requires-Dist: langchain-core (>=0.3.33,<0.4.0)
 Requires-Dist: ollama (>=0.4.4,<1)
 Project-URL: Repository, https://github.com/langchain-ai/langchain
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
langchain_ollama-0.2.3.dist-info/RECORD (added)

@@ -0,0 +1,9 @@
+langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
+langchain_ollama/chat_models.py,sha256=YDaHyz5t4EfQrMIGJsNFdiPH9LJUOBdrBjlr0qAC8GM,48172
+langchain_ollama/embeddings.py,sha256=rZLgMvuEVqMRo1kPr9pPPrGVpEOes76cwzkXJRged_4,8397
+langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
+langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langchain_ollama-0.2.3.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
+langchain_ollama-0.2.3.dist-info/METADATA,sha256=BZ3HPeJJiDPaEhUjJIC-3SmIhQuNs6r97LS7EOVoPsE,1876
+langchain_ollama-0.2.3.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+langchain_ollama-0.2.3.dist-info/RECORD,,
langchain_ollama-0.2.2.dist-info/RECORD (removed)

@@ -1,9 +0,0 @@
-langchain_ollama/__init__.py,sha256=SxPRrWcPayJpbwhheTtlqCaPp9ffiAAgZMM5Wc1yYpM,634
-langchain_ollama/chat_models.py,sha256=_kEf5o3CGtP7JL0HKZ8sIH9xdzktnGbBVdXULR2_sdo,47323
-langchain_ollama/embeddings.py,sha256=svqdPF44qX5qbFpZmLiXrzTC-AldmMlZRS5wBfY-EmA,5056
-langchain_ollama/llms.py,sha256=ojnYU0efhN10xhUINu1dCR2Erw79J_mYS6_l45J7Vls,12778
-langchain_ollama/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_ollama-0.2.2.dist-info/LICENSE,sha256=2btS8uNUDWD_UNjw9ba6ZJt_00aUjEw9CGyK-xIHY8c,1072
-langchain_ollama-0.2.2.dist-info/METADATA,sha256=8eb8LP6TyzNBiiO60iS5ZMLKK0XW6PQUdKKQPHwYD7M,1876
-langchain_ollama-0.2.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-langchain_ollama-0.2.2.dist-info/RECORD,,