langchain-ollama 0.2.3__tar.gz → 0.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/PKG-INFO +6 -15
  2. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/langchain_ollama/chat_models.py +132 -92
  3. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/langchain_ollama/embeddings.py +11 -4
  4. langchain_ollama-0.3.0/pyproject.toml +90 -0
  5. langchain_ollama-0.3.0/tests/__init__.py +0 -0
  6. langchain_ollama-0.3.0/tests/integration_tests/__init__.py +0 -0
  7. langchain_ollama-0.3.0/tests/integration_tests/chat_models/test_chat_models.py +92 -0
  8. langchain_ollama-0.3.0/tests/integration_tests/chat_models/test_chat_models_reasoning.py +162 -0
  9. langchain_ollama-0.3.0/tests/integration_tests/chat_models/test_chat_models_standard.py +29 -0
  10. langchain_ollama-0.3.0/tests/integration_tests/test_compile.py +7 -0
  11. langchain_ollama-0.3.0/tests/integration_tests/test_embeddings.py +17 -0
  12. langchain_ollama-0.3.0/tests/integration_tests/test_llms.py +66 -0
  13. langchain_ollama-0.3.0/tests/unit_tests/__init__.py +0 -0
  14. langchain_ollama-0.3.0/tests/unit_tests/test_chat_models.py +25 -0
  15. langchain_ollama-0.3.0/tests/unit_tests/test_embeddings.py +8 -0
  16. langchain_ollama-0.3.0/tests/unit_tests/test_imports.py +12 -0
  17. langchain_ollama-0.3.0/tests/unit_tests/test_llms.py +28 -0
  18. langchain_ollama-0.2.3/pyproject.toml +0 -99
  19. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/LICENSE +0 -0
  20. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/README.md +0 -0
  21. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/langchain_ollama/__init__.py +0 -0
  22. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/langchain_ollama/llms.py +0 -0
  23. {langchain_ollama-0.2.3 → langchain_ollama-0.3.0}/langchain_ollama/py.typed +0 -0
@@ -1,22 +1,14 @@
  Metadata-Version: 2.1
  Name: langchain-ollama
- Version: 0.2.3
+ Version: 0.3.0
  Summary: An integration package connecting Ollama and LangChain
- Home-page: https://github.com/langchain-ai/langchain
  License: MIT
- Requires-Python: >=3.9,<4.0
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Classifier: Programming Language :: Python :: 3.13
- Requires-Dist: langchain-core (>=0.3.33,<0.4.0)
- Requires-Dist: ollama (>=0.4.4,<1)
- Project-URL: Repository, https://github.com/langchain-ai/langchain
- Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
  Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama
+ Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true
+ Project-URL: repository, https://github.com/langchain-ai/langchain
+ Requires-Python: <4.0,>=3.9
+ Requires-Dist: ollama<1,>=0.4.4
+ Requires-Dist: langchain-core<1.0.0,>=0.3.47
  Description-Content-Type: text/markdown

  # langchain-ollama
@@ -63,4 +55,3 @@ from langchain_ollama import OllamaLLM
  llm = OllamaLLM(model="llama3")
  llm.invoke("The meaning of life is")
  ```
-
@@ -7,12 +7,14 @@ from typing import (
  AsyncIterator,
  Callable,
  Dict,
+ Final,
  Iterator,
  List,
  Literal,
  Mapping,
  Optional,
  Sequence,
+ Tuple,
  Type,
  Union,
  cast,
@@ -30,6 +32,7 @@ from langchain_core.messages import (
  AIMessage,
  AIMessageChunk,
  BaseMessage,
+ BaseMessageChunk,
  HumanMessage,
  SystemMessage,
  ToolCall,
@@ -47,15 +50,19 @@ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResu
  from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
  from langchain_core.tools import BaseTool
  from langchain_core.utils.function_calling import (
- _convert_any_typed_dicts_to_pydantic as convert_any_typed_dicts_to_pydantic,
+ convert_to_json_schema,
+ convert_to_openai_tool,
  )
- from langchain_core.utils.function_calling import convert_to_openai_tool
  from langchain_core.utils.pydantic import TypeBaseModel, is_basemodel_subclass
  from ollama import AsyncClient, Client, Message, Options
  from pydantic import BaseModel, PrivateAttr, model_validator
  from pydantic.json_schema import JsonSchemaValue
+ from pydantic.v1 import BaseModel as BaseModelV1
  from typing_extensions import Self, is_typeddict

+ DEFAULT_THINK_TOKEN_START: Final[str] = "<think>"
+ DEFAULT_THINK_TOKEN_END: Final[str] = "</think>"
+

  def _get_usage_metadata_from_generation_info(
  generation_info: Optional[Mapping[str, Any]],
@@ -124,13 +131,17 @@ def _parse_arguments_from_tool_call(
  if "function" not in raw_tool_call:
  return None
  arguments = raw_tool_call["function"]["arguments"]
- parsed_arguments = {}
+ parsed_arguments: dict = {}
  if isinstance(arguments, dict):
  for key, value in arguments.items():
  if isinstance(value, str):
- parsed_arguments[key] = _parse_json_string(
+ parsed_value = _parse_json_string(
  value, skip=True, raw_tool_call=raw_tool_call
  )
+ if isinstance(parsed_value, (dict, list)):
+ parsed_arguments[key] = parsed_value
+ else:
+ parsed_arguments[key] = value
  else:
  parsed_arguments[key] = value
  else:
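The reworked parsing above only replaces a string argument with its parsed form when `_parse_json_string` yields a dict or list, so values such as long numeric identifiers are no longer coerced away from strings. A minimal standalone sketch of that rule (an illustration using `json.loads`, not the package's internal helper):

```python
import json
from typing import Any


def parse_tool_arguments(arguments: dict) -> dict:
    """Sketch of the 0.3.0 rule: keep parsed values only when they are dicts or lists."""
    parsed: dict = {}
    for key, value in arguments.items():
        if not isinstance(value, str):
            parsed[key] = value
            continue
        try:
            candidate: Any = json.loads(value)
        except json.JSONDecodeError:
            candidate = None
        # A long numeric ID string stays a string; embedded JSON objects are decoded.
        parsed[key] = candidate if isinstance(candidate, (dict, list)) else value
    return parsed


assert parse_tool_arguments({"arg_1": "12345678901234567890123456"})["arg_1"] == "12345678901234567890123456"
assert parse_tool_arguments({"filters": '{"status": "active"}'})["filters"] == {"status": "active"}
```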
@@ -228,7 +239,7 @@ class ChatOllama(BaseChatModel):
  ("human", "Return the words Hello World!"),
  ]
  for chunk in llm.stream(messages):
- print(chunk)
+ print(chunk.text(), end="")


  .. code-block:: python
@@ -330,6 +341,13 @@ class ChatOllama(BaseChatModel):
  model: str
  """Model name to use."""

+ extract_reasoning: Optional[Union[bool, Tuple[str, str]]] = False
+ """Whether to extract the reasoning tokens in think blocks.
+ Extracts `chunk.content` to `chunk.additional_kwargs.reasoning_content`.
+ If a tuple is supplied, they are assumed to be the (start, end) tokens.
+ If `extract_reasoning=True`, the tokens will default to (<think>, </think>).
+ """
+
  mirostat: Optional[int] = None
  """Enable Mirostat sampling for controlling perplexity.
  (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
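The new `extract_reasoning` field is the user-facing switch for the reasoning extraction added in this release. A minimal usage sketch, assuming a local Ollama server with a reasoning model such as `deepseek-r1:1.5b` already pulled (model name and prompt are illustrative):

```python
from langchain_ollama import ChatOllama

llm = ChatOllama(model="deepseek-r1:1.5b", extract_reasoning=True)

result = llm.invoke("What is 3^3?")
# The <think>...</think> block is routed out of the visible content and into
# additional_kwargs["reasoning_content"].
print(result.content)
print(result.additional_kwargs.get("reasoning_content"))

# Custom delimiters can be supplied as a (start, end) tuple instead of True.
llm_custom = ChatOllama(
    model="deepseek-r1:1.5b",
    extract_reasoning=("<think>", "</think>"),
)
```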
@@ -563,6 +581,28 @@ class ChatOllama(BaseChatModel):

  return ollama_messages

+ def _extract_reasoning(
+ self, message_chunk: BaseMessageChunk, is_thinking: bool
+ ) -> Tuple[BaseMessageChunk, bool]:
+ """Mutate a message chunk to extract reasoning content."""
+ if not self.extract_reasoning:
+ return message_chunk, is_thinking
+ elif self.extract_reasoning is True:
+ start_token = DEFAULT_THINK_TOKEN_START
+ end_token = DEFAULT_THINK_TOKEN_END
+ else:
+ start_token, end_token = cast(tuple, self.extract_reasoning)
+ if start_token in message_chunk.content:
+ is_thinking = True
+ content = message_chunk.content
+ if is_thinking:
+ message_chunk.additional_kwargs["reasoning_content"] = content
+ message_chunk.content = ""
+ if end_token in content:
+ is_thinking = False
+
+ return message_chunk, is_thinking
+
  async def _acreate_chat_stream(
  self,
  messages: List[BaseMessage],
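`_extract_reasoning` is a small per-chunk state machine: seeing the start token flips `is_thinking` on, every chunk received while thinking (delimiters included) is moved into `additional_kwargs["reasoning_content"]` with its visible content blanked, and seeing the end token flips the flag back off. A simplified standalone re-implementation over plain strings, just to illustrate the control flow:

```python
from typing import List, Tuple


def split_reasoning(
    chunks: List[str],
    start_token: str = "<think>",
    end_token: str = "</think>",
) -> Tuple[str, str]:
    """Toy chunk router: returns (visible_text, reasoning_text)."""
    visible: List[str] = []
    reasoning: List[str] = []
    is_thinking = False
    for content in chunks:
        if start_token in content:
            is_thinking = True
        if is_thinking:
            # While thinking, the whole chunk goes to the reasoning channel.
            reasoning.append(content)
            if end_token in content:
                is_thinking = False
        else:
            visible.append(content)
    return "".join(visible), "".join(reasoning)


text, thoughts = split_reasoning(["<think>", "3^3 means 3*3*3. ", "</think>", "3^3 = 27"])
assert text == "3^3 = 27"
assert thoughts == "<think>3^3 means 3*3*3. </think>"
```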
@@ -599,35 +639,17 @@ class ChatOllama(BaseChatModel):
  **kwargs: Any,
  ) -> ChatGenerationChunk:
  final_chunk = None
- for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
- if not isinstance(stream_resp, str):
- chunk = ChatGenerationChunk(
- message=AIMessageChunk(
- content=(
- stream_resp["message"]["content"]
- if "message" in stream_resp
- and "content" in stream_resp["message"]
- else ""
- ),
- usage_metadata=_get_usage_metadata_from_generation_info(
- stream_resp
- ),
- tool_calls=_get_tool_calls_from_response(stream_resp),
- ),
- generation_info=(
- dict(stream_resp) if stream_resp.get("done") is True else None
- ),
+ for chunk in self._iterate_over_stream(messages, stop, **kwargs):
+ if final_chunk is None:
+ final_chunk = chunk
+ else:
+ final_chunk += chunk
+ if run_manager:
+ run_manager.on_llm_new_token(
+ chunk.text,
+ chunk=chunk,
+ verbose=verbose,
  )
- if final_chunk is None:
- final_chunk = chunk
- else:
- final_chunk += chunk
- if run_manager:
- run_manager.on_llm_new_token(
- chunk.text,
- chunk=chunk,
- verbose=verbose,
- )
  if final_chunk is None:
  raise ValueError("No data received from Ollama stream.")

@@ -642,35 +664,17 @@ class ChatOllama(BaseChatModel):
  **kwargs: Any,
  ) -> ChatGenerationChunk:
  final_chunk = None
- async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
- if not isinstance(stream_resp, str):
- chunk = ChatGenerationChunk(
- message=AIMessageChunk(
- content=(
- stream_resp["message"]["content"]
- if "message" in stream_resp
- and "content" in stream_resp["message"]
- else ""
- ),
- usage_metadata=_get_usage_metadata_from_generation_info(
- stream_resp
- ),
- tool_calls=_get_tool_calls_from_response(stream_resp),
- ),
- generation_info=(
- dict(stream_resp) if stream_resp.get("done") is True else None
- ),
+ async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
+ if final_chunk is None:
+ final_chunk = chunk
+ else:
+ final_chunk += chunk
+ if run_manager:
+ await run_manager.on_llm_new_token(
+ chunk.text,
+ chunk=chunk,
+ verbose=verbose,
  )
- if final_chunk is None:
- final_chunk = chunk
- else:
- final_chunk += chunk
- if run_manager:
- await run_manager.on_llm_new_token(
- chunk.text,
- chunk=chunk,
- verbose=verbose,
- )
  if final_chunk is None:
  raise ValueError("No data received from Ollama stream.")

@@ -707,18 +711,19 @@ class ChatOllama(BaseChatModel):
  content=final_chunk.text,
  usage_metadata=cast(AIMessageChunk, final_chunk.message).usage_metadata,
  tool_calls=cast(AIMessageChunk, final_chunk.message).tool_calls,
+ additional_kwargs=final_chunk.message.additional_kwargs,
  ),
  generation_info=generation_info,
  )
  return ChatResult(generations=[chat_generation])

- def _stream(
+ def _iterate_over_stream(
  self,
  messages: List[BaseMessage],
  stop: Optional[List[str]] = None,
- run_manager: Optional[CallbackManagerForLLMRun] = None,
  **kwargs: Any,
  ) -> Iterator[ChatGenerationChunk]:
+ is_thinking = False
  for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
  if not isinstance(stream_resp, str):
  chunk = ChatGenerationChunk(
@@ -738,20 +743,35 @@ class ChatOllama(BaseChatModel):
  dict(stream_resp) if stream_resp.get("done") is True else None
  ),
  )
- if run_manager:
- run_manager.on_llm_new_token(
- chunk.text,
- verbose=self.verbose,
+ if self.extract_reasoning:
+ message, is_thinking = self._extract_reasoning(
+ chunk.message, is_thinking
  )
+ chunk.message = message
  yield chunk

- async def _astream(
+ def _stream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> Iterator[ChatGenerationChunk]:
+ for chunk in self._iterate_over_stream(messages, stop, **kwargs):
+ if run_manager:
+ run_manager.on_llm_new_token(
+ chunk.text,
+ verbose=self.verbose,
+ )
+ yield chunk
+
+ async def _aiterate_over_stream(
  self,
  messages: List[BaseMessage],
  stop: Optional[List[str]] = None,
- run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
  **kwargs: Any,
  ) -> AsyncIterator[ChatGenerationChunk]:
+ is_thinking = False
  async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
  if not isinstance(stream_resp, str):
  chunk = ChatGenerationChunk(
@@ -771,13 +791,28 @@ class ChatOllama(BaseChatModel):
  dict(stream_resp) if stream_resp.get("done") is True else None
  ),
  )
- if run_manager:
- await run_manager.on_llm_new_token(
- chunk.text,
- verbose=self.verbose,
+ if self.extract_reasoning:
+ message, is_thinking = self._extract_reasoning(
+ chunk.message, is_thinking
  )
+ chunk.message = message
  yield chunk

+ async def _astream(
+ self,
+ messages: List[BaseMessage],
+ stop: Optional[List[str]] = None,
+ run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+ **kwargs: Any,
+ ) -> AsyncIterator[ChatGenerationChunk]:
+ async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
+ if run_manager:
+ await run_manager.on_llm_new_token(
+ chunk.text,
+ verbose=self.verbose,
+ )
+ yield chunk
+
  async def _agenerate(
  self,
  messages: List[BaseMessage],
@@ -794,6 +829,7 @@ class ChatOllama(BaseChatModel):
  content=final_chunk.text,
  usage_metadata=cast(AIMessageChunk, final_chunk.message).usage_metadata,
  tool_calls=cast(AIMessageChunk, final_chunk.message).tool_calls,
+ additional_kwargs=final_chunk.message.additional_kwargs,
  ),
  generation_info=generation_info,
  )
@@ -831,9 +867,7 @@ class ChatOllama(BaseChatModel):
  self,
  schema: Union[Dict, type],
  *,
- method: Literal[
- "function_calling", "json_mode", "json_schema"
- ] = "function_calling",
+ method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
  include_raw: bool = False,
  **kwargs: Any,
  ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
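With the default now `method="json_schema"`, a bare `with_structured_output(...)` call relies on Ollama's structured output support (the schema is passed via `format`) instead of tool calling. A short sketch, assuming a locally available `llama3.1` model:

```python
from pydantic import BaseModel, Field

from langchain_ollama import ChatOllama


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")


llm = ChatOllama(model="llama3.1", temperature=0)

# 0.3.0 default: generation is constrained against the JSON schema.
structured_llm = llm.with_structured_output(Joke)
joke = structured_llm.invoke("Tell me a joke about cats.")

# The pre-0.3.0 behavior is still available by opting in explicitly.
legacy_llm = llm.with_structured_output(Joke, method="function_calling")
```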
@@ -857,10 +891,10 @@ class ChatOllama(BaseChatModel):

  method: The method for steering model generation, one of:

- - "function_calling":
- Uses Ollama's tool-calling API
  - "json_schema":
  Uses Ollama's structured output API: https://ollama.com/blog/structured-outputs
+ - "function_calling":
+ Uses Ollama's tool-calling API
  - "json_mode":
  Specifies ``format="json"``. Note that if using JSON mode then you
  must include instructions for formatting the output into the
@@ -891,7 +925,11 @@ class ChatOllama(BaseChatModel):

  Added support for structured output API via ``format`` parameter.

- .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False
+ .. versionchanged:: 0.3.0
+
+ Updated default ``method`` to ``"json_schema"``.
+
+ .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False

  .. code-block:: python

@@ -924,7 +962,7 @@ class ChatOllama(BaseChatModel):
  # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
  # )

- .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=True
+ .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=True

  .. code-block:: python

@@ -953,7 +991,7 @@ class ChatOllama(BaseChatModel):
  # 'parsing_error': None
  # }

- .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False
+ .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False

  .. code-block:: python

@@ -974,7 +1012,7 @@ class ChatOllama(BaseChatModel):

  llm = ChatOllama(model="llama3.1", temperature=0)
  structured_llm = llm.with_structured_output(
- AnswerWithJustification, method="json_schema"
+ AnswerWithJustification, method="function_calling"
  )

  structured_llm.invoke(
@@ -1076,6 +1114,7 @@ class ChatOllama(BaseChatModel):
  # 'parsing_error': None
  # }
  """ # noqa: E501, D301
+ _ = kwargs.pop("strict", None)
  if kwargs:
  raise ValueError(f"Received unsupported arguments {kwargs}")
  is_pydantic_schema = _is_pydantic_class(schema)
@@ -1090,7 +1129,7 @@ class ChatOllama(BaseChatModel):
  llm = self.bind_tools(
  [schema],
  tool_choice=tool_name,
- structured_output_format={
+ ls_structured_output_format={
  "kwargs": {"method": method},
  "schema": formatted_tool,
  },
@@ -1107,7 +1146,7 @@ class ChatOllama(BaseChatModel):
  elif method == "json_mode":
  llm = self.bind(
  format="json",
- structured_output_format={
+ ls_structured_output_format={
  "kwargs": {"method": method},
  "schema": schema,
  },
@@ -1125,9 +1164,13 @@ class ChatOllama(BaseChatModel):
  )
  if is_pydantic_schema:
  schema = cast(TypeBaseModel, schema)
+ if issubclass(schema, BaseModelV1):
+ response_format = schema.schema()
+ else:
+ response_format = schema.model_json_schema()
  llm = self.bind(
- format=schema.model_json_schema(),
- structured_output_format={
+ format=response_format,
+ ls_structured_output_format={
  "kwargs": {"method": method},
  "schema": schema,
  },
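The `issubclass(schema, BaseModelV1)` branch above lets `method="json_schema"` accept both Pydantic v2 models and `pydantic.v1` models: v1 classes export their JSON schema via `.schema()`, while v2 classes use `.model_json_schema()`. A quick illustration of the two APIs being bridged:

```python
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1


class AnswerV2(BaseModel):
    answer: str


class AnswerV1(BaseModelV1):
    answer: str


# v2 models expose model_json_schema(); v1 models only provide schema().
assert "answer" in AnswerV2.model_json_schema()["properties"]
assert "answer" in AnswerV1.schema()["properties"]
```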
@@ -1135,20 +1178,17 @@ class ChatOllama(BaseChatModel):
  output_parser = PydanticOutputParser(pydantic_object=schema)
  else:
  if is_typeddict(schema):
- schema = cast(type, schema)
- response_format = convert_any_typed_dicts_to_pydantic(
- schema, visited={}
- ).schema() # type: ignore[attr-defined]
+ response_format = convert_to_json_schema(schema)
  if "required" not in response_format:
  response_format["required"] = list(
  response_format["properties"].keys()
  )
  else:
  # is JSON schema
- response_format = schema
+ response_format = cast(dict, schema)
  llm = self.bind(
  format=response_format,
- structured_output_format={
+ ls_structured_output_format={
  "kwargs": {"method": method},
  "schema": response_format,
  },
@@ -164,6 +164,11 @@ class OllamaEmbeddings(BaseModel, Embeddings):
  """The number of GPUs to use. On macOS it defaults to 1 to
  enable metal support, 0 to disable."""

+ keep_alive: Optional[int] = None
+ """controls how long the model will stay loaded into memory
+ following the request (default: 5m)
+ """
+
  num_thread: Optional[int] = None
  """Sets the number of threads to use during computation.
  By default, Ollama will detect this for optimal performance.
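A brief sketch of the new `keep_alive` option on `OllamaEmbeddings`; as the hunks below show, the value is simply forwarded to the client's `embed` calls so the model can be kept loaded between embedding requests (the value here is illustrative):

```python
from langchain_ollama import OllamaEmbeddings

# keep_alive is passed through to Ollama; e.g. 3600 asks the server to keep the
# model in memory for an hour after each request.
embeddings = OllamaEmbeddings(model="llama3", keep_alive=3600)

vectors = embeddings.embed_documents(["hello world", "goodbye world"])
query_vector = embeddings.embed_query("hello world")
```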
@@ -235,7 +240,7 @@ class OllamaEmbeddings(BaseModel, Embeddings):
  def embed_documents(self, texts: List[str]) -> List[List[float]]:
  """Embed search docs."""
  embedded_docs = self._client.embed(
- self.model, texts, options=self._default_params
+ self.model, texts, options=self._default_params, keep_alive=self.keep_alive
  )["embeddings"]
  return embedded_docs

@@ -245,9 +250,11 @@ class OllamaEmbeddings(BaseModel, Embeddings):

  async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
  """Embed search docs."""
- embedded_docs = (await self._async_client.embed(self.model, texts))[
- "embeddings"
- ]
+ embedded_docs = (
+ await self._async_client.embed(
+ self.model, texts, keep_alive=self.keep_alive
+ )
+ )["embeddings"]
  return embedded_docs

  async def aembed_query(self, text: str) -> List[float]:
@@ -0,0 +1,90 @@
+ [build-system]
+ requires = [
+ "pdm-backend",
+ ]
+ build-backend = "pdm.backend"
+
+ [project]
+ authors = []
+ requires-python = "<4.0,>=3.9"
+ dependencies = [
+ "ollama<1,>=0.4.4",
+ "langchain-core<1.0.0,>=0.3.47",
+ ]
+ name = "langchain-ollama"
+ version = "0.3.0"
+ description = "An integration package connecting Ollama and LangChain"
+ readme = "README.md"
+
+ [project.license]
+ text = "MIT"
+
+ [project.urls]
+ "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama"
+ "Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"
+ repository = "https://github.com/langchain-ai/langchain"
+
+ [dependency-groups]
+ test = [
+ "pytest<8.0.0,>=7.4.3",
+ "pytest-asyncio<1.0.0,>=0.23.2",
+ "syrupy<5.0.0,>=4.0.2",
+ "pytest-socket<1.0.0,>=0.7.0",
+ "pytest-watcher<1.0.0,>=0.3.4",
+ "langchain-core",
+ "langchain-tests",
+ ]
+ codespell = [
+ "codespell<3.0.0,>=2.2.6",
+ ]
+ test_integration = []
+ lint = [
+ "ruff<1.0.0,>=0.1.8",
+ ]
+ dev = [
+ "langchain-core",
+ ]
+ typing = [
+ "mypy<2.0.0,>=1.7.1",
+ "langchain-core",
+ ]
+
+ [tool.uv.sources.langchain-core]
+ path = "../../core"
+ editable = true
+
+ [tool.uv.sources.langchain-tests]
+ path = "../../standard-tests"
+ editable = true
+
+ [tool.mypy]
+ disallow_untyped_defs = "True"
+
+ [tool.ruff.lint]
+ select = [
+ "E",
+ "F",
+ "I",
+ "T201",
+ "D",
+ ]
+
+ [tool.ruff.lint.pydocstyle]
+ convention = "google"
+
+ [tool.ruff.lint.per-file-ignores]
+ "tests/**" = [
+ "D",
+ ]
+
+ [tool.coverage.run]
+ omit = [
+ "tests/*",
+ ]
+
+ [tool.pytest.ini_options]
+ addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
+ markers = [
+ "compile: mark placeholder test used to compile integration tests without running them",
+ ]
+ asyncio_mode = "auto"
File without changes
@@ -0,0 +1,92 @@
+ """Ollama specific chat model integration tests"""
+
+ from typing import List, Optional
+
+ import pytest
+ from pydantic import BaseModel, Field
+ from typing_extensions import Annotated, TypedDict
+
+ from langchain_ollama import ChatOllama
+
+
+ @pytest.mark.parametrize(("method"), [("function_calling"), ("json_schema")])
+ def test_structured_output(method: str) -> None:
+ """Test to verify structured output via tool calling and ``format`` parameter."""
+
+ class Joke(BaseModel):
+ """Joke to tell user."""
+
+ setup: str = Field(description="question to set up a joke")
+ punchline: str = Field(description="answer to resolve the joke")
+
+ llm = ChatOllama(model="llama3.1", temperature=0)
+ query = "Tell me a joke about cats."
+
+ # Pydantic
+ structured_llm = llm.with_structured_output(Joke, method=method) # type: ignore[arg-type]
+ result = structured_llm.invoke(query)
+ assert isinstance(result, Joke)
+
+ for chunk in structured_llm.stream(query):
+ assert isinstance(chunk, Joke)
+
+ # JSON Schema
+ structured_llm = llm.with_structured_output(Joke.model_json_schema(), method=method) # type: ignore[arg-type]
+ result = structured_llm.invoke(query)
+ assert isinstance(result, dict)
+ assert set(result.keys()) == {"setup", "punchline"}
+
+ for chunk in structured_llm.stream(query):
+ assert isinstance(chunk, dict)
+ assert isinstance(chunk, dict) # for mypy
+ assert set(chunk.keys()) == {"setup", "punchline"}
+
+ # Typed Dict
+ class JokeSchema(TypedDict):
+ """Joke to tell user."""
+
+ setup: Annotated[str, "question to set up a joke"]
+ punchline: Annotated[str, "answer to resolve the joke"]
+
+ structured_llm = llm.with_structured_output(JokeSchema, method=method) # type: ignore[arg-type]
+ result = structured_llm.invoke(query)
+ assert isinstance(result, dict)
+ assert set(result.keys()) == {"setup", "punchline"}
+
+ for chunk in structured_llm.stream(query):
+ assert isinstance(chunk, dict)
+ assert isinstance(chunk, dict) # for mypy
+ assert set(chunk.keys()) == {"setup", "punchline"}
+
+
+ @pytest.mark.parametrize(("model"), [("llama3.1")])
+ def test_structured_output_deeply_nested(model: str) -> None:
+ """Test to verify structured output with a nested objects."""
+ llm = ChatOllama(model=model, temperature=0)
+
+ class Person(BaseModel):
+ """Information about a person."""
+
+ name: Optional[str] = Field(default=None, description="The name of the person")
+ hair_color: Optional[str] = Field(
+ default=None, description="The color of the person's hair if known"
+ )
+ height_in_meters: Optional[str] = Field(
+ default=None, description="Height measured in meters"
+ )
+
+ class Data(BaseModel):
+ """Extracted data about people."""
+
+ people: List[Person]
+
+ chat = llm.with_structured_output(Data) # type: ignore[arg-type]
+ text = (
+ "Alan Smith is 6 feet tall and has blond hair."
+ "Alan Poe is 3 feet tall and has grey hair."
+ )
+ result = chat.invoke(text)
+ assert isinstance(result, Data)
+
+ for chunk in chat.stream(text):
+ assert isinstance(chunk, Data)
@@ -0,0 +1,162 @@
+ """Ollama specific chat model integration tests for reasoning models."""
+
+ import pytest
+ from langchain_core.messages import AIMessageChunk, BaseMessageChunk, HumanMessage
+ from pydantic import ValidationError
+
+ from langchain_ollama import ChatOllama
+
+ SAMPLE = "What is 3^3?"
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_stream_no_reasoning(model: str) -> None:
+ """Test deepseek model without parsing."""
+ llm = ChatOllama(model=model, num_ctx=2**12)
+ messages = [
+ {
+ "role": "user",
+ "content": SAMPLE,
+ }
+ ]
+ result = None
+ for chunk in llm.stream(messages):
+ assert isinstance(chunk, BaseMessageChunk)
+ if result is None:
+ result = chunk
+ continue
+ result += chunk
+ assert isinstance(result, AIMessageChunk)
+ assert result.content
+ assert "<think>" in result.content and "</think>" in result.content
+ assert "reasoning_content" not in result.additional_kwargs
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_stream_bool(model: str) -> None:
+ """Test deepseek model with reasoning bool=True"""
+ llm = ChatOllama(model=model, num_ctx=2**12, extract_reasoning=True)
+ messages = [
+ {
+ "role": "user",
+ "content": SAMPLE,
+ }
+ ]
+ result = None
+ for chunk in llm.stream(messages):
+ assert isinstance(chunk, BaseMessageChunk)
+ if result is None:
+ result = chunk
+ continue
+ result += chunk
+ assert isinstance(result, AIMessageChunk)
+ assert result.content
+ assert "<think>" not in result.content and "</think>" not in result.content
+ assert "reasoning_content" in result.additional_kwargs
+ assert len(result.additional_kwargs["reasoning_content"]) > 0
+ assert "<think>" in result.additional_kwargs["reasoning_content"]
+ assert "</think>" in result.additional_kwargs["reasoning_content"]
+ clean_content = (
+ result.additional_kwargs["reasoning_content"]
+ .replace("<think>", "")
+ .replace("</think>", "")
+ .strip()
+ )
+ assert len(clean_content) > 0
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_stream_tuple(model: str) -> None:
+ """Test deepseek model with reasoning with tuple=..."""
+ llm = ChatOllama(
+ model=model, num_ctx=2**12, extract_reasoning=("<think>", "</think>")
+ )
+ messages = [
+ {
+ "role": "user",
+ "content": SAMPLE,
+ }
+ ]
+ result = None
+ for chunk in llm.stream(messages):
+ assert isinstance(chunk, BaseMessageChunk)
+ if result is None:
+ result = chunk
+ continue
+ result += chunk
+ assert isinstance(result, AIMessageChunk)
+ assert result.content
+ assert "<think>" not in result.content and "</think>" not in result.content
+ assert "reasoning_content" in result.additional_kwargs
+ assert len(result.additional_kwargs["reasoning_content"]) > 0
+ assert "<think>" in result.additional_kwargs["reasoning_content"]
+ assert "</think>" in result.additional_kwargs["reasoning_content"]
+ clean_content = (
+ result.additional_kwargs["reasoning_content"]
+ .replace("<think>", "")
+ .replace("</think>", "")
+ .strip()
+ )
+ assert len(clean_content) > 0
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_invoke_no_reasoning(model: str) -> None:
+ """Test deepseek model without parsing using invoke."""
+ llm = ChatOllama(model=model, num_ctx=2**12)
+ message = HumanMessage(content=SAMPLE)
+ result = llm.invoke([message])
+ assert result.content
+ assert "<think>" in result.content and "</think>" in result.content
+ assert "reasoning_content" not in result.additional_kwargs
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_invoke_bool(model: str) -> None:
+ """Test deepseek model with reasoning bool=True using invoke"""
+ llm = ChatOllama(model=model, num_ctx=2**12, extract_reasoning=True)
+ message = HumanMessage(content=SAMPLE)
+ result = llm.invoke([message])
+ assert result.content
+ assert "<think>" not in result.content and "</think>" not in result.content
+ assert "reasoning_content" in result.additional_kwargs
+ assert len(result.additional_kwargs["reasoning_content"]) > 0
+ assert "<think>" in result.additional_kwargs["reasoning_content"]
+ assert "</think>" in result.additional_kwargs["reasoning_content"]
+ clean_content = (
+ result.additional_kwargs["reasoning_content"]
+ .replace("<think>", "")
+ .replace("</think>", "")
+ .strip()
+ )
+ assert len(clean_content) > 0
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_messages_invoke_tuple(model: str) -> None:
+ """Test deepseek model with reasoning with tuple=... using invoke"""
+ llm = ChatOllama(
+ model=model, num_ctx=2**12, extract_reasoning=("<think>", "</think>")
+ )
+ message = HumanMessage(content=SAMPLE)
+ result = llm.invoke([message])
+ assert result.content
+ assert "<think>" not in result.content and "</think>" not in result.content
+ assert "reasoning_content" in result.additional_kwargs
+ assert len(result.additional_kwargs["reasoning_content"]) > 0
+ assert "<think>" in result.additional_kwargs["reasoning_content"]
+ assert "</think>" in result.additional_kwargs["reasoning_content"]
+ clean_content = (
+ result.additional_kwargs["reasoning_content"]
+ .replace("<think>", "")
+ .replace("</think>", "")
+ .strip()
+ )
+ assert len(clean_content) > 0
+
+
+ @pytest.mark.parametrize(("model"), [("deepseek-r1:1.5b")])
+ def test_deepseek_invalid(model: str) -> None:
+ """Test deepseek model with reasoning raises ValidationError"""
+ with pytest.raises(ValidationError):
+ _ = ChatOllama(model=model, extract_reasoning={"invalid": "data"}) # type: ignore[arg-type]
@@ -0,0 +1,29 @@
+ """Test chat model integration using standard integration tests."""
+
+ from typing import Type
+
+ from langchain_tests.integration_tests import ChatModelIntegrationTests
+
+ from langchain_ollama.chat_models import ChatOllama
+
+
+ class TestChatOllama(ChatModelIntegrationTests):
+ @property
+ def chat_model_class(self) -> Type[ChatOllama]:
+ return ChatOllama
+
+ @property
+ def chat_model_params(self) -> dict:
+ return {"model": "llama3.1"}
+
+ @property
+ def supports_image_inputs(self) -> bool:
+ return True
+
+ @property
+ def supports_json_mode(self) -> bool:
+ return True
+
+ @property
+ def has_tool_choice(self) -> bool:
+ return False
@@ -0,0 +1,7 @@
+ import pytest
+
+
+ @pytest.mark.compile
+ def test_placeholder() -> None:
+ """Used for compiling integration tests without running any real tests."""
+ pass
@@ -0,0 +1,17 @@
+ """Test Ollama embeddings."""
+
+ from typing import Type
+
+ from langchain_tests.integration_tests import EmbeddingsIntegrationTests
+
+ from langchain_ollama.embeddings import OllamaEmbeddings
+
+
+ class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
+ @property
+ def embeddings_class(self) -> Type[OllamaEmbeddings]:
+ return OllamaEmbeddings
+
+ @property
+ def embedding_model_params(self) -> dict:
+ return {"model": "llama3:latest"}
@@ -0,0 +1,66 @@
+ """Test OllamaLLM llm."""
+
+ from langchain_ollama.llms import OllamaLLM
+
+ MODEL_NAME = "llama3"
+
+
+ def test_stream() -> None:
+ """Test streaming tokens from OpenAI."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ for token in llm.stream("I'm Pickle Rick"):
+ assert isinstance(token, str)
+
+
+ async def test_astream() -> None:
+ """Test streaming tokens from OpenAI."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ async for token in llm.astream("I'm Pickle Rick"):
+ assert isinstance(token, str)
+
+
+ async def test_abatch() -> None:
+ """Test streaming tokens from OllamaLLM."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
+ for token in result:
+ assert isinstance(token, str)
+
+
+ async def test_abatch_tags() -> None:
+ """Test batch tokens from OllamaLLM."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ result = await llm.abatch(
+ ["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
+ )
+ for token in result:
+ assert isinstance(token, str)
+
+
+ def test_batch() -> None:
+ """Test batch tokens from OllamaLLM."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
+ for token in result:
+ assert isinstance(token, str)
+
+
+ async def test_ainvoke() -> None:
+ """Test invoke tokens from OllamaLLM."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
+ assert isinstance(result, str)
+
+
+ def test_invoke() -> None:
+ """Test invoke tokens from OllamaLLM."""
+ llm = OllamaLLM(model=MODEL_NAME)
+
+ result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
+ assert isinstance(result, str)
File without changes
@@ -0,0 +1,25 @@
+ """Test chat model integration."""
+ import json
+ from typing import Dict, Type
+
+ from langchain_tests.unit_tests import ChatModelUnitTests
+
+ from langchain_ollama.chat_models import ChatOllama, _parse_arguments_from_tool_call
+
+
+ class TestChatOllama(ChatModelUnitTests):
+ @property
+ def chat_model_class(self) -> Type[ChatOllama]:
+ return ChatOllama
+
+ @property
+ def chat_model_params(self) -> Dict:
+ return {"model": "llama3-groq-tool-use"}
+
+
+ def test__parse_arguments_from_tool_call() -> None:
+ raw_response = '{"model":"sample-model","message":{"role":"assistant","content":"","tool_calls":[{"function":{"name":"get_profile_details","arguments":{"arg_1":"12345678901234567890123456"}}}]},"done":false}' # noqa: E501
+ raw_tool_calls = json.loads(raw_response)["message"]["tool_calls"]
+ response = _parse_arguments_from_tool_call(raw_tool_calls[0])
+ assert response is not None
+ assert isinstance(response["arg_1"], str)
@@ -0,0 +1,8 @@
+ """Test embedding model integration."""
+
+ from langchain_ollama.embeddings import OllamaEmbeddings
+
+
+ def test_initialization() -> None:
+ """Test embedding model initialization."""
+ OllamaEmbeddings(model="llama3", keep_alive=1)
@@ -0,0 +1,12 @@
+ from langchain_ollama import __all__
+
+ EXPECTED_ALL = [
+ "OllamaLLM",
+ "ChatOllama",
+ "OllamaEmbeddings",
+ "__version__",
+ ]
+
+
+ def test_all_imports() -> None:
+ assert sorted(EXPECTED_ALL) == sorted(__all__)
@@ -0,0 +1,28 @@
+ """Test Ollama Chat API wrapper."""
+
+ from langchain_ollama import OllamaLLM
+
+
+ def test_initialization() -> None:
+ """Test integration initialization."""
+ OllamaLLM(model="llama3")
+
+
+ def test_model_params() -> None:
+ # Test standard tracing params
+ llm = OllamaLLM(model="llama3")
+ ls_params = llm._get_ls_params()
+ assert ls_params == {
+ "ls_provider": "ollama",
+ "ls_model_type": "llm",
+ "ls_model_name": "llama3",
+ }
+
+ llm = OllamaLLM(model="llama3", num_predict=3)
+ ls_params = llm._get_ls_params()
+ assert ls_params == {
+ "ls_provider": "ollama",
+ "ls_model_type": "llm",
+ "ls_model_name": "llama3",
+ "ls_max_tokens": 3,
+ }
@@ -1,99 +0,0 @@
- [build-system]
- requires = ["poetry-core>=1.0.0"]
- build-backend = "poetry.core.masonry.api"
-
- [tool.poetry]
- name = "langchain-ollama"
- version = "0.2.3"
- description = "An integration package connecting Ollama and LangChain"
- authors = []
- readme = "README.md"
- repository = "https://github.com/langchain-ai/langchain"
- license = "MIT"
-
- [tool.mypy]
- disallow_untyped_defs = "True"
-
- [tool.poetry.urls]
- "Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/partners/ollama"
- "Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-ollama%3D%3D0%22&expanded=true"
-
- [tool.poetry.dependencies]
- python = ">=3.9,<4.0"
- ollama = ">=0.4.4,<1"
- langchain-core = "^0.3.33"
-
- [tool.ruff.lint]
- select = [
- "E", # pycodestyle
- "F", # pyflakes
- "I", # isort
- "T201", # print
- "D", # pydocstyle
-
- ]
-
- [tool.ruff.lint.pydocstyle]
- convention = "google"
-
- [tool.ruff.lint.per-file-ignores]
- "tests/**" = ["D"] # ignore docstring checks for tests
-
- [tool.coverage.run]
- omit = ["tests/*"]
-
- [tool.pytest.ini_options]
- addopts = "--snapshot-warn-unused --strict-markers --strict-config --durations=5"
- markers = [
- "compile: mark placeholder test used to compile integration tests without running them",
- ]
- asyncio_mode = "auto"
-
- [tool.poetry.group.test]
- optional = true
-
- [tool.poetry.group.codespell]
- optional = true
-
- [tool.poetry.group.test_integration]
- optional = true
-
- [tool.poetry.group.lint]
- optional = true
-
- [tool.poetry.group.dev]
- optional = true
-
- [tool.poetry.group.test.dependencies]
- pytest = "^7.4.3"
- pytest-asyncio = "^0.23.2"
- syrupy = "^4.0.2"
- pytest-socket = "^0.7.0"
- pytest-watcher = "^0.3.4"
-
- [tool.poetry.group.codespell.dependencies]
- codespell = "^2.2.6"
-
- [tool.poetry.group.test_integration.dependencies]
-
- [tool.poetry.group.lint.dependencies]
- ruff = "^0.1.8"
-
- [tool.poetry.group.typing.dependencies]
- mypy = "^1.7.1"
-
- [tool.poetry.group.test.dependencies.langchain-core]
- path = "../../core"
- develop = true
-
- [tool.poetry.group.test.dependencies.langchain-tests]
- path = "../../standard-tests"
- develop = true
-
- [tool.poetry.group.typing.dependencies.langchain-core]
- path = "../../core"
- develop = true
-
- [tool.poetry.group.dev.dependencies.langchain-core]
- path = "../../core"
- develop = true