langchain-core 1.0.0a1__py3-none-any.whl → 1.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (131)
  1. langchain_core/_api/beta_decorator.py +17 -40
  2. langchain_core/_api/deprecation.py +20 -7
  3. langchain_core/_api/path.py +19 -2
  4. langchain_core/_import_utils.py +7 -0
  5. langchain_core/agents.py +10 -6
  6. langchain_core/callbacks/base.py +28 -15
  7. langchain_core/callbacks/manager.py +81 -69
  8. langchain_core/callbacks/usage.py +4 -2
  9. langchain_core/chat_history.py +29 -21
  10. langchain_core/document_loaders/base.py +34 -9
  11. langchain_core/document_loaders/langsmith.py +3 -0
  12. langchain_core/documents/base.py +35 -10
  13. langchain_core/documents/transformers.py +4 -2
  14. langchain_core/embeddings/fake.py +8 -5
  15. langchain_core/env.py +2 -3
  16. langchain_core/example_selectors/base.py +12 -0
  17. langchain_core/exceptions.py +7 -0
  18. langchain_core/globals.py +17 -28
  19. langchain_core/indexing/api.py +57 -45
  20. langchain_core/indexing/base.py +5 -8
  21. langchain_core/indexing/in_memory.py +23 -3
  22. langchain_core/language_models/__init__.py +6 -2
  23. langchain_core/language_models/_utils.py +28 -4
  24. langchain_core/language_models/base.py +33 -21
  25. langchain_core/language_models/chat_models.py +103 -29
  26. langchain_core/language_models/fake_chat_models.py +5 -7
  27. langchain_core/language_models/llms.py +54 -20
  28. langchain_core/load/dump.py +2 -3
  29. langchain_core/load/load.py +15 -1
  30. langchain_core/load/serializable.py +38 -43
  31. langchain_core/memory.py +7 -3
  32. langchain_core/messages/__init__.py +7 -17
  33. langchain_core/messages/ai.py +41 -34
  34. langchain_core/messages/base.py +16 -7
  35. langchain_core/messages/block_translators/__init__.py +10 -8
  36. langchain_core/messages/block_translators/anthropic.py +3 -1
  37. langchain_core/messages/block_translators/bedrock.py +3 -1
  38. langchain_core/messages/block_translators/bedrock_converse.py +3 -1
  39. langchain_core/messages/block_translators/google_genai.py +3 -1
  40. langchain_core/messages/block_translators/google_vertexai.py +3 -1
  41. langchain_core/messages/block_translators/groq.py +3 -1
  42. langchain_core/messages/block_translators/langchain_v0.py +3 -136
  43. langchain_core/messages/block_translators/ollama.py +3 -1
  44. langchain_core/messages/block_translators/openai.py +252 -10
  45. langchain_core/messages/content.py +26 -124
  46. langchain_core/messages/human.py +2 -13
  47. langchain_core/messages/system.py +2 -6
  48. langchain_core/messages/tool.py +34 -14
  49. langchain_core/messages/utils.py +189 -74
  50. langchain_core/output_parsers/base.py +5 -2
  51. langchain_core/output_parsers/json.py +4 -4
  52. langchain_core/output_parsers/list.py +7 -22
  53. langchain_core/output_parsers/openai_functions.py +3 -0
  54. langchain_core/output_parsers/openai_tools.py +6 -1
  55. langchain_core/output_parsers/pydantic.py +4 -0
  56. langchain_core/output_parsers/string.py +5 -1
  57. langchain_core/output_parsers/xml.py +19 -19
  58. langchain_core/outputs/chat_generation.py +18 -7
  59. langchain_core/outputs/generation.py +14 -3
  60. langchain_core/outputs/llm_result.py +8 -1
  61. langchain_core/prompt_values.py +10 -4
  62. langchain_core/prompts/base.py +6 -11
  63. langchain_core/prompts/chat.py +88 -60
  64. langchain_core/prompts/dict.py +16 -8
  65. langchain_core/prompts/few_shot.py +9 -11
  66. langchain_core/prompts/few_shot_with_templates.py +5 -1
  67. langchain_core/prompts/image.py +12 -5
  68. langchain_core/prompts/loading.py +2 -2
  69. langchain_core/prompts/message.py +5 -6
  70. langchain_core/prompts/pipeline.py +13 -8
  71. langchain_core/prompts/prompt.py +22 -8
  72. langchain_core/prompts/string.py +18 -10
  73. langchain_core/prompts/structured.py +7 -2
  74. langchain_core/rate_limiters.py +2 -2
  75. langchain_core/retrievers.py +7 -6
  76. langchain_core/runnables/base.py +387 -246
  77. langchain_core/runnables/branch.py +11 -28
  78. langchain_core/runnables/config.py +20 -17
  79. langchain_core/runnables/configurable.py +34 -19
  80. langchain_core/runnables/fallbacks.py +20 -13
  81. langchain_core/runnables/graph.py +48 -38
  82. langchain_core/runnables/graph_ascii.py +40 -17
  83. langchain_core/runnables/graph_mermaid.py +54 -25
  84. langchain_core/runnables/graph_png.py +27 -31
  85. langchain_core/runnables/history.py +55 -58
  86. langchain_core/runnables/passthrough.py +44 -21
  87. langchain_core/runnables/retry.py +44 -23
  88. langchain_core/runnables/router.py +9 -8
  89. langchain_core/runnables/schema.py +9 -0
  90. langchain_core/runnables/utils.py +53 -90
  91. langchain_core/stores.py +19 -31
  92. langchain_core/sys_info.py +9 -8
  93. langchain_core/tools/base.py +36 -27
  94. langchain_core/tools/convert.py +25 -14
  95. langchain_core/tools/simple.py +36 -8
  96. langchain_core/tools/structured.py +25 -12
  97. langchain_core/tracers/base.py +2 -2
  98. langchain_core/tracers/context.py +5 -1
  99. langchain_core/tracers/core.py +110 -46
  100. langchain_core/tracers/evaluation.py +22 -26
  101. langchain_core/tracers/event_stream.py +97 -42
  102. langchain_core/tracers/langchain.py +12 -3
  103. langchain_core/tracers/langchain_v1.py +10 -2
  104. langchain_core/tracers/log_stream.py +56 -17
  105. langchain_core/tracers/root_listeners.py +4 -20
  106. langchain_core/tracers/run_collector.py +6 -16
  107. langchain_core/tracers/schemas.py +5 -1
  108. langchain_core/utils/aiter.py +14 -6
  109. langchain_core/utils/env.py +3 -0
  110. langchain_core/utils/function_calling.py +46 -20
  111. langchain_core/utils/interactive_env.py +6 -2
  112. langchain_core/utils/iter.py +12 -5
  113. langchain_core/utils/json.py +12 -3
  114. langchain_core/utils/json_schema.py +156 -40
  115. langchain_core/utils/loading.py +5 -1
  116. langchain_core/utils/mustache.py +25 -16
  117. langchain_core/utils/pydantic.py +38 -9
  118. langchain_core/utils/utils.py +25 -9
  119. langchain_core/vectorstores/base.py +7 -20
  120. langchain_core/vectorstores/in_memory.py +20 -14
  121. langchain_core/vectorstores/utils.py +18 -12
  122. langchain_core/version.py +1 -1
  123. langchain_core-1.0.0a3.dist-info/METADATA +77 -0
  124. langchain_core-1.0.0a3.dist-info/RECORD +181 -0
  125. langchain_core/beta/__init__.py +0 -1
  126. langchain_core/beta/runnables/__init__.py +0 -1
  127. langchain_core/beta/runnables/context.py +0 -448
  128. langchain_core-1.0.0a1.dist-info/METADATA +0 -106
  129. langchain_core-1.0.0a1.dist-info/RECORD +0 -184
  130. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/WHEEL +0 -0
  131. {langchain_core-1.0.0a1.dist-info → langchain_core-1.0.0a3.dist-info}/entry_points.txt +0 -0
langchain_core/utils/pydantic.py CHANGED
@@ -57,6 +57,9 @@ def get_pydantic_major_version() -> int:
     """DEPRECATED - Get the major version of Pydantic.
 
     Use PYDANTIC_VERSION.major instead.
+
+    Returns:
+        The major version of Pydantic.
     """
     return PYDANTIC_VERSION.major
 
@@ -74,12 +77,20 @@ TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
 
 
 def is_pydantic_v1_subclass(cls: type) -> bool:
-    """Check if the installed Pydantic version is 1.x-like."""
+    """Check if the given class is Pydantic v1-like.
+
+    Returns:
+        True if the given class is a subclass of Pydantic ``BaseModel`` 1.x.
+    """
     return issubclass(cls, BaseModelV1)
 
 
 def is_pydantic_v2_subclass(cls: type) -> bool:
-    """Check if the installed Pydantic version is 1.x-like."""
+    """Check if the given class is Pydantic v2-like.
+
+    Returns:
+        True if the given class is a subclass of Pydantic BaseModel 2.x.
+    """
     return issubclass(cls, BaseModel)
 
 
@@ -90,6 +101,9 @@ def is_basemodel_subclass(cls: type) -> bool:
 
     * pydantic.BaseModel in Pydantic 2.x
     * pydantic.v1.BaseModel in Pydantic 2.x
+
+    Returns:
+        True if the given class is a subclass of Pydantic ``BaseModel``.
     """
     # Before we can use issubclass on the cls we need to check if it is a class
     if not inspect.isclass(cls) or isinstance(cls, GenericAlias):
@@ -105,6 +119,9 @@ def is_basemodel_instance(obj: Any) -> bool:
 
     * pydantic.BaseModel in Pydantic 2.x
     * pydantic.v1.BaseModel in Pydantic 2.x
+
+    Returns:
+        True if the given class is an instance of Pydantic ``BaseModel``.
     """
     return isinstance(obj, (BaseModel, BaseModelV1))
 
@@ -125,7 +142,7 @@ def pre_init(func: Callable) -> Any:
     # Ideally we would use @model_validator(mode="before") but this would change the
     # order of the validators. See https://github.com/pydantic/pydantic/discussions/7434.
     # So we keep root_validator for backward compatibility.
-    @root_validator(pre=True)
+    @root_validator(pre=True)  # type: ignore[deprecated]
     @wraps(func)
     def wrapper(cls: type[BaseModel], values: dict[str, Any]) -> dict[str, Any]:
         """Decorator to run a function before model initialization.
@@ -262,7 +279,11 @@ def _create_subset_model(
     descriptions: Optional[dict] = None,
     fn_description: Optional[str] = None,
 ) -> type[BaseModel]:
-    """Create subset model using the same pydantic version as the input model."""
+    """Create subset model using the same pydantic version as the input model.
+
+    Returns:
+        The created subset model.
+    """
     if issubclass(model, BaseModelV1):
         return _create_subset_model_v1(
             name,
@@ -299,13 +320,21 @@ def get_fields(model: BaseModelV1) -> dict[str, ModelField]: ...
 def get_fields(
     model: Union[type[Union[BaseModel, BaseModelV1]], BaseModel, BaseModelV1],
 ) -> Union[dict[str, FieldInfoV2], dict[str, ModelField]]:
-    """Get the field names of a Pydantic model."""
-    if hasattr(model, "model_fields"):
-        return model.model_fields
+    """Return the field names of a Pydantic model.
 
-    if hasattr(model, "__fields__"):
+    Args:
+        model: The Pydantic model or instance.
+
+    Raises:
+        TypeError: If the model is not a Pydantic model.
+    """
+    if not isinstance(model, type):
+        model = type(model)
+    if issubclass(model, BaseModel):
+        return model.model_fields
+    if issubclass(model, BaseModelV1):
         return model.__fields__
-    msg = f"Expected a Pydantic model. Got {type(model)}"
+    msg = f"Expected a Pydantic model. Got {model}"
     raise TypeError(msg)
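
The rewrite of `get_fields` above replaces duck-typed `hasattr` checks with explicit class dispatch, and now accepts instances by normalizing them to their class first. A minimal sketch of the resulting behavior (the `Config` model below is hypothetical, for illustration only):

```python
from pydantic import BaseModel

from langchain_core.utils.pydantic import get_fields


class Config(BaseModel):
    host: str = "localhost"
    port: int = 8080


# Classes and instances are now handled the same way.
print(get_fields(Config).keys())    # dict_keys(['host', 'port'])
print(get_fields(Config()).keys())  # dict_keys(['host', 'port'])
```

One plausible motivation: looking up `model_fields` on the class via `issubclass` avoids the instance-attribute access that newer Pydantic releases deprecate.
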
langchain_core/utils/utils.py CHANGED
@@ -22,17 +22,14 @@ from langchain_core.utils.pydantic import (
 
 
 def xor_args(*arg_groups: tuple[str, ...]) -> Callable:
-    """Validate specified keyword args are mutually exclusive.".
+    """Validate specified keyword args are mutually exclusive.
 
     Args:
         *arg_groups (tuple[str, ...]): Groups of mutually exclusive keyword args.
 
     Returns:
         Callable: Decorator that validates the specified keyword args
-            are mutually exclusive
-
-    Raises:
-        ValueError: If more than one arg in a group is defined.
+            are mutually exclusive.
     """
 
     def decorator(func: Callable) -> Callable:
@@ -138,7 +135,7 @@ def guard_import(
     try:
         module = importlib.import_module(module_name, package)
     except (ImportError, ModuleNotFoundError) as e:
-        pip_name = pip_name or module_name.split(".")[0].replace("_", "-")
+        pip_name = pip_name or module_name.split(".", maxsplit=1)[0].replace("_", "-")
        msg = (
            f"Could not import {module_name} python package. "
            f"Please install it with `pip install {pip_name}`."
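
The `maxsplit=1` change is a micro-optimization: only the first dotted segment is needed, so there is no reason to split the whole module path. The fallback pip name is still derived the same way, as this small sketch of the expression shows:

```python
def fallback_pip_name(module_name: str) -> str:
    # Keep the top-level package and swap underscores for hyphens,
    # e.g. "ruamel_yaml.main" -> "ruamel-yaml", "numpy.linalg" -> "numpy".
    return module_name.split(".", maxsplit=1)[0].replace("_", "-")


assert fallback_pip_name("ruamel_yaml.main") == "ruamel-yaml"
assert fallback_pip_name("numpy.linalg") == "numpy"
```

The derived name is only a best-effort default; callers can still pass `pip_name` explicitly when the distribution name differs from the import name.
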
@@ -224,7 +221,7 @@ def _build_model_kwargs(
     values: dict[str, Any],
     all_required_field_names: set[str],
 ) -> dict[str, Any]:
-    """Build "model_kwargs" param from Pydanitc constructor values.
+    """Build "model_kwargs" param from Pydantic constructor values.
 
     Args:
         values: All init args passed in by user.
@@ -382,10 +379,21 @@ def from_env(
         error_message: the error message which will be raised if the key is not found
             and no default value is provided.
             This will be raised as a ValueError.
+
+    Returns:
+        factory method that will look up the value from the environment.
     """
 
     def get_from_env_fn() -> Optional[str]:
-        """Get a value from an environment variable."""
+        """Get a value from an environment variable.
+
+        Raises:
+            ValueError: If the environment variable is not set and no default is
+                provided.
+
+        Returns:
+            The value from the environment.
+        """
         if isinstance(key, (list, tuple)):
             for k in key:
                 if k in os.environ:
@@ -446,7 +454,15 @@ def secret_from_env(
     """
 
     def get_secret_from_env() -> Optional[SecretStr]:
-        """Get a value from an environment variable."""
+        """Get a value from an environment variable.
+
+        Raises:
+            ValueError: If the environment variable is not set and no default is
+                provided.
+
+        Returns:
+            The secret from the environment.
+        """
         if isinstance(key, (list, tuple)):
             for k in key:
                 if k in os.environ:
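
Both helpers return zero-argument factories, which is what makes them useful as pydantic `default_factory` callables. A hedged usage sketch (the model and environment-variable names are hypothetical):

```python
from pydantic import BaseModel, Field, SecretStr

from langchain_core.utils.utils import from_env, secret_from_env


class MyClientConfig(BaseModel):
    # Falls back to the default when MY_BASE_URL is unset.
    base_url: str = Field(
        default_factory=from_env("MY_BASE_URL", default="https://example.invalid")
    )
    # No default: instantiating without MY_API_KEY set raises ValueError.
    api_key: SecretStr = Field(default_factory=secret_from_env("MY_API_KEY"))
```

The newly documented `Raises:` sections match this behavior: the factories defer the lookup, and any error, to instantiation time rather than import time.
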
langchain_core/vectorstores/base.py CHANGED
@@ -38,6 +38,7 @@ from typing import (
 from pydantic import ConfigDict, Field, model_validator
 from typing_extensions import Self, override
 
+from langchain_core.documents import Document
 from langchain_core.embeddings import Embeddings
 from langchain_core.retrievers import BaseRetriever, LangSmithRetrieverParams
 from langchain_core.runnables.config import run_in_executor
@@ -49,7 +50,6 @@ if TYPE_CHECKING:
         AsyncCallbackManagerForRetrieverRun,
         CallbackManagerForRetrieverRun,
     )
-    from langchain_core.documents import Document
 
 logger = logging.getLogger(__name__)
 
@@ -85,9 +85,6 @@ class VectorStore(ABC):
             ValueError: If the number of ids does not match the number of texts.
         """
         if type(self).add_documents != VectorStore.add_documents:
-            # Import document in local scope to avoid circular imports
-            from langchain_core.documents import Document
-
             # This condition is triggered if the subclass has provided
             # an implementation of the upsert method.
             # The existing add_texts
@@ -234,9 +231,6 @@ class VectorStore(ABC):
         # For backward compatibility
         kwargs["ids"] = ids
         if type(self).aadd_documents != VectorStore.aadd_documents:
-            # Import document in local scope to avoid circular imports
-            from langchain_core.documents import Document
-
             # This condition is triggered if the subclass has provided
             # an implementation of the upsert method.
             # The existing add_texts
@@ -270,9 +264,6 @@ class VectorStore(ABC):
 
         Returns:
             List of IDs of the added texts.
-
-        Raises:
-            ValueError: If the number of ids does not match the number of documents.
         """
         if type(self).add_texts != VectorStore.add_texts:
             if "ids" not in kwargs:
@@ -303,9 +294,6 @@ class VectorStore(ABC):
 
         Returns:
             List of IDs of the added texts.
-
-        Raises:
-            ValueError: If the number of IDs does not match the number of documents.
         """
         # If the async method has been overridden, we'll use that.
         if type(self).aadd_texts != VectorStore.aadd_texts:
@@ -435,6 +423,7 @@ class VectorStore(ABC):
         """The 'correct' relevance function.
 
         may differ depending on a few things, including:
+
         - the distance / similarity metric used by the VectorStore
         - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
         - embedding dimensionality
@@ -969,30 +958,28 @@ class VectorStore(ABC):
             # Retrieve more documents with higher diversity
             # Useful if your dataset has many similar documents
             docsearch.as_retriever(
-                search_type="mmr",
-                search_kwargs={'k': 6, 'lambda_mult': 0.25}
+                search_type="mmr", search_kwargs={"k": 6, "lambda_mult": 0.25}
             )
 
             # Fetch more documents for the MMR algorithm to consider
             # But only return the top 5
             docsearch.as_retriever(
-                search_type="mmr",
-                search_kwargs={'k': 5, 'fetch_k': 50}
+                search_type="mmr", search_kwargs={"k": 5, "fetch_k": 50}
             )
 
             # Only retrieve documents that have a relevance score
             # Above a certain threshold
             docsearch.as_retriever(
                 search_type="similarity_score_threshold",
-                search_kwargs={'score_threshold': 0.8}
+                search_kwargs={"score_threshold": 0.8},
             )
 
             # Only get the single most similar document from the dataset
-            docsearch.as_retriever(search_kwargs={'k': 1})
+            docsearch.as_retriever(search_kwargs={"k": 1})
 
             # Use a filter to only retrieve documents from a specific paper
             docsearch.as_retriever(
-                search_kwargs={'filter': {'paper_title':'GPT-4 Technical Report'}}
+                search_kwargs={"filter": {"paper_title": "GPT-4 Technical Report"}}
             )
 
         """
langchain_core/vectorstores/in_memory.py CHANGED
@@ -27,6 +27,13 @@ if TYPE_CHECKING:
     from langchain_core.embeddings import Embeddings
     from langchain_core.indexing import UpsertResponse
 
+try:
+    import numpy as np
+
+    _HAS_NUMPY = True
+except ImportError:
+    _HAS_NUMPY = False
+
 
 class InMemoryVectorStore(VectorStore):
     """In-memory vector store implementation.
@@ -83,7 +90,7 @@ class InMemoryVectorStore(VectorStore):
     Search:
         .. code-block:: python
 
-            results = vector_store.similarity_search(query="thud",k=1)
+            results = vector_store.similarity_search(query="thud", k=1)
             for doc in results:
                 print(f"* {doc.page_content} [{doc.metadata}]")
 
@@ -97,6 +104,7 @@ class InMemoryVectorStore(VectorStore):
             def _filter_function(doc: Document) -> bool:
                 return doc.metadata.get("bar") == "baz"
 
+
             results = vector_store.similarity_search(
                 query="thud", k=1, filter=_filter_function
             )
@@ -111,9 +119,7 @@ class InMemoryVectorStore(VectorStore):
     Search with score:
         .. code-block:: python
 
-            results = vector_store.similarity_search_with_score(
-                query="qux", k=1
-            )
+            results = vector_store.similarity_search_with_score(query="qux", k=1)
             for doc, score in results:
                 print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
 
@@ -135,7 +141,7 @@ class InMemoryVectorStore(VectorStore):
 
             # search with score
             results = await vector_store.asimilarity_search_with_score(query="qux", k=1)
-            for doc,score in results:
+            for doc, score in results:
                 print(f"* [SIM={score:3f}] {doc.page_content} [{doc.metadata}]")
 
         .. code-block:: none
@@ -190,7 +196,6 @@ class InMemoryVectorStore(VectorStore):
         ids: Optional[list[str]] = None,
         **kwargs: Any,
     ) -> list[str]:
-        """Add documents to the store."""
         texts = [doc.page_content for doc in documents]
         vectors = self.embedding.embed_documents(texts)
 
@@ -224,7 +229,6 @@ class InMemoryVectorStore(VectorStore):
     async def aadd_documents(
         self, documents: list[Document], ids: Optional[list[str]] = None, **kwargs: Any
     ) -> list[str]:
-        """Add documents to the store."""
         texts = [doc.page_content for doc in documents]
         vectors = await self.embedding.aembed_documents(texts)
 
@@ -372,7 +376,11 @@ class InMemoryVectorStore(VectorStore):
         docs = [
             doc
             for doc in docs
-            if filter(Document(page_content=doc["text"], metadata=doc["metadata"]))
+            if filter(
+                Document(
+                    id=doc["id"], page_content=doc["text"], metadata=doc["metadata"]
+                )
+            )
         ]
 
         if not docs:
@@ -499,14 +507,12 @@ class InMemoryVectorStore(VectorStore):
             filter=filter,
         )
 
-        try:
-            import numpy as np
-        except ImportError as e:
+        if not _HAS_NUMPY:
             msg = (
                 "numpy must be installed to use max_marginal_relevance_search "
                 "pip install numpy"
             )
-            raise ImportError(msg) from e
+            raise ImportError(msg)
 
         mmr_chosen_indices = maximal_marginal_relevance(
             np.array(embedding, dtype=np.float32),
@@ -597,7 +603,7 @@ class InMemoryVectorStore(VectorStore):
             A VectorStore object.
         """
         path_: Path = Path(path)
-        with path_.open("r") as f:
+        with path_.open("r", encoding="utf-8") as f:
             store = load(json.load(f))
         vectorstore = cls(embedding=embedding, **kwargs)
         vectorstore.store = store
@@ -611,5 +617,5 @@ class InMemoryVectorStore(VectorStore):
         """
         path_: Path = Path(path)
         path_.parent.mkdir(exist_ok=True, parents=True)
-        with path_.open("w") as f:
+        with path_.open("w", encoding="utf-8") as f:
             json.dump(dumpd(self.store), f, indent=2)
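
The explicit `encoding="utf-8"` on both `open` calls makes the persisted store portable across platforms whose locale default is not UTF-8. A hedged round-trip sketch of the `dump`/`load` pair touched above (the deterministic fake embedding is used purely for illustration):

```python
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

embedding = DeterministicFakeEmbedding(size=16)
store = InMemoryVectorStore(embedding=embedding)
store.add_texts(["foo", "bar"])

store.dump("store.json")  # now written with encoding="utf-8"
restored = InMemoryVectorStore.load("store.json", embedding=embedding)
print(restored.similarity_search("foo", k=1)[0].page_content)  # foo
```
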
langchain_core/vectorstores/utils.py CHANGED
@@ -10,9 +10,21 @@ import logging
 import warnings
 from typing import TYPE_CHECKING, Union
 
-if TYPE_CHECKING:
+try:
     import numpy as np
 
+    _HAS_NUMPY = True
+except ImportError:
+    _HAS_NUMPY = False
+
+try:
+    import simsimd as simd  # type: ignore[import-not-found]
+
+    _HAS_SIMSIMD = True
+except ImportError:
+    _HAS_SIMSIMD = False
+
+if TYPE_CHECKING:
     Matrix = Union[list[list[float]], list[np.ndarray], np.ndarray]
 
 logger = logging.getLogger(__name__)
@@ -33,14 +45,12 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
         ValueError: If the number of columns in X and Y are not the same.
         ImportError: If numpy is not installed.
     """
-    try:
-        import numpy as np
-    except ImportError as e:
+    if not _HAS_NUMPY:
         msg = (
             "cosine_similarity requires numpy to be installed. "
             "Please install numpy with `pip install numpy`."
         )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
 
     if len(x) == 0 or len(y) == 0:
         return np.array([[]])
@@ -70,9 +80,7 @@ def _cosine_similarity(x: Matrix, y: Matrix) -> np.ndarray:
             f"and Y has shape {y.shape}."
         )
         raise ValueError(msg)
-    try:
-        import simsimd as simd  # type: ignore[import-not-found]
-    except ImportError:
+    if not _HAS_SIMSIMD:
         logger.debug(
             "Unable to import simsimd, defaulting to NumPy implementation. If you want "
             "to use simsimd please install with `pip install simsimd`."
@@ -113,14 +121,12 @@ def maximal_marginal_relevance(
     Raises:
         ImportError: If numpy is not installed.
     """
-    try:
-        import numpy as np
-    except ImportError as e:
+    if not _HAS_NUMPY:
         msg = (
             "maximal_marginal_relevance requires numpy to be installed. "
             "Please install numpy with `pip install numpy`."
         )
-        raise ImportError(msg) from e
+        raise ImportError(msg)
 
     if min(k, len(embedding_list)) <= 0:
         return []
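
Every call site in this file (and in `in_memory.py` above) now shares a module-level probe instead of re-running `try: import numpy` inside each function. A minimal sketch of the pattern, assuming an optional `numpy` dependency:

```python
# Probe once at import time; call sites just check the flag.
try:
    import numpy as np

    _HAS_NUMPY = True
except ImportError:
    _HAS_NUMPY = False


def to_float32_array(values: list[float]) -> "np.ndarray":
    if not _HAS_NUMPY:
        msg = "numpy is required here. Please install it with `pip install numpy`."
        raise ImportError(msg)
    return np.asarray(values, dtype=np.float32)
```

One consequence of the move: the original `ImportError` is no longer in scope at the call site, which is why the `raise ImportError(msg) from e` chaining is dropped in favor of a plain `raise ImportError(msg)`.
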
langchain_core/version.py CHANGED
@@ -1,3 +1,3 @@
 """langchain-core version information and utilities."""
 
-VERSION = "1.0.0a1"
+VERSION = "1.0.0a3"
langchain_core-1.0.0a3.dist-info/METADATA ADDED
@@ -0,0 +1,77 @@
+Metadata-Version: 2.1
+Name: langchain-core
+Version: 1.0.0a3
+Summary: Building applications with LLMs through composability
+License: MIT
+Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core
+Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true
+Project-URL: repository, https://github.com/langchain-ai/langchain
+Requires-Python: >=3.10
+Requires-Dist: langsmith>=0.3.45
+Requires-Dist: tenacity!=8.4.0,<10.0.0,>=8.1.0
+Requires-Dist: jsonpatch<2.0,>=1.33
+Requires-Dist: PyYAML>=5.3
+Requires-Dist: typing-extensions>=4.7
+Requires-Dist: packaging>=23.2
+Requires-Dist: pydantic>=2.7.4
+Description-Content-Type: text/markdown
+
+# 🦜🍎️ LangChain Core
+
+[![PyPI - License](https://img.shields.io/pypi/l/langchain-core?style=flat-square)](https://opensource.org/licenses/MIT)
+[![PyPI - Downloads](https://img.shields.io/pepy/dt/langchain-core)](https://pypistats.org/packages/langchain-core)
+
+## Quick Install
+
+```bash
+pip install langchain-core
+```
+
+## What is it?
+
+LangChain Core contains the base abstractions that power the LangChain ecosystem.
+
+These abstractions are designed to be as modular and simple as possible.
+
+The benefit of having these abstractions is that any provider can implement the required interface and then easily be used in the rest of the LangChain ecosystem.
+
+For full documentation see the [API reference](https://python.langchain.com/api_reference/core/index.html).
+
+## ⛰️ Why build on top of LangChain Core?
+
+The LangChain ecosystem is built on top of `langchain-core`. Some of the benefits:
+
+- **Modularity**: We've designed Core around abstractions that are independent of each other, and not tied to any specific model provider.
+- **Stability**: We are committed to a stable versioning scheme, and will communicate any breaking changes with advance notice and version bumps.
+- **Battle-tested**: Core components have the largest install base in the LLM ecosystem, and are used in production by many companies.
+
+## 1️⃣ Core Interface: Runnables
+
+The concept of a `Runnable` is central to LangChain Core – it is the interface that most LangChain Core components implement, giving them
+
+- A common invocation interface (`invoke()`, `batch()`, `stream()`, etc.)
+- Built-in utilities for retries, fallbacks, schemas and runtime configurability
+- Easy deployment with [LangGraph](https://github.com/langchain-ai/langgraph)
+
+For more check out the [`Runnable` docs](https://python.langchain.com/docs/concepts/runnables/). Examples of components that implement the interface include: Chat Models, Tools, Retrievers, and Output Parsers.
+
+## 📕 Releases & Versioning
+
+As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so.
+
+Minor version increases will occur for:
+
+- Breaking changes for any public interfaces NOT in `langchain_core.beta`
+
+Patch version increases will occur for:
+
+- Bug fixes
+- New features
+- Any changes to private interfaces
+- Any changes to `langchain_core.beta`
+
+## 💁 Contributing
+
+As an open-source project in a rapidly developing field, we are extremely open to contributions, whether it be in the form of a new feature, improved infrastructure, or better documentation.
+
+For detailed information on how to contribute, see the [Contributing Guide](https://python.langchain.com/docs/contributing/).
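
The README's `Runnable` section above is easiest to grasp with a few lines of code. A minimal sketch of the common invocation interface, using `RunnableLambda` (this example is illustrative and not part of the diff):

```python
from langchain_core.runnables import RunnableLambda

# Any Runnable exposes the same invocation surface.
double = RunnableLambda(lambda x: x * 2)

print(double.invoke(3))         # 6
print(double.batch([1, 2, 3]))  # [2, 4, 6]
for chunk in double.stream(4):
    print(chunk)                # 8
```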