langchain 0.3.14__py3-none-any.whl → 0.3.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain might be problematic. Click here for more details.
- langchain/agents/openai_assistant/base.py +64 -5
- langchain/chains/moderation.py +2 -2
- langchain/model_laboratory.py +20 -7
- {langchain-0.3.14.dist-info → langchain-0.3.15.dist-info}/METADATA +3 -3
- {langchain-0.3.14.dist-info → langchain-0.3.15.dist-info}/RECORD +8 -8
- {langchain-0.3.14.dist-info → langchain-0.3.15.dist-info}/LICENSE +0 -0
- {langchain-0.3.14.dist-info → langchain-0.3.15.dist-info}/WHEEL +0 -0
- {langchain-0.3.14.dist-info → langchain-0.3.15.dist-info}/entry_points.txt +0 -0
|
@@ -293,6 +293,12 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
293
293
|
instructions: Additional run instructions.
|
|
294
294
|
model: Override Assistant model for this run.
|
|
295
295
|
tools: Override Assistant tools for this run.
|
|
296
|
+
parallel_tool_calls: Allow Assistant to set parallel_tool_calls
|
|
297
|
+
for this run.
|
|
298
|
+
top_p: Override Assistant top_p for this run.
|
|
299
|
+
temperature: Override Assistant temperature for this run.
|
|
300
|
+
max_completion_tokens: Allow setting max_completion_tokens for this run.
|
|
301
|
+
max_prompt_tokens: Allow setting max_prompt_tokens for this run.
|
|
296
302
|
run_metadata: Metadata to associate with new run.
|
|
297
303
|
config: Runnable config. Defaults to None.
|
|
298
304
|
|
|
@@ -408,9 +414,16 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
408
414
|
message_metadata: Metadata to associate with a new message.
|
|
409
415
|
thread_metadata: Metadata to associate with new thread. Only relevant
|
|
410
416
|
when a new thread is created.
|
|
411
|
-
instructions: Additional run instructions.
|
|
417
|
+
instructions: Overrides the instructions of the assistant.
|
|
418
|
+
additional_instructions: Appends additional instructions.
|
|
412
419
|
model: Override Assistant model for this run.
|
|
413
420
|
tools: Override Assistant tools for this run.
|
|
421
|
+
parallel_tool_calls: Allow Assistant to set parallel_tool_calls
|
|
422
|
+
for this run.
|
|
423
|
+
top_p: Override Assistant top_p for this run.
|
|
424
|
+
temperature: Override Assistant temperature for this run.
|
|
425
|
+
max_completion_tokens: Allow setting max_completion_tokens for this run.
|
|
426
|
+
max_prompt_tokens: Allow setting max_prompt_tokens for this run.
|
|
414
427
|
run_metadata: Metadata to associate with new run.
|
|
415
428
|
config: Runnable config. Defaults to None.
|
|
416
429
|
kwargs: Additional arguments.
|
|
@@ -507,7 +520,19 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
507
520
|
params = {
|
|
508
521
|
k: v
|
|
509
522
|
for k, v in input.items()
|
|
510
|
-
if k in ("instructions", "model", "tools", "additional_instructions", "run_metadata")
|
|
523
|
+
if k
|
|
524
|
+
in (
|
|
525
|
+
"instructions",
|
|
526
|
+
"model",
|
|
527
|
+
"tools",
|
|
528
|
+
"additional_instructions",
|
|
529
|
+
"parallel_tool_calls",
|
|
530
|
+
"top_p",
|
|
531
|
+
"temperature",
|
|
532
|
+
"max_completion_tokens",
|
|
533
|
+
"max_prompt_tokens",
|
|
534
|
+
"run_metadata",
|
|
535
|
+
)
|
|
511
536
|
}
|
|
512
537
|
return self.client.beta.threads.runs.create(
|
|
513
538
|
input["thread_id"],
|
|
@@ -519,7 +544,18 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
519
544
|
params = {
|
|
520
545
|
k: v
|
|
521
546
|
for k, v in input.items()
|
|
522
|
-
if k in ("instructions", "model", "tools", "run_metadata")
|
|
547
|
+
if k
|
|
548
|
+
in (
|
|
549
|
+
"instructions",
|
|
550
|
+
"model",
|
|
551
|
+
"tools",
|
|
552
|
+
"parallel_tool_calls",
|
|
553
|
+
"top_p",
|
|
554
|
+
"temperature",
|
|
555
|
+
"max_completion_tokens",
|
|
556
|
+
"max_prompt_tokens",
|
|
557
|
+
"run_metadata",
|
|
558
|
+
)
|
|
523
559
|
}
|
|
524
560
|
run = self.client.beta.threads.create_and_run(
|
|
525
561
|
assistant_id=self.assistant_id,
|
|
@@ -637,7 +673,19 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
637
673
|
params = {
|
|
638
674
|
k: v
|
|
639
675
|
for k, v in input.items()
|
|
640
|
-
if k in ("instructions", "model", "tools", "additional_instructions", "run_metadata")
|
|
676
|
+
if k
|
|
677
|
+
in (
|
|
678
|
+
"instructions",
|
|
679
|
+
"model",
|
|
680
|
+
"tools",
|
|
681
|
+
"additional_instructions",
|
|
682
|
+
"parallel_tool_calls",
|
|
683
|
+
"top_p",
|
|
684
|
+
"temperature",
|
|
685
|
+
"max_completion_tokens",
|
|
686
|
+
"max_prompt_tokens",
|
|
687
|
+
"run_metadata",
|
|
688
|
+
)
|
|
641
689
|
}
|
|
642
690
|
return await self.async_client.beta.threads.runs.create(
|
|
643
691
|
input["thread_id"],
|
|
@@ -649,7 +697,18 @@ class OpenAIAssistantRunnable(RunnableSerializable[Dict, OutputType]):
|
|
|
649
697
|
params = {
|
|
650
698
|
k: v
|
|
651
699
|
for k, v in input.items()
|
|
652
|
-
if k in ("instructions", "model", "tools", "run_metadata")
|
|
700
|
+
if k
|
|
701
|
+
in (
|
|
702
|
+
"instructions",
|
|
703
|
+
"model",
|
|
704
|
+
"tools",
|
|
705
|
+
"parallel_tool_calls",
|
|
706
|
+
"top_p",
|
|
707
|
+
"temperature",
|
|
708
|
+
"max_completion_tokens",
|
|
709
|
+
"max_prompt_tokens",
|
|
710
|
+
"run_metadata",
|
|
711
|
+
)
|
|
653
712
|
}
|
|
654
713
|
run = await self.async_client.beta.threads.create_and_run(
|
|
655
714
|
assistant_id=self.assistant_id,
|
langchain/chains/moderation.py
CHANGED
|
@@ -67,8 +67,8 @@ class OpenAIModerationChain(Chain):
|
|
|
67
67
|
if values["openai_pre_1_0"]:
|
|
68
68
|
values["client"] = openai.Moderation
|
|
69
69
|
else:
|
|
70
|
-
values["client"] = openai.OpenAI()
|
|
71
|
-
values["async_client"] = openai.AsyncOpenAI()
|
|
70
|
+
values["client"] = openai.OpenAI(api_key=openai_api_key)
|
|
71
|
+
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
|
|
72
72
|
|
|
73
73
|
except ImportError:
|
|
74
74
|
raise ImportError(
|
langchain/model_laboratory.py
CHANGED
|
@@ -13,13 +13,23 @@ from langchain.chains.llm import LLMChain
|
|
|
13
13
|
|
|
14
14
|
|
|
15
15
|
class ModelLaboratory:
|
|
16
|
-
"""
|
|
16
|
+
"""A utility to experiment with and compare the performance of different models."""
|
|
17
17
|
|
|
18
18
|
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
|
|
19
|
-
"""Initialize with chains to experiment with.
|
|
19
|
+
"""Initialize the ModelLaboratory with chains to experiment with.
|
|
20
20
|
|
|
21
21
|
Args:
|
|
22
|
-
chains:
|
|
22
|
+
chains (Sequence[Chain]): A sequence of chains to experiment with.
|
|
23
|
+
Each chain must have exactly one input and one output variable.
|
|
24
|
+
names (Optional[List[str]]): Optional list of names corresponding to each chain.
|
|
25
|
+
If provided, its length must match the number of chains.
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
Raises:
|
|
29
|
+
ValueError: If any chain is not an instance of `Chain`.
|
|
30
|
+
ValueError: If a chain does not have exactly one input variable.
|
|
31
|
+
ValueError: If a chain does not have exactly one output variable.
|
|
32
|
+
ValueError: If the length of `names` does not match the number of chains.
|
|
23
33
|
"""
|
|
24
34
|
for chain in chains:
|
|
25
35
|
if not isinstance(chain, Chain):
|
|
@@ -50,12 +60,15 @@ class ModelLaboratory:
|
|
|
50
60
|
def from_llms(
|
|
51
61
|
cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
|
|
52
62
|
) -> ModelLaboratory:
|
|
53
|
-
"""Initialize with LLMs
|
|
63
|
+
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
|
|
54
64
|
|
|
55
65
|
Args:
|
|
56
|
-
llms: list of LLMs to experiment with
|
|
57
|
-
prompt: Optional prompt to use to prompt the LLMs. Defaults to None.
|
|
58
|
-
If a prompt was provided, it should only have one input variable.
|
|
66
|
+
llms (List[BaseLLM]): A list of LLMs to experiment with.
|
|
67
|
+
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
|
|
68
|
+
If provided, the prompt must contain exactly one input variable.
|
|
69
|
+
|
|
70
|
+
Returns:
|
|
71
|
+
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
|
|
59
72
|
"""
|
|
60
73
|
if prompt is None:
|
|
61
74
|
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: langchain
|
|
3
|
-
Version: 0.3.14
|
|
3
|
+
Version: 0.3.15
|
|
4
4
|
Summary: Building applications with LLMs through composability
|
|
5
5
|
Home-page: https://github.com/langchain-ai/langchain
|
|
6
6
|
License: MIT
|
|
@@ -16,9 +16,9 @@ Requires-Dist: PyYAML (>=5.3)
|
|
|
16
16
|
Requires-Dist: SQLAlchemy (>=1.4,<3)
|
|
17
17
|
Requires-Dist: aiohttp (>=3.8.3,<4.0.0)
|
|
18
18
|
Requires-Dist: async-timeout (>=4.0.0,<5.0.0) ; python_version < "3.11"
|
|
19
|
-
Requires-Dist: langchain-core (>=0.3.29,<0.4.0)
|
|
19
|
+
Requires-Dist: langchain-core (>=0.3.31,<0.4.0)
|
|
20
20
|
Requires-Dist: langchain-text-splitters (>=0.3.3,<0.4.0)
|
|
21
|
-
Requires-Dist: langsmith (>=0.1.17,<0.3)
|
|
21
|
+
Requires-Dist: langsmith (>=0.1.17,<0.4)
|
|
22
22
|
Requires-Dist: numpy (>=1.22.4,<2) ; python_version < "3.12"
|
|
23
23
|
Requires-Dist: numpy (>=1.26.2,<3) ; python_version >= "3.12"
|
|
24
24
|
Requires-Dist: pydantic (>=2.7.4,<3.0.0)
|
|
@@ -111,7 +111,7 @@ langchain/agents/mrkl/base.py,sha256=yonYGfgMkTixmrknWROMjwjddiUCgmWEkfIaWVlJdAU
|
|
|
111
111
|
langchain/agents/mrkl/output_parser.py,sha256=YQGSjQq5pR4kFUg1HrOS3laV6xgtHgtIOQ_TtJY0UFI,3720
|
|
112
112
|
langchain/agents/mrkl/prompt.py,sha256=2dTMP2lAWiLvCtuEijgQRjbKDlbPEnmx77duMwdJ7e4,641
|
|
113
113
|
langchain/agents/openai_assistant/__init__.py,sha256=Xssaqoxrix3hn1gKSOLmDRQzTxAoJk0ProGXmXQe8Mw,114
|
|
114
|
-
langchain/agents/openai_assistant/base.py,sha256=
|
|
114
|
+
langchain/agents/openai_assistant/base.py,sha256=KtRFxOEICdFVG8fOrcb6Z0o1HpsDWjOx0p2u0CLFhmc,30029
|
|
115
115
|
langchain/agents/openai_functions_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
116
116
|
langchain/agents/openai_functions_agent/agent_token_buffer_memory.py,sha256=G5vrWDbv3oWojxafiW2qSae7Z7WUdZugI-ywjTP0zZ4,3790
|
|
117
117
|
langchain/agents/openai_functions_agent/base.py,sha256=katIW0vE87B7ezm9WU_fEMfeHSQPHZptM0zppQfnY-4,13474
|
|
@@ -280,7 +280,7 @@ langchain/chains/llm_summarization_checker/prompts/revise_summary.txt,sha256=nSS
|
|
|
280
280
|
langchain/chains/llm_symbolic_math/__init__.py,sha256=KQ6bFiFMsqs8PNtU-oo6l-czNBBwQUn2rEirz3gt-w8,470
|
|
281
281
|
langchain/chains/loading.py,sha256=57shFurz0r_FDoUSTcD5Hv7cZl4Rr2G2A_gT-p7XHCE,28829
|
|
282
282
|
langchain/chains/mapreduce.py,sha256=1Sjrnu21VaRtfAGQB-Mf-ssbsv3vk5-mXThwIq1IHTA,4117
|
|
283
|
-
langchain/chains/moderation.py,sha256=
|
|
283
|
+
langchain/chains/moderation.py,sha256=E50JkOxi_KYwHXqVFB8bDuHBTZ7cHUZ1EekVj7WeqB8,4472
|
|
284
284
|
langchain/chains/natbot/__init__.py,sha256=ACF2TYNK_CTfvmdLlG5Ry0_j9D6ZfjgfQxmeKe1BAIg,96
|
|
285
285
|
langchain/chains/natbot/base.py,sha256=pS4NHgEHDjqiHRcyxzNgrFrUG56tW8nQ7BOmxjvoe6c,5433
|
|
286
286
|
langchain/chains/natbot/crawler.py,sha256=E1mQUEsg8Jj6Eth-LBUcMU-Zc88JEA3a79kMhHkKO08,16050
|
|
@@ -811,7 +811,7 @@ langchain/memory/utils.py,sha256=PvauM6AkPRX5Hy5sY6NysuieRI9Oae1IeC61y1iIQMs,617
|
|
|
811
811
|
langchain/memory/vectorstore.py,sha256=RdOX2EDSFXAC6LEE_9aYWIJcVoZ32lUQuludOgPCAoc,4189
|
|
812
812
|
langchain/memory/vectorstore_token_buffer_memory.py,sha256=73GYFp_hExF1IRc6xFTOYU4lLdQAp0cvig6858OAJVQ,7618
|
|
813
813
|
langchain/memory/zep_memory.py,sha256=WMrAJ7jymx0_0d3JnhCuklJxfomsGhEEEQ6uPMJ21Bo,628
|
|
814
|
-
langchain/model_laboratory.py,sha256=
|
|
814
|
+
langchain/model_laboratory.py,sha256=qrC-S-5obNQVwJ1RUFizLdBm3Vej0w9WtdGXwDThK-8,4059
|
|
815
815
|
langchain/output_parsers/__init__.py,sha256=A9fDuB-lYuOIN8QbDx-fULqSwugB7saLRKD23gdaIl4,2720
|
|
816
816
|
langchain/output_parsers/boolean.py,sha256=1-_Xtqhq-9ll4GxfPXW_5sAjAbODCWKF6yTPdVhY8mQ,1689
|
|
817
817
|
langchain/output_parsers/combining.py,sha256=tBQx3lVAz4YL52unRsRGofAgQPFbIgDU8MnwONGw5WQ,1795
|
|
@@ -1335,8 +1335,8 @@ langchain/vectorstores/xata.py,sha256=HW_Oi5Hz8rH2JaUhRNWQ-3hLYmNzD8eAz6K5YqPArm
|
|
|
1335
1335
|
langchain/vectorstores/yellowbrick.py,sha256=-lnjGcRE8Q1nEPOTdbKYTw5noS2cy2ce1ePOU804-_o,624
|
|
1336
1336
|
langchain/vectorstores/zep.py,sha256=RJ2auxoA6uHHLEZknw3_jeFmYJYVt-PWKMBcNMGV6TM,798
|
|
1337
1337
|
langchain/vectorstores/zilliz.py,sha256=XhPPIUfKPFJw0_svCoBgCnNkkBLoRVVcyuMfOnE5IxU,609
|
|
1338
|
-
langchain-0.3.14.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
|
|
1339
|
-
langchain-0.3.
|
|
1340
|
-
langchain-0.3.14.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
|
1341
|
-
langchain-0.3.14.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
|
|
1342
|
-
langchain-0.3.14.dist-info/RECORD,,
|
|
1338
|
+
langchain-0.3.15.dist-info/LICENSE,sha256=TsZ-TKbmch26hJssqCJhWXyGph7iFLvyFBYAa3stBHg,1067
|
|
1339
|
+
langchain-0.3.15.dist-info/METADATA,sha256=NzjHGHj1h07IyJXooBwqxC-PhE6ohU18bjUPKS7706s,7127
|
|
1340
|
+
langchain-0.3.15.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
|
|
1341
|
+
langchain-0.3.15.dist-info/entry_points.txt,sha256=IgKjoXnkkVC8Nm7ggiFMCNAk01ua6RVTb9cmZTVNm5w,58
|
|
1342
|
+
langchain-0.3.15.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|