bisheng-langchain 1.0.1__py3-none-any.whl → 1.2.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. bisheng_langchain/agents/chatglm_functions_agent/base.py +6 -3
  2. bisheng_langchain/agents/llm_functions_agent/base.py +6 -3
  3. bisheng_langchain/chains/qa_generation/base.py +1 -1
  4. bisheng_langchain/chains/transform.py +1 -1
  5. bisheng_langchain/chat_models/host_llm.py +13 -8
  6. bisheng_langchain/chat_models/minimax.py +4 -7
  7. bisheng_langchain/chat_models/proxy_llm.py +5 -7
  8. bisheng_langchain/chat_models/qwen.py +5 -7
  9. bisheng_langchain/chat_models/sensetime.py +5 -7
  10. bisheng_langchain/chat_models/wenxin.py +4 -7
  11. bisheng_langchain/chat_models/xunfeiai.py +4 -7
  12. bisheng_langchain/chat_models/zhipuai.py +4 -7
  13. bisheng_langchain/embeddings/host_embedding.py +6 -4
  14. bisheng_langchain/embeddings/huggingfacegte.py +2 -2
  15. bisheng_langchain/embeddings/huggingfacemultilingual.py +2 -2
  16. bisheng_langchain/embeddings/wenxin.py +5 -8
  17. bisheng_langchain/gpts/agent_types/llm_functions_agent.py +6 -78
  18. bisheng_langchain/gpts/agent_types/llm_react_agent.py +2 -5
  19. bisheng_langchain/gpts/tools/api_tools/base.py +5 -7
  20. bisheng_langchain/gpts/tools/api_tools/firecrawl.py +1 -1
  21. bisheng_langchain/gpts/tools/api_tools/flow.py +1 -1
  22. bisheng_langchain/gpts/tools/api_tools/jina.py +6 -4
  23. bisheng_langchain/gpts/tools/api_tools/macro_data.py +3 -3
  24. bisheng_langchain/gpts/tools/api_tools/openapi.py +8 -6
  25. bisheng_langchain/gpts/tools/api_tools/sina.py +1 -1
  26. bisheng_langchain/gpts/tools/api_tools/tianyancha.py +6 -3
  27. bisheng_langchain/gpts/tools/bing_search/tool.py +2 -2
  28. bisheng_langchain/gpts/tools/calculator/tool.py +2 -2
  29. bisheng_langchain/gpts/tools/code_interpreter/tool.py +2 -2
  30. bisheng_langchain/gpts/tools/dalle_image_generator/tool.py +7 -11
  31. bisheng_langchain/gpts/tools/get_current_time/tool.py +1 -1
  32. bisheng_langchain/gpts/tools/message/dingding.py +1 -2
  33. bisheng_langchain/gpts/tools/message/email.py +2 -4
  34. bisheng_langchain/gpts/tools/message/feishu.py +10 -11
  35. bisheng_langchain/gpts/tools/message/wechat.py +2 -3
  36. bisheng_langchain/gpts/tools/sql_agent/tool.py +23 -20
  37. bisheng_langchain/input_output/input.py +7 -11
  38. bisheng_langchain/input_output/output.py +2 -6
  39. bisheng_langchain/memory/redis.py +3 -3
  40. bisheng_langchain/rag/bisheng_rag_chain.py +2 -8
  41. bisheng_langchain/rag/bisheng_rag_tool.py +1 -1
  42. bisheng_langchain/rag/init_retrievers/baseline_vector_retriever.py +1 -1
  43. bisheng_langchain/rag/init_retrievers/keyword_retriever.py +1 -1
  44. bisheng_langchain/rag/init_retrievers/mix_retriever.py +1 -1
  45. bisheng_langchain/rag/init_retrievers/smaller_chunks_retriever.py +2 -2
  46. bisheng_langchain/retrievers/ensemble.py +3 -2
  47. bisheng_langchain/utils/azure_dalle_image_generator.py +3 -2
  48. bisheng_langchain/utils/requests.py +10 -19
  49. bisheng_langchain/vectorstores/retriever.py +4 -7
  50. {bisheng_langchain-1.0.1.dist-info → bisheng_langchain-1.2.0.dev1.dist-info}/METADATA +13 -13
  51. {bisheng_langchain-1.0.1.dist-info → bisheng_langchain-1.2.0.dev1.dist-info}/RECORD +53 -53
  52. {bisheng_langchain-1.0.1.dist-info → bisheng_langchain-1.2.0.dev1.dist-info}/WHEEL +1 -1
  53. {bisheng_langchain-1.0.1.dist-info → bisheng_langchain-1.2.0.dev1.dist-info}/top_level.txt +0 -0
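
Most of the changes below are mechanical migration work: imports move off the `langchain_core.pydantic_v1` compatibility shim onto native pydantic v2 (`root_validator` → `model_validator`, `class Config` → `model_config = ConfigDict(...)`, explicit `= None` defaults on optional fields), and the gpts agent executors swap langgraph's removed `ToolExecutor`/`ToolInvocation` pair for the prebuilt `ToolNode`. The most visible behavioral addition in the hunks shown is a new `is_ssl` flag on `BaseHostChatLLM`.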

bisheng_langchain/agents/chatglm_functions_agent/base.py

@@ -2,6 +2,8 @@ import json
 import re
 from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
 
+from pydantic import model_validator, Field
+
 from bisheng_langchain.chat_models.host_llm import HostChatGLM
 from langchain.agents.agent import Agent, AgentOutputParser, BaseSingleActionAgent
 from langchain.agents.structured_chat.output_parser import StructuredChatOutputParserWithRetries
@@ -14,7 +16,6 @@ from langchain.schema import AgentAction, AgentFinish, BasePromptTemplate
 from langchain.schema.language_model import BaseLanguageModel
 from langchain.schema.messages import ChatMessage
 from langchain.tools import BaseTool, StructuredTool
-from langchain_core.pydantic_v1 import Field, root_validator
 
 HUMAN_MESSAGE_TEMPLATE = '{input}\n\n{agent_scratchpad}'
 
@@ -81,13 +82,15 @@ class ChatglmFunctionsAgent(BaseSingleActionAgent):
         """Get allowed tools."""
         return list([t.name for t in self.tools])
 
-    @root_validator
+    @model_validator(mode='before')
+    @classmethod
     def validate_llm(cls, values: dict) -> dict:
         if not isinstance(values['llm'], HostChatGLM):
             raise ValueError('Only supported with ChatGLM3 models.')
         return values
 
-    @root_validator
+    @model_validator(mode='before')
+    @classmethod
     def validate_prompt(cls, values: dict) -> dict:
         prompt: BasePromptTemplate = values['prompt']
         if 'agent_scratchpad' not in prompt.input_variables:
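
The `root_validator` → `model_validator(mode='before')` rewrite above is the most repeated change in this release. A minimal sketch of the pattern on a hypothetical model (not package code); note that a bare v1 `@root_validator` ran after field validation, while `mode='before'` receives the raw input dict, so the two are close but not strictly equivalent:

    from typing import Any
    from pydantic import BaseModel, model_validator

    class ToyAgent(BaseModel):  # hypothetical stand-in for ChatglmFunctionsAgent
        llm: Any = None
        prompt: Any = None

        # pydantic v1 spelling, removed by this release:
        #   @root_validator
        #   def validate_llm(cls, values): ...

        @model_validator(mode='before')
        @classmethod
        def validate_llm(cls, values: dict) -> dict:
            # Runs before field validation, so `values` is the raw input dict.
            if values.get('llm') is None:
                raise ValueError('llm is required')
            return values

    ToyAgent(llm=object(), prompt=object())  # ok
    ToyAgent()                               # raises ValueError from the validator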

bisheng_langchain/agents/llm_functions_agent/base.py

@@ -3,6 +3,8 @@ import json
 from json import JSONDecodeError
 from typing import Any, List, Optional, Sequence, Tuple, Union
 
+from pydantic import model_validator
+
 from bisheng_langchain.chat_models.host_llm import HostQwenChat
 from bisheng_langchain.chat_models.proxy_llm import ProxyChatLLM
 from langchain.agents import BaseSingleActionAgent
@@ -16,7 +18,6 @@ from langchain.schema.messages import AIMessage, BaseMessage, FunctionMessage, S
 from langchain.tools import BaseTool
 from langchain.tools.convert_to_openai import format_tool_to_openai_function
 from langchain_core.agents import AgentActionMessageLog
-from langchain_core.pydantic_v1 import root_validator
 from langchain_openai import ChatOpenAI
 
 
@@ -135,7 +136,8 @@ class LLMFunctionsAgent(BaseSingleActionAgent):
         """Get allowed tools."""
         return list([t.name for t in self.tools])
 
-    @root_validator
+    @model_validator(mode='before')
+    @classmethod
     def validate_llm(cls, values: dict) -> dict:
         if ((not isinstance(values['llm'], ChatOpenAI))
                 and (not isinstance(values['llm'], HostQwenChat))
@@ -144,7 +146,8 @@ class LLMFunctionsAgent(BaseSingleActionAgent):
                 'Only supported with ChatOpenAI and HostQwenChat and ProxyChatLLM models.')
         return values
 
-    @root_validator
+    @model_validator(mode='before')
+    @classmethod
     def validate_prompt(cls, values: dict) -> dict:
         prompt: BasePromptTemplate = values['prompt']
         if 'agent_scratchpad' not in prompt.input_variables:

bisheng_langchain/chains/qa_generation/base.py

@@ -9,7 +9,7 @@ from typing import Any, Dict, List, Optional
 from langchain_core.callbacks import CallbackManagerForChainRun
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
-from langchain_core.pydantic_v1 import Field
+from pydantic import Field
 from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
 
 from langchain.chains.base import Chain

bisheng_langchain/chains/transform.py

@@ -6,7 +6,7 @@ from typing import Any, Awaitable, Callable, Dict, List, Optional
 
 from langchain.chains.base import Chain
 from langchain_core.callbacks import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun
-from langchain_core.pydantic_v1 import Field
+from pydantic import Field
 
 logger = logging.getLogger(__name__)
 

bisheng_langchain/chat_models/host_llm.py

@@ -7,6 +7,8 @@ import sys
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
 
 import requests
+from pydantic import ConfigDict, model_validator, Field
+
 from bisheng_langchain.utils.requests import Requests
 from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
 from langchain.chat_models.base import BaseChatModel
@@ -15,7 +17,6 @@ from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, Func
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
 from langchain_core.language_models.llms import create_base_retry_decorator
-from langchain_core.pydantic_v1 import Field, root_validator
 
 # from requests.exceptions import HTTPError
 
@@ -112,6 +113,7 @@ class BaseHostChatLLM(BaseChatModel):
     model_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
     host_base_url: Optional[str] = None
+    is_ssl: Optional[bool] = False
 
     headers: Optional[Dict[str, str]] = Field(default_factory=dict)
 
@@ -137,13 +139,10 @@ class BaseHostChatLLM(BaseChatModel):
     verbose: Optional[bool] = False
 
     decoupled: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['host_base_url'] = get_from_dict_or_env(values, 'host_base_url', 'HostBaseUrl')
@@ -170,7 +169,13 @@ class BaseHostChatLLM(BaseChatModel):
                 headers = values['headers']
             else:
                 headers = {'Content-Type': 'application/json'}
-            values['client'] = Requests(headers=headers, request_timeout=values['request_timeout'])
+            if cls.is_ssl and "https" in values['host_base_url']:
+                import aiohttp
+                values['client'] = Requests(headers=headers,
+                                            aiosession=aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)),
+                                            request_timeout=values['request_timeout'])
+            else:
+                values['client'] = Requests(headers=headers, request_timeout=values['request_timeout'])
         except AttributeError:
             raise ValueError('Try upgrading it with `pip install --upgrade requests`.')
         return values
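
The new `is_ssl` flag above routes HTTPS hosts through an aiohttp session whose connector skips certificate verification (useful for self-signed inference endpoints). A standalone sketch of what the added branch builds; `build_client` is a hypothetical helper, not package code, and note that recent aiohttp versions warn when a `ClientSession` is created outside a running event loop:

    import aiohttp

    from bisheng_langchain.utils.requests import Requests

    def build_client(host_base_url: str, is_ssl: bool, headers: dict, timeout: int) -> Requests:
        # Mirror of the branch added above: for https hosts behind
        # self-signed certificates, disable TLS verification on the
        # async session handed to the Requests wrapper.
        if is_ssl and 'https' in host_base_url:
            session = aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False))
            return Requests(headers=headers, aiosession=session, request_timeout=timeout)
        return Requests(headers=headers, request_timeout=timeout)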

bisheng_langchain/chat_models/minimax.py

@@ -12,7 +12,7 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
+from pydantic import ConfigDict, model_validator, Field
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -141,13 +141,10 @@ class ChatMinimaxAI(BaseChatModel):
     when tiktoken is called, you can specify a model name to use here."""
 
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['minimaxai_api_key'] = get_from_dict_or_env(values, 'minimaxai_api_key',
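
The `class Config` → `model_config` swap above repeats in every chat model that follows. A minimal before/after sketch on a hypothetical model; the diff's `validate_by_name` key is the pydantic ≥ 2.11 spelling, while earlier 2.x releases use `populate_by_name` for the same field-name-or-alias behavior:

    from pydantic import BaseModel, ConfigDict, Field

    class ToyChatModel(BaseModel):  # hypothetical stand-in for ChatMinimaxAI
        # pydantic v1: class Config: allow_population_by_field_name = True
        model_config = ConfigDict(populate_by_name=True)

        api_key: str = Field(alias='minimaxai_api_key')

    ToyChatModel(minimaxai_api_key='sk-...')  # by alias: always accepted
    ToyChatModel(api_key='sk-...')            # by field name: enabled by the config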

bisheng_langchain/chat_models/proxy_llm.py

@@ -6,6 +6,8 @@ import logging
 import sys
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
 
+from pydantic import ConfigDict, model_validator, Field
+
 from bisheng_langchain.utils import requests
 from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
 from langchain.chat_models.base import BaseChatModel
@@ -13,7 +15,6 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -137,13 +138,10 @@ class ProxyChatLLM(BaseChatModel):
     when using one of the many model providers that expose an OpenAI-like
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['elemai_api_key'] = get_from_dict_or_env(values, 'elemai_api_key', 'ELEMAI_API_KEY')

bisheng_langchain/chat_models/qwen.py

@@ -7,6 +7,8 @@ import logging
 import sys
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
 
+from pydantic import ConfigDict, model_validator, Field
+
 from bisheng_langchain.utils.requests import Requests
 # import requests
 from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
@@ -15,7 +17,6 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage, ToolMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -165,13 +166,10 @@ class ChatQWen(BaseChatModel):
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['api_key'] = get_from_dict_or_env(values, 'api_key', 'QWEN_API_KEY')

bisheng_langchain/chat_models/sensetime.py

@@ -7,6 +7,8 @@ import time
 from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
 
 import jwt
+from pydantic import ConfigDict, model_validator, Field
+
 from bisheng_langchain.utils.requests import Requests
 from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
 from langchain.chat_models.base import BaseChatModel
@@ -14,7 +16,6 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -157,13 +158,10 @@ class SenseChat(BaseChatModel):
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
 

bisheng_langchain/chat_models/wenxin.py

@@ -12,7 +12,7 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
+from pydantic import ConfigDict, model_validator, Field
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -138,13 +138,10 @@ class ChatWenxin(BaseChatModel):
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['wenxin_api_key'] = get_from_dict_or_env(values, 'wenxin_api_key', 'WENXIN_API_KEY')

bisheng_langchain/chat_models/xunfeiai.py

@@ -12,7 +12,7 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
+from pydantic import ConfigDict, model_validator, Field
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -141,13 +141,10 @@ class ChatXunfeiAI(BaseChatModel):
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['xunfeiai_appid'] = get_from_dict_or_env(

bisheng_langchain/chat_models/zhipuai.py

@@ -12,7 +12,7 @@ from langchain.schema import ChatGeneration, ChatResult
 from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import Field, root_validator
+from pydantic import ConfigDict, model_validator, Field
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -149,13 +149,10 @@ class ChatZhipuAI(BaseChatModel):
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     verbose: Optional[bool] = False
+    model_config = ConfigDict(validate_by_name=True)
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        allow_population_by_field_name = True
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['zhipuai_api_key'] = get_from_dict_or_env(values, 'zhipuai_api_key',

bisheng_langchain/embeddings/host_embedding.py

@@ -6,7 +6,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 import requests
 from langchain.embeddings.base import Embeddings
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
+from pydantic import model_validator, BaseModel, Field
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
 
@@ -42,7 +42,7 @@ class HostEmbeddings(BaseModel, Embeddings):
     """host embedding models.
     """
 
-    client: Optional[Any]  #: :meta private:
+    client: Optional[Any] = None  #: :meta private:
     """Model name to use."""
     model: str = 'embedding-host'
     host_base_url: str = None
@@ -64,7 +64,8 @@ class HostEmbeddings(BaseModel, Embeddings):
 
     url_ep: Optional[str] = None
 
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['host_base_url'] = get_from_dict_or_env(values, 'host_base_url', 'HostBaseUrl')
@@ -164,7 +165,8 @@ class CustomHostEmbedding(HostEmbeddings):
     model: str = Field('custom-embedding', alias='model')
     embedding_ctx_length: int = 512
 
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['host_base_url'] = get_from_dict_or_env(values, 'host_base_url', 'HostBaseUrl')
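
The `client: Optional[Any] = None` edits above (repeated in the two HuggingFace embedding classes below) are forced by pydantic v2: unlike v1, an `Optional[...]` annotation no longer implies a default of `None`, so without the explicit default the field becomes required. A quick illustration:

    from typing import Any, Optional
    from pydantic import BaseModel

    class WithDefault(BaseModel):
        client: Optional[Any] = None  # optional field, as after this diff

    class WithoutDefault(BaseModel):
        client: Optional[Any]  # pydantic v2 treats this as *required*

    WithDefault()     # ok
    WithoutDefault()  # raises ValidationError: Field required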

bisheng_langchain/embeddings/huggingfacegte.py

@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional
 
 import requests
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import BaseModel, Extra, Field
+from pydantic import BaseModel, Extra, Field
 DEFAULT_Multilingual_MODEL = "thenlper/gte-large-zh"
 
 
@@ -27,7 +27,7 @@ class HuggingFaceGteEmbeddings(BaseModel, Embeddings):
     )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = DEFAULT_Multilingual_MODEL
     """Model name to use."""
     cache_folder: Optional[str] = None

bisheng_langchain/embeddings/huggingfacemultilingual.py

@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional
 
 import requests
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import BaseModel, Extra, Field
+from pydantic import BaseModel, Extra, Field
 DEFAULT_Multilingual_MODEL = "intfloat/multilingual-e5-large"
 
 
@@ -27,7 +27,7 @@ class HuggingFaceMultilingualEmbeddings(BaseModel, Embeddings):
     )
     """
 
-    client: Any  #: :meta private:
+    client: Any = None  #: :meta private:
     model_name: str = DEFAULT_Multilingual_MODEL
     """Model name to use."""
     cache_folder: Optional[str] = None

bisheng_langchain/embeddings/wenxin.py

@@ -7,7 +7,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 # import numpy as np
 from langchain.embeddings.base import Embeddings
 from langchain.utils import get_from_dict_or_env
-from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
+from pydantic import ConfigDict, model_validator, BaseModel, Field
 from requests.exceptions import HTTPError
 from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                       wait_exponential)
@@ -55,7 +55,7 @@ class WenxinEmbeddings(BaseModel, Embeddings):
 
     """
 
-    client: Optional[Any]  #: :meta private:
+    client: Optional[Any] = None  #: :meta private:
     model: str = 'embedding-v1'
 
     deployment: Optional[str] = 'default'
@@ -72,13 +72,10 @@ class WenxinEmbeddings(BaseModel, Embeddings):
 
     model_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict)
     """Holds any model parameters valid for `create` call not explicitly specified."""
+    model_config = ConfigDict(extra='forbid')
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         values['wenxin_api_key'] = get_from_dict_or_env(values, 'wenxin_api_key', 'WENXIN_API_KEY')
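
Here the v1 `extra = Extra.forbid` becomes the string literal `'forbid'` inside `ConfigDict`; pydantic v2 accepts `'allow'`, `'ignore'`, or `'forbid'`. A tiny sketch on a hypothetical model:

    from pydantic import BaseModel, ConfigDict

    class StrictEmbeddings(BaseModel):  # hypothetical stand-in for WenxinEmbeddings
        model_config = ConfigDict(extra='forbid')
        model: str = 'embedding-v1'

    StrictEmbeddings(model='embedding-v1')          # ok
    StrictEmbeddings(model='embedding-v1', junk=1)  # ValidationError: extra inputs are not permitted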

bisheng_langchain/gpts/agent_types/llm_functions_agent.py

@@ -8,7 +8,7 @@ from langchain_core.language_models.base import LanguageModelLike
 from langchain_core.messages import FunctionMessage, SystemMessage, ToolMessage
 from langgraph.graph import END
 from langgraph.graph.message import MessageGraph
-from langgraph.prebuilt import ToolExecutor, ToolInvocation
+from langgraph.prebuilt import ToolNode
 from langgraph.utils.runnable import RunnableCallable
 
 
@@ -35,7 +35,7 @@ def get_openai_functions_agent_executor(tools: list[BaseTool], llm: LanguageMode
         llm_with_tools = llm
 
     agent = _get_messages | llm_with_tools
-    tool_executor = ToolExecutor(tools)
+    tool_nodes = ToolNode(tools=tools)
 
     # Define the function that determines whether to continue or not
    def should_continue(messages):
@@ -55,63 +55,11 @@ def get_openai_functions_agent_executor(tools: list[BaseTool], llm: LanguageMode
 
     # Define the function to execute tools
     async def acall_tool(messages):
-        actions: list[ToolInvocation] = []
-        # Based on the continue condition
-        # we know the last message involves a function call
-        last_message = messages[-1]
-        for tool_call in last_message.additional_kwargs['tool_calls']:
-            function = tool_call['function']
-            function_name = function['name']
-            try:
-                _tool_input = json.loads(function['arguments'] or '{}')
-            except Exception as e:
-                raise Exception(f"Error parsing arguments for function: {function_name}. arguments: {function['arguments']}. error: {str(e)}")
-            # We construct an ToolInvocation from the function_call
-            actions.append(ToolInvocation(
-                tool=function_name,
-                tool_input=_tool_input,
-            ))
-        # We call the tool_executor and get back a response
-        responses = await tool_executor.abatch(actions, **kwargs)
-        # We use the response to create a ToolMessage
-        tool_messages = [
-            LiberalToolMessage(
-                tool_call_id=tool_call['id'],
-                content=response,
-                additional_kwargs={'name': tool_call['function']['name']},
-            )
-            for tool_call, response in zip(last_message.additional_kwargs['tool_calls'], responses)
-        ]
+        tool_messages = await tool_nodes._afunc(messages, None, store=None)
         return tool_messages
 
     def call_tool(messages):
-        actions: list[ToolInvocation] = []
-        # Based on the continue condition
-        # we know the last message involves a function call
-        last_message = messages[-1]
-        for tool_call in last_message.additional_kwargs['tool_calls']:
-            function = tool_call['function']
-            function_name = function['name']
-            try:
-                _tool_input = json.loads(function['arguments'] or '{}')
-            except Exception as e:
-                raise Exception(f"Error parsing arguments for function: {function_name}. arguments: {function['arguments']}. error: {str(e)}")
-            # We construct an ToolInvocation from the function_call
-            actions.append(ToolInvocation(
-                tool=function_name,
-                tool_input=_tool_input,
-            ))
-        # We call the tool_executor and get back a response
-        responses = tool_executor.batch(actions, **kwargs)
-        # We use the response to create a ToolMessage
-        tool_messages = [
-            LiberalToolMessage(
-                tool_call_id=tool_call['id'],
-                content=response,
-                additional_kwargs={'name': tool_call['function']['name']},
-            )
-            for tool_call, response in zip(last_message.additional_kwargs['tool_calls'], responses)
-        ]
+        tool_messages = tool_nodes._func(messages, config=None, store=None)
         return tool_messages
 
     workflow = MessageGraph()
@@ -185,7 +133,7 @@ def get_qwen_local_functions_agent_executor(
     else:
         llm_with_tools = llm
     agent = _get_messages | llm_with_tools
-    tool_executor = ToolExecutor(tools)
+    tool_nodes = ToolNode(tools=tools)
 
     # Define the function that determines whether to continue or not
     def should_continue(messages):
@@ -199,27 +147,7 @@ def get_qwen_local_functions_agent_executor(
 
     # Define the function to execute tools
     async def call_tool(messages):
-        actions: list[ToolInvocation] = []
-        # Based on the continue condition
-        # we know the last message involves a function call
-        last_message = messages[-1]
-        # only one function
-        function = last_message.additional_kwargs['function_call']
-        function_name = function['name']
-        try:
-            _tool_input = json.loads(function['arguments'] or '{}')
-        except Exception as e:
-            raise Exception(
-                f"Error parsing arguments for function: {function_name}. arguments: {function['arguments']}. error: {str(e)}")
-        # We construct an ToolInvocation from the function_call
-        actions.append(ToolInvocation(
-            tool=function_name,
-            tool_input=_tool_input,
-        ))
-        # We call the tool_executor and get back a response
-        responses = await tool_executor.abatch(actions, **kwargs)
-        # We use the response to create a ToolMessage
-        tool_messages = [LiberalFunctionMessage(content=responses[0], name=function_name)]
+        tool_messages = await tool_nodes._afunc(messages, config=None, store=None)
         return tool_messages
 
     workflow = MessageGraph()
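
Both executors above now delegate tool dispatch to langgraph's prebuilt `ToolNode`, which parses `tool_calls` from the last AI message, runs the matching tools, and emits `ToolMessage`s, replacing the hand-rolled `ToolInvocation` plumbing deleted here. The diff reaches into the private `_func`/`_afunc` methods; for reference, a sketch of the public Runnable interface (tool and messages hypothetical):

    from langchain_core.messages import AIMessage
    from langchain_core.tools import tool
    from langgraph.prebuilt import ToolNode

    @tool
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    tool_node = ToolNode(tools=[add])

    # An AIMessage carrying a structured tool call, as a chat model would emit it.
    ai_msg = AIMessage(
        content='',
        tool_calls=[{'name': 'add', 'args': {'a': 2, 'b': 3}, 'id': 'call_1'}],
    )

    # Invoking with a messages dict returns the resulting ToolMessages under the same key.
    result = tool_node.invoke({'messages': [ai_msg]})
    print(result['messages'][0].content)  # '5'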

bisheng_langchain/gpts/agent_types/llm_react_agent.py

@@ -9,7 +9,7 @@ from langchain_core.language_models import LanguageModelLike
 from langchain_core.messages import BaseMessage
 from langgraph.graph import END, StateGraph
 from langgraph.graph.state import CompiledStateGraph
-from langgraph.prebuilt.tool_executor import ToolExecutor
+from langgraph.prebuilt import ToolNode
 from langgraph.utils.runnable import RunnableCallable
 
 
@@ -64,10 +64,7 @@ def create_agent_executor(agent_runnable, tools, input_schema=None) -> CompiledS
         The `CompiledStateGraph` object.
     """
 
-    if isinstance(tools, ToolExecutor):
-        tool_executor = tools
-    else:
-        tool_executor = ToolExecutor(tools)
+    tool_executor = ToolNode(tools=tools)
 
     state = _get_agent_state(input_schema)
 

bisheng_langchain/gpts/tools/api_tools/base.py

@@ -1,7 +1,8 @@
 from typing import Any, Dict, Tuple, Type, Union
 
+from pydantic import ConfigDict, model_validator, BaseModel, Field
+
 from bisheng_langchain.utils.requests import Requests, RequestsWrapper
-from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
 from langchain_core.tools import BaseTool, Tool
 from loguru import logger
 
@@ -32,13 +33,10 @@ class APIToolBase(BaseModel):
     params: Dict[str, Any] = {}
     input_key: str = 'keyword'
     args_schema: Type[BaseModel] = ApiArg
+    model_config = ConfigDict(extra="forbid")
 
-    class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
-
-    @root_validator()
+    @model_validator(mode='before')
+    @classmethod
     def validate_environment(cls, values: Dict) -> Dict:
         """Validate that api key and python package exists in environment."""
         timeout = values.get('request_timeout', 30)

bisheng_langchain/gpts/tools/api_tools/firecrawl.py

@@ -3,7 +3,7 @@ from typing import Any, Dict, Type
 
 import requests
 
-from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
+from pydantic import BaseModel, Field
 
 from bisheng_langchain.gpts.tools.api_tools.base import (APIToolBase,
                                                          MultArgsSchemaTool)

bisheng_langchain/gpts/tools/api_tools/flow.py

@@ -1,5 +1,5 @@
 from loguru import logger
-from langchain_core.pydantic_v1 import BaseModel, Field
+from pydantic import BaseModel, Field
 from typing import Any
 from .base import APIToolBase
 from .base import MultArgsSchemaTool
  from .base import MultArgsSchemaTool