bisheng-langchain 0.2.2.3__py3-none-any.whl → 0.2.2.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- bisheng_langchain/autogen_role/assistant.py +19 -6
- bisheng_langchain/autogen_role/custom.py +7 -9
- bisheng_langchain/autogen_role/groupchat_manager.py +14 -0
- bisheng_langchain/autogen_role/user.py +15 -0
- bisheng_langchain/chains/__init__.py +3 -2
- bisheng_langchain/chains/conversational_retrieval/__init__.py +0 -0
- bisheng_langchain/chains/conversational_retrieval/base.py +115 -0
- bisheng_langchain/chat_models/__init__.py +4 -2
- bisheng_langchain/chat_models/host_llm.py +28 -0
- bisheng_langchain/chat_models/sensetime.py +437 -0
- bisheng_langchain/document_loaders/elem_unstrcutured_loader.py +13 -7
- {bisheng_langchain-0.2.2.3.dist-info → bisheng_langchain-0.2.2.5.dist-info}/METADATA +1 -1
- {bisheng_langchain-0.2.2.3.dist-info → bisheng_langchain-0.2.2.5.dist-info}/RECORD +15 -12
- {bisheng_langchain-0.2.2.3.dist-info → bisheng_langchain-0.2.2.5.dist-info}/WHEEL +0 -0
- {bisheng_langchain-0.2.2.3.dist-info → bisheng_langchain-0.2.2.5.dist-info}/top_level.txt +0 -0
bisheng_langchain/autogen_role/assistant.py:

@@ -1,9 +1,11 @@
 """Chain that runs an arbitrary python function."""
 import logging
-
+import os
+from typing import Callable, Dict, Optional
 
 import openai
 from autogen import AssistantAgent
+from langchain.base_language import BaseLanguageModel
 
 logger = logging.getLogger(__name__)
 
@@ -33,22 +35,32 @@ Reply "TERMINATE" in the end when everything is done.
         openai_api_base: Optional[str] = '',  # when llm_flag=True, need to set
         openai_proxy: Optional[str] = '',  # when llm_flag=True, need to set
         temperature: Optional[float] = 0,  # when llm_flag=True, need to set
-
+        api_type: Optional[str] = None,  # when llm_flag=True, need to set
+        api_version: Optional[str] = None,  # when llm_flag=True, need to set
+        llm: Optional[BaseLanguageModel] = None,
+        system_message: Optional[
+            str] = DEFAULT_SYSTEM_MESSAGE,  # agent system message, llm or group chat manage will use  # noqa
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
         **kwargs,
     ):
-        is_termination_msg = (
-
-        )
+        is_termination_msg = (is_termination_msg if is_termination_msg is not None else
+                              (lambda x: x.get('content') == 'TERMINATE'))
         if openai_proxy:
             openai.proxy = {'https': openai_proxy, 'http': openai_proxy}
+        else:
+            openai.proxy = None
         if openai_api_base:
             openai.api_base = openai_api_base
+        else:
+            openai.api_base = os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
 
         config_list = [
             {
                 'model': model_name,
                 'api_key': openai_api_key,
+                'api_base': openai_api_base,
+                'api_type': api_type,
+                'api_version': api_version,
             },
         ]
         llm_config = {
@@ -61,9 +73,10 @@ Reply "TERMINATE" in the end when everything is done.
         super().__init__(
             name,
             llm_config=llm_config,
+            llm=llm,
             system_message=system_message,
             is_termination_msg=is_termination_msg,
             max_consecutive_auto_reply=None,
-            human_input_mode=
+            human_input_mode='NEVER',
             code_execution_config=False,
         )
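The net effect of this change: the assistant role now accepts Azure-style `api_type`/`api_version` values and an optional LangChain `llm` object, forwards `api_base` in the autogen `config_list`, and explicitly resets `openai.proxy`/`openai.api_base` (falling back to the `OPENAI_API_BASE` environment variable) instead of leaving stale module-level state behind. The same pattern is repeated in `groupchat_manager.py` and `user.py` below. A minimal standalone sketch of the new `config_list` construction, mirroring the added lines rather than calling the package itself (all values are placeholders):

```python
import os

import openai  # the pre-1.0 openai client, which exposes module-level proxy/api_base

# Placeholder inputs mirroring the new keyword arguments added in this hunk.
openai_api_key = 'sk-...'
openai_api_base = ''          # empty -> fall back to env var / public endpoint
openai_proxy = ''             # empty -> proxy is now explicitly cleared
api_type, api_version = None, None

if openai_proxy:
    openai.proxy = {'https': openai_proxy, 'http': openai_proxy}
else:
    openai.proxy = None
if openai_api_base:
    openai.api_base = openai_api_base
else:
    openai.api_base = os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')

config_list = [{
    'model': 'gpt-4',                 # placeholder model_name
    'api_key': openai_api_key,
    'api_base': openai_api_base,      # newly forwarded to autogen
    'api_type': api_type,             # e.g. 'azure' when targeting Azure OpenAI
    'api_version': api_version,
}]
print(config_list)
```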
bisheng_langchain/autogen_role/custom.py:

@@ -1,4 +1,3 @@
-
 from typing import Any, Awaitable, Callable, Dict, List, Optional, Union
 
 from autogen import Agent, ConversableAgent
@@ -15,14 +14,13 @@ class AutoGenCustomRole(ConversableAgent):
         coroutine: Optional[Callable[..., Awaitable[str]]] = None,
         **kwargs,
     ):
-        super().__init__(
-
-
-
-
-
-
-        )
+        super().__init__(name=name,
+                         system_message=system_message,
+                         human_input_mode='NEVER',
+                         code_execution_config=False,
+                         llm_config=False,
+                         llm=None,
+                         **kwargs)
         self.func = func
         self.coroutine = coroutine
         self.register_reply(Agent, AutoGenCustomRole.generate_custom_reply)
bisheng_langchain/autogen_role/groupchat_manager.py:

@@ -1,9 +1,11 @@
 """Chain that runs an arbitrary python function."""
 import logging
+import os
 from typing import List, Optional
 
 import openai
 from autogen import Agent, GroupChat, GroupChatManager
+from langchain.base_language import BaseLanguageModel
 
 from .user import AutoGenUser
 
@@ -13,6 +15,7 @@ logger = logging.getLogger(__name__)
 class AutoGenGroupChatManager(GroupChatManager):
     """A chat manager agent that can manage a group chat of multiple agents.
     """
+
     def __init__(
         self,
         agents: List[Agent],
@@ -22,7 +25,10 @@ class AutoGenGroupChatManager(GroupChatManager):
         openai_api_base: Optional[str] = '',
         openai_proxy: Optional[str] = '',
         temperature: Optional[float] = 0,
+        api_type: Optional[str] = None,  # when llm_flag=True, need to set
+        api_version: Optional[str] = None,  # when llm_flag=True, need to set
         name: Optional[str] = 'chat_manager',
+        llm: Optional[BaseLanguageModel] = None,
         system_message: Optional[str] = 'Group chat manager.',
         **kwargs,
     ):
@@ -33,13 +39,20 @@ class AutoGenGroupChatManager(GroupChatManager):
 
         if openai_proxy:
             openai.proxy = {'https': openai_proxy, 'http': openai_proxy}
+        else:
+            openai.proxy = None
         if openai_api_base:
             openai.api_base = openai_api_base
+        else:
+            openai.api_base = os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
 
         config_list = [
             {
                 'model': model_name,
                 'api_key': openai_api_key,
+                'api_base': openai_api_base,
+                'api_type': api_type,
+                'api_version': api_version,
             },
         ]
         llm_config = {
@@ -52,6 +65,7 @@ class AutoGenGroupChatManager(GroupChatManager):
         super().__init__(
             groupchat=groupchat,
             llm_config=llm_config,
+            llm=llm,
             name=name,
             system_message=system_message,
         )
bisheng_langchain/autogen_role/user.py:

@@ -1,9 +1,11 @@
 """Chain that runs an arbitrary python function."""
 import logging
+import os
 from typing import Callable, Dict, Optional
 
 import openai
 from autogen import UserProxyAgent
+from langchain.base_language import BaseLanguageModel
 
 logger = logging.getLogger(__name__)
 
@@ -25,6 +27,9 @@ class AutoGenUserProxyAgent(UserProxyAgent):
         openai_api_base: Optional[str] = '',  # when llm_flag=True, need to set
         openai_proxy: Optional[str] = '',  # when llm_flag=True, need to set
         temperature: Optional[float] = 0,  # when llm_flag=True, need to set
+        api_type: Optional[str] = None,  # when llm_flag=True, need to set
+        api_version: Optional[str] = None,  # when llm_flag=True, need to set
+        llm: Optional[BaseLanguageModel] = None,
         system_message: Optional[
             str] = '',  # agent system message, llm or group chat manage will use
         is_termination_msg: Optional[Callable[[Dict], bool]] = None,
@@ -44,12 +49,19 @@ class AutoGenUserProxyAgent(UserProxyAgent):
         if llm_flag:
             if openai_proxy:
                 openai.proxy = {'https': openai_proxy, 'http': openai_proxy}
+            else:
+                openai.proxy = None
             if openai_api_base:
                 openai.api_base = openai_api_base
+            else:
+                openai.api_base = os.environ.get('OPENAI_API_BASE', 'https://api.openai.com/v1')
             config_list = [
                 {
                     'model': model_name,
                     'api_key': openai_api_key,
+                    'api_base': openai_api_base,
+                    'api_type': api_type,
+                    'api_version': api_version,
                 },
             ]
             llm_config = {
@@ -68,6 +80,7 @@ class AutoGenUserProxyAgent(UserProxyAgent):
                          function_map=function_map,
                          code_execution_config=code_execution_config,
                          llm_config=llm_config,
+                         llm=llm,
                          system_message=system_message)
 
 
@@ -96,6 +109,7 @@ class AutoGenUser(UserProxyAgent):
                          human_input_mode=human_input_mode,
                          code_execution_config=code_execution_config,
                          llm_config=llm_config,
+                         llm=None,
                          system_message=system_message)
 
 
@@ -126,4 +140,5 @@ class AutoGenCoder(UserProxyAgent):
                          function_map=function_map,
                          code_execution_config=code_execution_config,
                          llm_config=llm_config,
+                         llm=None,
                          system_message=system_message)
bisheng_langchain/chains/__init__.py:

@@ -1,5 +1,6 @@
 from bisheng_langchain.chains.autogen.auto_gen import AutoGenChain
 from bisheng_langchain.chains.combine_documents.stuff import StuffDocumentsChain
+from bisheng_langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
 from bisheng_langchain.chains.retrieval.retrieval_chain import RetrievalChain
 from bisheng_langchain.chains.router.multi_rule import MultiRuleChain
 from bisheng_langchain.chains.router.rule_router import RuleBasedRouter
@@ -7,6 +8,6 @@ from bisheng_langchain.chains.router.rule_router import RuleBasedRouter
 from .loader_output import LoaderOutputChain
 
 __all__ = [
-    'StuffDocumentsChain', 'LoaderOutputChain', 'AutoGenChain', 'RuleBasedRouter',
-    'RetrievalChain'
+    'StuffDocumentsChain', 'LoaderOutputChain', 'AutoGenChain', 'RuleBasedRouter',
+    'MultiRuleChain', 'RetrievalChain', 'ConversationalRetrievalChain'
 ]
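With this change `ConversationalRetrievalChain` (and `MultiRuleChain`, which was imported but previously missing from `__all__`) is exported from the package root. A quick import check, assuming the 0.2.2.5 wheel is installed:

```python
from bisheng_langchain.chains import (AutoGenChain, ConversationalRetrievalChain,
                                      LoaderOutputChain, MultiRuleChain, RetrievalChain,
                                      RuleBasedRouter, StuffDocumentsChain)

# The new chain subclasses LangChain's ConversationalRetrievalChain (see base.py below).
print(ConversationalRetrievalChain.__bases__)
```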
bisheng_langchain/chains/conversational_retrieval/__init__.py: file without changes (empty package __init__).
bisheng_langchain/chains/conversational_retrieval/base.py (new file):

@@ -0,0 +1,115 @@
from __future__ import annotations

import inspect
from typing import Any, Dict, List, Optional, Tuple, Union

from langchain.callbacks.manager import AsyncCallbackManagerForChainRun, CallbackManagerForChainRun
from langchain.chains.conversational_retrieval.base import \
    ConversationalRetrievalChain as BaseConversationalRetrievalChain
from langchain_core.messages import BaseMessage

# Depending on the memory type and configuration, the chat history format may differ.
# This needs to be consolidated.
CHAT_TURN_TYPE = Union[Tuple[str, str], BaseMessage]

_ROLE_MAP = {'human': 'Human: ', 'ai': 'Assistant: '}


def _get_chat_history(chat_history: List[CHAT_TURN_TYPE]) -> str:
    buffer = ''
    for dialogue_turn in chat_history:
        if isinstance(dialogue_turn, BaseMessage):
            role_prefix = _ROLE_MAP.get(dialogue_turn.type, f'{dialogue_turn.type}: ')
            buffer += f'\n{role_prefix}{dialogue_turn.content}'
        elif isinstance(dialogue_turn, tuple):
            human = 'Human: ' + dialogue_turn[0]
            ai = 'Assistant: ' + dialogue_turn[1]
            buffer += '\n' + '\n'.join([human, ai])
        else:
            raise ValueError(f'Unsupported chat history format: {type(dialogue_turn)}.'
                             f' Full chat history: {chat_history} ')
    return buffer


class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
    """ConversationalRetrievalChain is a chain you can use to have a conversation with a character from a series."""

    def _call(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        question = inputs['question']
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs['chat_history'])

        if chat_history_str:
            # callbacks = _run_manager.get_child()
            new_question = self.question_generator.run(question=question,
                                                       chat_history=chat_history_str)
        else:
            new_question = question
        accepts_run_manager = ('run_manager' in inspect.signature(self._get_docs).parameters)
        if accepts_run_manager:
            docs = self._get_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = self._get_docs(new_question, inputs)  # type: ignore[call-arg]
        output: Dict[str, Any] = {}
        if self.response_if_no_docs_found is not None and len(docs) == 0:
            output[self.output_key] = self.response_if_no_docs_found
        else:
            new_inputs = inputs.copy()
            if self.rephrase_question:
                new_inputs['question'] = new_question
            new_inputs['chat_history'] = chat_history_str
            answer = self.combine_docs_chain.run(input_documents=docs,
                                                 callbacks=_run_manager.get_child(),
                                                 **new_inputs)
            output[self.output_key] = answer

        if self.return_source_documents:
            output['source_documents'] = docs
        if self.return_generated_question:
            output['generated_question'] = new_question
        return output

    async def _acall(
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
        question = inputs['question']
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs['chat_history'])
        if chat_history_str:
            # callbacks = _run_manager.get_child()
            new_question = await self.question_generator.arun(question=question,
                                                              chat_history=chat_history_str)
        else:
            new_question = question
        accepts_run_manager = ('run_manager' in inspect.signature(self._aget_docs).parameters)
        if accepts_run_manager:
            docs = await self._aget_docs(new_question, inputs, run_manager=_run_manager)
        else:
            docs = await self._aget_docs(new_question, inputs)  # type: ignore[call-arg]

        output: Dict[str, Any] = {}
        if self.response_if_no_docs_found is not None and len(docs) == 0:
            output[self.output_key] = self.response_if_no_docs_found
        else:
            new_inputs = inputs.copy()
            if self.rephrase_question:
                new_inputs['question'] = new_question
            new_inputs['chat_history'] = chat_history_str
            answer = await self.combine_docs_chain.arun(input_documents=docs,
                                                        callbacks=_run_manager.get_child(),
                                                        **new_inputs)
            output[self.output_key] = answer

        if self.return_source_documents:
            output['source_documents'] = docs
        if self.return_generated_question:
            output['generated_question'] = new_question
        return output
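Compared with the upstream LangChain chain, this override flattens chat history into `Human:` / `Assistant:` turns, calls the question generator without child callbacks, and always injects `chat_history` into the inputs of the combine-docs chain. A hedged usage sketch (`my_llm` and `my_retriever` are placeholders for a LangChain chat model and retriever; `from_llm` is inherited from the LangChain base class):

```python
from bisheng_langchain.chains import ConversationalRetrievalChain

chain = ConversationalRetrievalChain.from_llm(llm=my_llm,
                                              retriever=my_retriever,
                                              return_source_documents=True,
                                              return_generated_question=True)

# chat_history may be (human, ai) tuples or BaseMessage objects (CHAT_TURN_TYPE above).
result = chain({'question': 'And what about streaming?',
                'chat_history': [('Does SenseChat support chat?', 'Yes, via the SenseNova API.')]})
print(result['answer'])
print(result['generated_question'], len(result['source_documents']))
```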
bisheng_langchain/chat_models/__init__.py:

@@ -1,12 +1,14 @@
-from .host_llm import CustomLLMChat, HostBaichuanChat, HostChatGLM, HostLlama2Chat, HostQwenChat
+from .host_llm import CustomLLMChat, HostBaichuanChat, HostChatGLM, HostLlama2Chat, HostQwenChat, HostYuanChat, HostYiChat
 from .minimax import ChatMinimaxAI
 from .proxy_llm import ProxyChatLLM
 from .qwen import ChatQWen
 from .wenxin import ChatWenxin
 from .xunfeiai import ChatXunfeiAI
 from .zhipuai import ChatZhipuAI
+from .sensetime import SenseChat
 
 __all__ = [
     'ProxyChatLLM', 'ChatMinimaxAI', 'ChatWenxin', 'ChatZhipuAI', 'ChatXunfeiAI', 'HostChatGLM',
-    'HostBaichuanChat', 'HostLlama2Chat', 'HostQwenChat', 'CustomLLMChat', 'ChatQWen'
+    'HostBaichuanChat', 'HostLlama2Chat', 'HostQwenChat', 'CustomLLMChat', 'ChatQWen', 'SenseChat',
+    'HostYuanChat', 'HostYiChat'
 ]
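The three new chat models become importable directly from the subpackage, assuming the 0.2.2.5 wheel (and its PyJWT dependency for `sensetime`) is installed:

```python
from bisheng_langchain.chat_models import HostYiChat, HostYuanChat, SenseChat

# Default model identifiers declared on the new classes.
print(SenseChat.__fields__['model_name'].default)      # 'SenseChat'
print(HostYuanChat.__fields__['model_name'].default)   # 'Yuan2-2B-Janus-hf'
print(HostYiChat.__fields__['model_name'].default)     # 'Yi-34B-Chat'
```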
bisheng_langchain/chat_models/host_llm.py:

@@ -535,3 +535,31 @@ class CustomLLMChat(BaseHostChatLLM):
     def _llm_type(self) -> str:
         """Return type of chat model."""
         return 'custom_llm_chat'
+
+class HostYuanChat(BaseHostChatLLM):
+    # use custom llm chat api, api should compatiable with openai definition
+    model_name: str = Field('Yuan2-2B-Janus-hf', alias='model')
+
+    temperature: float = 1
+    top_p: float = 0.9
+    max_tokens: int = 4096
+    host_base_url: str
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return 'yuan2'
+
+class HostYiChat(BaseHostChatLLM):
+    # use custom llm chat api, api should compatiable with openai definition
+    model_name: str = Field('Yi-34B-Chat', alias='model')
+
+    temperature: float = 0.6
+    top_p: float = 0.8
+    max_tokens: int = 4096
+    host_base_url: str
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return 'yi_chat'
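Both additions are thin subclasses of `BaseHostChatLLM` that only change the default model name and sampling parameters, so they are constructed like the existing host models. A hedged sketch, assuming the base class requires only `host_base_url` and that the URL points at a locally hosted, OpenAI-compatible inference service (the URL below is a placeholder):

```python
from bisheng_langchain.chat_models import HostYiChat, HostYuanChat
from langchain.schema.messages import HumanMessage

yuan = HostYuanChat(host_base_url='http://127.0.0.1:9000/v2.1/models')  # placeholder URL
yi = HostYiChat(host_base_url='http://127.0.0.1:9000/v2.1/models')      # temperature=0.6, top_p=0.8

print(yuan._llm_type, yi._llm_type)  # 'yuan2' 'yi_chat'
# resp = yi([HumanMessage(content='Hello')])  # needs a running endpoint serving Yi-34B-Chat
```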
bisheng_langchain/chat_models/sensetime.py (new file):

@@ -0,0 +1,437 @@
from __future__ import annotations

import json
import logging
import time
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union

import jwt
from bisheng_langchain.utils.requests import Requests
from langchain.callbacks.manager import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (AIMessage, BaseMessage, ChatMessage, FunctionMessage,
                                        HumanMessage, SystemMessage)
from langchain.utils import get_from_dict_or_env
from langchain_core.pydantic_v1 import Field, root_validator
from tenacity import (before_sleep_log, retry, retry_if_exception_type, stop_after_attempt,
                      wait_exponential)

# if TYPE_CHECKING:
#     import jwt

logger = logging.getLogger(__name__)


def _import_pyjwt() -> Any:
    try:
        import jwt
    except ImportError:
        raise ValueError('Could not import jwt python package. '
                         'This is needed in order to calculate get_token_ids. '
                         'Please install it with `pip install PyJWT`.')
    return jwt


def encode_jwt_token(ak, sk):
    headers = {'alg': 'HS256', 'typ': 'JWT'}
    payload = {
        'iss': ak,
        'exp': int(time.time()) + 18000,  # desired expiry time; this example means current time + 300 minutes
        'nbf': int(time.time()) - 500  # desired not-before time; this example means current time - 500 seconds
    }
    token = jwt.encode(payload, sk, headers=headers)
    return token


def _create_retry_decorator(llm):

    min_seconds = 1
    max_seconds = 20
    # Wait 2^x * 1 second between each retry starting with
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(retry_if_exception_type(Exception)),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
    role = _dict['role']
    if role == 'user':
        return HumanMessage(content=_dict['content'])
    elif role == 'assistant':
        content = _dict['content'] or ''  # OpenAI returns None for tool invocations

        if _dict.get('function_call'):
            additional_kwargs = {'function_call': dict(_dict['function_call'])}
        else:
            additional_kwargs = {}
        return AIMessage(content=content, additional_kwargs=additional_kwargs)
    elif role == 'system':
        return SystemMessage(content=_dict['content'])
    elif role == 'function':
        return FunctionMessage(content=_dict['content'], name=_dict['name'])
    else:
        return ChatMessage(content=_dict['content'], role=role)


def _convert_message_to_dict(message: BaseMessage) -> dict:
    if isinstance(message, ChatMessage):
        message_dict = {'role': message.role, 'content': message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {'role': 'user', 'content': message.content}
    elif isinstance(message, AIMessage):
        message_dict = {'role': 'assistant', 'content': message.content}
    elif isinstance(message, SystemMessage):
        message_dict = {'role': 'system', 'content': message.content}
        # raise ValueError(f"not support system role {message}")

    elif isinstance(message, FunctionMessage):
        raise ValueError(f'not support funciton {message}')
    else:
        raise ValueError(f'Got unknown type {message}')

    # if "name" in message.additional_kwargs:
    #     message_dict["name"] = message.additional_kwargs["name"]
    return message_dict


def _convert_message_to_dict2(message: BaseMessage) -> List[dict]:
    if isinstance(message, ChatMessage):
        message_dict = {'role': message.role, 'content': message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {'role': 'user', 'content': message.content}
    elif isinstance(message, AIMessage):
        message_dict = {'role': 'assistant', 'content': message.content}
    elif isinstance(message, SystemMessage):
        raise ValueError(f'not support system role {message}')

    elif isinstance(message, FunctionMessage):
        raise ValueError(f'not support funciton {message}')
    else:
        raise ValueError(f'Got unknown type {message}')

    return [message_dict]


url = 'https://api.sensenova.cn/v1/llm/chat-completions'


class SenseChat(BaseChatModel):

    client: Optional[Any]  #: :meta private:
    model_name: str = Field(default='SenseChat', alias='model')
    """Model name to use."""
    temperature: float = 0.8
    top_p: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    access_key_id: Optional[str] = None
    secret_access_key: Optional[str] = None

    repetition_penalty: float = 1.05
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    max_retries: Optional[int] = 6
    """Maximum number of retries to make when generating."""
    streaming: Optional[bool] = False
    """Whether to stream the results or not."""
    n: Optional[int] = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = 1024
    """Maximum number of tokens to generate."""
    tiktoken_model_name: Optional[str] = None
    """The model name to pass to tiktoken when using this class.
    Tiktoken is used to count the number of tokens in documents to constrain
    them to be under a certain limit. By default, when set to None, this will
    be the same as the embedding model name. However, there are some cases
    where you may want to use this Embedding class with a model name not
    supported by tiktoken. This can include when using Azure embeddings or
    when using one of the many model providers that expose an OpenAI-like
    API but with different models. In those cases, in order to avoid erroring
    when tiktoken is called, you can specify a model name to use here."""
    verbose: Optional[bool] = False

    class Config:
        """Configuration for this pydantic object."""

        allow_population_by_field_name = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""

        _import_pyjwt()

        values['access_key_id'] = get_from_dict_or_env(values, 'access_key_id', 'ACCESS_KEY_ID')
        values['secret_access_key'] = get_from_dict_or_env(values, 'secret_access_key',
                                                           'SECRET_ACCESS_KEY')
        token = encode_jwt_token(values['access_key_id'], values['secret_access_key'])
        if isinstance(token, bytes):
            token = token.decode('utf-8')

        try:
            header = {
                'Authorization': 'Bearer {}'.format(token),
                'Content-Type': 'application/json'
            }

            values['client'] = Requests(headers=header, )
        except AttributeError:
            raise ValueError('Try upgrading it with `pip install --upgrade requests`.')
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling ZhipuAI API."""
        return {
            'model': self.model_name,
            'temperature': self.temperature,
            'top_p': self.top_p,
            'max_tokens': self.max_tokens,
            **self.model_kwargs,
        }

    def completion_with_retry(self, **kwargs: Any) -> Any:
        retry_decorator = _create_retry_decorator(self)

        @retry_decorator
        def _completion_with_retry(**kwargs: Any) -> Any:
            messages = kwargs.get('messages')
            temperature = kwargs.get('temperature')
            top_p = kwargs.get('top_p')
            # messages
            params = {
                'messages': messages,
                'model': self.model_name,
                'top_p': top_p,
                'temperature': temperature,
                'repetition_penalty': self.repetition_penalty,
                'n': self.n,
                'max_new_tokens': self.max_tokens,
                'stream': False  # self.streaming
            }

            token = encode_jwt_token(self.access_key_id, self.secret_access_key)
            if isinstance(token, bytes):
                token = token.decode('utf-8')
            self.client.headers.update({'Authorization': 'Bearer {}'.format(token)})

            response = self.client.post(url=url, json=params).json()
            return response

        rsp_dict = _completion_with_retry(**kwargs)
        if 'error' in rsp_dict:
            logger.error(f'sensechat_error resp={rsp_dict}')
            message = rsp_dict['error']['message']
            raise Exception(message)
        else:
            # return rsp_dict['data'], rsp_dict.get('usage', '')
            return rsp_dict, rsp_dict.get('usage', '')

    async def acompletion_with_retry(self, **kwargs: Any) -> Any:
        """Use tenacity to retry the async completion call."""
        retry_decorator = _create_retry_decorator(self)

        if self.streaming:
            self.client.headers.update({'Accept': 'text/event-stream'})
        else:
            self.client.headers.pop('Accept', '')

        @retry_decorator
        async def _acompletion_with_retry(**kwargs: Any) -> Any:
            messages = kwargs.pop('messages', '')

            inp = {
                'messages': messages,
                'model': self.model_name,
                'top_p': self.top_p,
                'temperature': self.temperature,
                'repetition_penalty': self.repetition_penalty,
                'n': self.n,
                'max_new_tokens': self.max_tokens,
                'stream': True
            }

            # Use OpenAI's async api https://github.com/openai/openai-python#async-api
            async with self.client.apost(url=url, json=inp) as response:

                async for line in response.content.iter_any():

                    if b'\n' in line:
                        for txt_ in line.split(b'\n'):
                            yield txt_.decode('utf-8').strip()
                    else:
                        yield line.decode('utf-8').strip()

        async for response in _acompletion_with_retry(**kwargs):
            is_error = False
            if response:
                if response.startswith('event:error'):
                    is_error = True
                elif response.startswith('data:'):
                    yield (is_error, response[len('data:'):])
                    if is_error:
                        break
                elif response.startswith('{'):
                    yield (is_error, response)
                else:
                    continue

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        for output in llm_outputs:
            if output is None:
                # Happens in streaming
                continue
            token_usage = output['token_usage']
            for k, v in token_usage.items():
                if k in overall_token_usage:
                    overall_token_usage[k] += v
                else:
                    overall_token_usage[k] = v
        return {'token_usage': overall_token_usage, 'model_name': self.model_name}

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:

        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        response, usage = self.completion_with_retry(messages=message_dicts, **params)

        return self._create_chat_result(response)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        if self.streaming:

            inner_completion = ''
            role = 'user'
            params['stream'] = True
            function_call: Optional[dict] = None
            async for is_error, stream_resp in self.acompletion_with_retry(messages=message_dicts,
                                                                           **params):
                if str(stream_resp).startswith('[DONE]'):
                    continue
                output = json.loads(stream_resp)
                if is_error:
                    logger.error(stream_resp)
                    raise ValueError(stream_resp)
                if 'data' in output:
                    output = output['data']

                choices = None
                if 'choices' in output:
                    choices = output.get('choices')

                if choices:
                    for choice in choices:
                        token = choice['delta']

                        inner_completion += token or ''
                        _function_call = ''
                        if run_manager:
                            await run_manager.on_llm_new_token(token)
                        if _function_call:
                            if function_call is None:
                                function_call = _function_call
                            else:
                                function_call['arguments'] += _function_call['arguments']
            message = _convert_dict_to_message({
                'content': inner_completion,
                'role': role,
                'function_call': function_call,
            })
            return ChatResult(generations=[ChatGeneration(message=message)])
        else:
            return self._generate(messages, stop, run_manager, **kwargs)

    def _create_message_dicts(
            self, messages: List[BaseMessage],
            stop: Optional[List[str]]) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params = dict(self._client_params)
        if stop is not None:
            if 'stop' in params:
                raise ValueError('`stop` found in both the input and default params.')
            params['stop'] = stop

        system_content = ''
        message_dicts = []
        for m in messages:
            if m.type == 'system':
                system_content += m.content
                continue
            message_dicts.extend(_convert_message_to_dict2(m))

        if system_content:
            message_dicts[-1]['content'] = system_content + message_dicts[-1]['content']

        return message_dicts, params

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = []

        def _norm_text(text):
            if text[0] == '"' and text[-1] == '"':
                out = eval(text)
            else:
                out = text
            return out

        for res in response['data']['choices']:
            res['content'] = _norm_text(res['message'])
            res['role'] = 'user'
            message = _convert_dict_to_message(res)
            gen = ChatGeneration(message=message)
            generations.append(gen)

        llm_output = {'token_usage': response['data']['usage'], 'model_name': self.model_name}
        return ChatResult(generations=generations, llm_output=llm_output)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{'model_name': self.model_name}, **self._default_params}

    @property
    def _client_params(self) -> Mapping[str, Any]:
        """Get the parameters used for the openai client."""
        zhipu_creds: Dict[str, Any] = {
            'access_key_id': self.access_key_id,
            'secret_access_key': self.secret_access_key,
            'model': self.model_name,
        }
        return {**zhipu_creds, **self._default_params}

    def _get_invocation_params(self,
                               stop: Optional[List[str]] = None,
                               **kwargs: Any) -> Dict[str, Any]:
        """Get the parameters used to invoke the model FOR THE CALLBACKS."""
        return {
            **super()._get_invocation_params(stop=stop, **kwargs),
            **self._default_params,
            'model': self.model_name,
            'function': kwargs.get('functions'),
        }

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return 'sense-chat'
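SenseChat authenticates by minting a short-lived JWT from an access-key pair (see `encode_jwt_token` above) and posting to the SenseNova chat-completions endpoint. A minimal, hedged usage sketch (the credentials below are placeholders; they can also be supplied via the `ACCESS_KEY_ID` / `SECRET_ACCESS_KEY` environment variables read by `validate_environment`):

```python
from bisheng_langchain.chat_models import SenseChat
from langchain.schema.messages import HumanMessage, SystemMessage

chat = SenseChat(access_key_id='my-ak', secret_access_key='my-sk',  # placeholder credentials
                 model='SenseChat', temperature=0.8, max_tokens=512)

# System messages are folded into the last user turn by _create_message_dicts,
# because _convert_message_to_dict2 rejects the system role outright.
messages = [SystemMessage(content='You are a concise assistant.'),
            HumanMessage(content='Introduce yourself in one sentence.')]
# result = chat(messages)   # performs the HTTP call; requires valid SenseNova keys
# print(result.content)
```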
bisheng_langchain/document_loaders/elem_unstrcutured_loader.py:

@@ -63,7 +63,8 @@ class ElemUnstructuredLoader(BasePDFLoader):
                  unstructured_api_url: str = None,
                  start: int = 0,
                  n: int = None,
-                 verbose: bool = False
+                 verbose: bool = False,
+                 kwargs: dict = {}) -> None:
         """Initialize with a file path."""
         self.unstructured_api_url = unstructured_api_url
         self.unstructured_api_key = unstructured_api_key
@@ -71,18 +72,18 @@ class ElemUnstructuredLoader(BasePDFLoader):
         self.file_name = file_name
         self.start = start
         self.n = n
+        self.extra_kwargs = kwargs
         super().__init__(file_path)
 
     def load(self) -> List[Document]:
         """Load given path as pages."""
         b64_data = base64.b64encode(open(self.file_path, 'rb').read()).decode()
+        parameters = {'start': self.start, 'n': self.n}
+        parameters.update(self.extra_kwargs)
         payload = dict(filename=os.path.basename(self.file_name),
                        b64_data=[b64_data],
                        mode='partition',
-                       parameters=
-                       'start': self.start,
-                       'n': self.n
-                       })
+                       parameters=parameters)
 
         resp = requests.post(self.unstructured_api_url, headers=self.headers, json=payload).json()
 
@@ -112,18 +113,23 @@ class ElemUnstructuredLoaderV0(BasePDFLoader):
                  unstructured_api_url: str = None,
                  start: int = 0,
                  n: int = None,
-                 verbose: bool = False
+                 verbose: bool = False,
+                 kwargs: dict = {}) -> None:
         """Initialize with a file path."""
         self.unstructured_api_url = unstructured_api_url
         self.unstructured_api_key = unstructured_api_key
+        self.start = start
+        self.n = n
         self.headers = {'Content-Type': 'application/json'}
         self.file_name = file_name
+        self.extra_kwargs = kwargs
         super().__init__(file_path)
 
     def load(self) -> List[Document]:
         b64_data = base64.b64encode(open(self.file_path, 'rb').read()).decode()
         payload = dict(filename=os.path.basename(self.file_name), b64_data=[b64_data], mode='text')
-
+        payload.update({'start': self.start, 'n': self.n})
+        payload.update(self.extra_kwargs)
         resp = requests.post(self.unstructured_api_url, headers=self.headers, json=payload).json()
 
         if 200 != resp.get('status_code'):
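The new `kwargs` argument is stored as `extra_kwargs` and merged into the request `parameters` (or, for `ElemUnstructuredLoaderV0`, directly into the payload), so callers can forward extra options to the unstructured server without the loader having to know about them. A hedged sketch (the service URL and the extra option name are placeholders, and the `file_name`/`file_path` arguments come from the part of the constructor not shown in this hunk):

```python
from bisheng_langchain.document_loaders.elem_unstrcutured_loader import ElemUnstructuredLoaderV0

loader = ElemUnstructuredLoaderV0(
    file_name='report.pdf',
    file_path='/tmp/report.pdf',
    unstructured_api_url='http://127.0.0.1:10001/v1/etl4llm/predict',  # placeholder URL
    start=0,
    n=None,
    kwargs={'ocr_mode': 'auto'},  # hypothetical extra option, forwarded verbatim
)
docs = loader.load()
print(len(docs))
```

Note the mutable `dict` default on the new `kwargs` parameter in both constructors; callers should pass their own dict rather than relying on (or mutating) the shared default.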
bisheng_langchain-0.2.2.5.dist-info/RECORD:

@@ -8,27 +8,30 @@ bisheng_langchain/agents/chatglm_functions_agent/prompt.py,sha256=OiBTRUOhvhSyO2
 bisheng_langchain/agents/llm_functions_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/agents/llm_functions_agent/base.py,sha256=_bJNSYZJrY82Tcc0zwQLO4qyuTZbOkVDBA0hndGM12w,12311
 bisheng_langchain/autogen_role/__init__.py,sha256=MnTGbAOK770JM9l95Qcxu93s2gNAmhlil7K9HdFG81o,430
-bisheng_langchain/autogen_role/assistant.py,sha256=
-bisheng_langchain/autogen_role/custom.py,sha256=
-bisheng_langchain/autogen_role/groupchat_manager.py,sha256=
-bisheng_langchain/autogen_role/user.py,sha256=
-bisheng_langchain/chains/__init__.py,sha256=
+bisheng_langchain/autogen_role/assistant.py,sha256=VGCoxJaRxRG6ZIJa2TsxcLZbMbF4KC8PRB76DOuznNU,4736
+bisheng_langchain/autogen_role/custom.py,sha256=8xxtAzNF_N1fysyChynVD19t659Qvtcyj_LNiOrE7ew,2499
+bisheng_langchain/autogen_role/groupchat_manager.py,sha256=O9XIove5yzyF_g3K5DnF-Fasdx0sUrRWMogYgEDYJAI,2314
+bisheng_langchain/autogen_role/user.py,sha256=lISbJN5yFsUXHnDCUwr5t6R8O8K3dOMspH4l4_kITnE,5885
+bisheng_langchain/chains/__init__.py,sha256=bZXTCzBbsaU9ks90SU5T2u2py006sArwKZJgCc8BNn8,679
 bisheng_langchain/chains/loader_output.py,sha256=02ZercAFaudStTZ4t7mcVkGRj5pD78HZ6NO8HbmbDH8,1903
 bisheng_langchain/chains/autogen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/chains/autogen/auto_gen.py,sha256=QIkfCO9-VN2wRkl3_TWVj-JkdL2dqMQNy93j3uB401s,3270
 bisheng_langchain/chains/combine_documents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/chains/combine_documents/stuff.py,sha256=z_E_wfhJrAYWcNVRPomPm5fGRDI3hqoC52wcMzgzxVA,2369
+bisheng_langchain/chains/conversational_retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bisheng_langchain/chains/conversational_retrieval/base.py,sha256=XiqBqov6No-wTVCou6qyMT5p2JQgoQI7OLQOYH8XUos,5313
 bisheng_langchain/chains/question_answering/__init__.py,sha256=_gOZMc-SWprK6xc-Jj64jcr9nc-G4YkZbEYwfJNq_bY,8795
 bisheng_langchain/chains/retrieval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/chains/retrieval/retrieval_chain.py,sha256=7VLJ-IPVjKfmAVgVET4cvKCO9DCMxwsGgVhW-wz5RZM,3050
 bisheng_langchain/chains/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/chains/router/multi_rule.py,sha256=BiFryj3-7rOxfttD-MyOkKWLCSGB9LVYd2rjOsIfQC8,375
 bisheng_langchain/chains/router/rule_router.py,sha256=R2YRUnwn7s_7DbsSn27uPn4cIV0D-5iXEORXir0tNGM,1835
-bisheng_langchain/chat_models/__init__.py,sha256=
-bisheng_langchain/chat_models/host_llm.py,sha256=
+bisheng_langchain/chat_models/__init__.py,sha256=7NuGJAUgeCF9yDqe7D3Yw69_5COlsExg811TSDErpps,599
+bisheng_langchain/chat_models/host_llm.py,sha256=zbrWUf9Vvc9_8dlVLPoY7Cm0NL7WhE9DSd5F7xYhY2A,22420
 bisheng_langchain/chat_models/minimax.py,sha256=JLs_f6vWD9beZYUtjD4FG28G8tZHrGUAWOwdLIuJomw,13901
 bisheng_langchain/chat_models/proxy_llm.py,sha256=wzVBZik9WC3-f7kyQ1eu3Ooibqpcocln08knf5lV1Nw,17082
 bisheng_langchain/chat_models/qwen.py,sha256=jGx_tW-LPxfegE6NvY6wID8ps2SsP813atjXnc04C-s,18841
+bisheng_langchain/chat_models/sensetime.py,sha256=fuQ5yYGO5F7o7iQ7us17MlL4TAWRRFCCpNN9bAF-ydc,17056
 bisheng_langchain/chat_models/wenxin.py,sha256=OBXmFWkUWZMu1lUz6hPAEawsbAcdgMWcm9WkJJLZyng,13671
 bisheng_langchain/chat_models/xunfeiai.py,sha256=Yz09-I8u6XhGVnT5mdel15Z3CCQZqApJkgnaxyiZNFk,14037
 bisheng_langchain/chat_models/zhipuai.py,sha256=KokWmDDwljsV2iFiRXZlylIaQRw4jDOq5aCnat53wnQ,14887
@@ -45,7 +48,7 @@ bisheng_langchain/document_loaders/custom_kv.py,sha256=sUKeK0e8-cCmKyj1FsR7SzBNW
 bisheng_langchain/document_loaders/elem_html.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/document_loaders/elem_image.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 bisheng_langchain/document_loaders/elem_pdf.py,sha256=K-TXILGNFLFjavhun_MFbUF4t2_WGA3Z-kbnr75lmW8,22243
-bisheng_langchain/document_loaders/elem_unstrcutured_loader.py,sha256=
+bisheng_langchain/document_loaders/elem_unstrcutured_loader.py,sha256=bJQObxHnk8FaF8RUBkqODzgeikrZ8wdl_TQPa2oEoQo,5169
 bisheng_langchain/document_loaders/universal_kv.py,sha256=dJF_GQGKBMUjB_kX9CSp7xZRhXgwVuGPbMIzJwPh-C0,4063
 bisheng_langchain/document_loaders/parsers/__init__.py,sha256=OOM_FJkwaU-zNS58fASw0TH8FNT6VXKb0VrvisgdrII,171
 bisheng_langchain/document_loaders/parsers/ellm_client.py,sha256=B4Dea8xXXnGvB9j2OXv53HILNUmnWeNJz9ssNM-2fLM,1760
@@ -69,7 +72,7 @@ bisheng_langchain/vectorstores/__init__.py,sha256=zCZgDe7LyQ0iDkfcm5UJ5NxwKQSRHn
 bisheng_langchain/vectorstores/elastic_keywords_search.py,sha256=gt_uw_fSMcEZWxbiA3V0RyA-utLOZlUY-qxdwnsfZks,12664
 bisheng_langchain/vectorstores/milvus.py,sha256=44ZbDsIxdsbUnHOpEpCdrW5zvWnYvDdAVoDKjCFoyYI,34424
 bisheng_langchain/vectorstores/retriever.py,sha256=hj4nAAl352EV_ANnU2OHJn7omCH3nBK82ydo14KqMH4,4353
-bisheng_langchain-0.2.2.
-bisheng_langchain-0.2.2.
-bisheng_langchain-0.2.2.
-bisheng_langchain-0.2.2.
+bisheng_langchain-0.2.2.5.dist-info/METADATA,sha256=zPkSGdTgG56TrcN9xLWKEv02k_Wtg2WRqkCa1yoigtE,2299
+bisheng_langchain-0.2.2.5.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+bisheng_langchain-0.2.2.5.dist-info/top_level.txt,sha256=Z6pPNyCo4ihyr9iqGQbH8sJiC4dAUwA_mAyGRQB5_Fs,18
+bisheng_langchain-0.2.2.5.dist-info/RECORD,,
bisheng_langchain-0.2.2.5.dist-info/WHEEL: file without changes.
bisheng_langchain-0.2.2.5.dist-info/top_level.txt: file without changes.