bisheng-langchain 0.2.3.2__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (45)
  1. bisheng_langchain/agents/llm_functions_agent/base.py +1 -1
  2. bisheng_langchain/chains/__init__.py +2 -1
  3. bisheng_langchain/chains/transform.py +85 -0
  4. bisheng_langchain/chat_models/host_llm.py +19 -5
  5. bisheng_langchain/chat_models/qwen.py +29 -8
  6. bisheng_langchain/document_loaders/custom_kv.py +1 -1
  7. bisheng_langchain/embeddings/host_embedding.py +9 -11
  8. bisheng_langchain/gpts/__init__.py +0 -0
  9. bisheng_langchain/gpts/agent_types/__init__.py +10 -0
  10. bisheng_langchain/gpts/agent_types/llm_functions_agent.py +220 -0
  11. bisheng_langchain/gpts/assistant.py +137 -0
  12. bisheng_langchain/gpts/auto_optimization.py +130 -0
  13. bisheng_langchain/gpts/auto_tool_selected.py +54 -0
  14. bisheng_langchain/gpts/load_tools.py +161 -0
  15. bisheng_langchain/gpts/message_types.py +11 -0
  16. bisheng_langchain/gpts/prompts/__init__.py +15 -0
  17. bisheng_langchain/gpts/prompts/assistant_prompt_opt.py +95 -0
  18. bisheng_langchain/gpts/prompts/base_prompt.py +1 -0
  19. bisheng_langchain/gpts/prompts/breif_description_prompt.py +104 -0
  20. bisheng_langchain/gpts/prompts/opening_dialog_prompt.py +118 -0
  21. bisheng_langchain/gpts/prompts/select_tools_prompt.py +29 -0
  22. bisheng_langchain/gpts/tools/__init__.py +0 -0
  23. bisheng_langchain/gpts/tools/api_tools/__init__.py +50 -0
  24. bisheng_langchain/gpts/tools/api_tools/base.py +90 -0
  25. bisheng_langchain/gpts/tools/api_tools/flow.py +59 -0
  26. bisheng_langchain/gpts/tools/api_tools/macro_data.py +397 -0
  27. bisheng_langchain/gpts/tools/api_tools/sina.py +221 -0
  28. bisheng_langchain/gpts/tools/api_tools/tianyancha.py +160 -0
  29. bisheng_langchain/gpts/tools/bing_search/__init__.py +0 -0
  30. bisheng_langchain/gpts/tools/bing_search/tool.py +55 -0
  31. bisheng_langchain/gpts/tools/calculator/__init__.py +0 -0
  32. bisheng_langchain/gpts/tools/calculator/tool.py +25 -0
  33. bisheng_langchain/gpts/tools/code_interpreter/__init__.py +0 -0
  34. bisheng_langchain/gpts/tools/code_interpreter/tool.py +261 -0
  35. bisheng_langchain/gpts/tools/dalle_image_generator/__init__.py +0 -0
  36. bisheng_langchain/gpts/tools/dalle_image_generator/tool.py +181 -0
  37. bisheng_langchain/gpts/tools/get_current_time/__init__.py +0 -0
  38. bisheng_langchain/gpts/tools/get_current_time/tool.py +23 -0
  39. bisheng_langchain/gpts/utils.py +197 -0
  40. bisheng_langchain/utils/requests.py +5 -1
  41. bisheng_langchain/vectorstores/milvus.py +1 -1
  42. {bisheng_langchain-0.2.3.2.dist-info → bisheng_langchain-0.3.0.dist-info}/METADATA +5 -2
  43. {bisheng_langchain-0.2.3.2.dist-info → bisheng_langchain-0.3.0.dist-info}/RECORD +45 -12
  44. {bisheng_langchain-0.2.3.2.dist-info → bisheng_langchain-0.3.0.dist-info}/WHEEL +0 -0
  45. {bisheng_langchain-0.2.3.2.dist-info → bisheng_langchain-0.3.0.dist-info}/top_level.txt +0 -0
bisheng_langchain/gpts/assistant.py (new file)
@@ -0,0 +1,137 @@
+ import asyncio
+ import logging
+ from enum import Enum
+ from functools import lru_cache
+ from typing import Any, Mapping, Optional, Sequence
+ from urllib.parse import urlparse
+
+ import httpx
+ import yaml
+ from bisheng_langchain.gpts.load_tools import get_all_tool_names, load_tools
+ from bisheng_langchain.gpts.utils import import_by_type, import_class
+ from langchain.tools import BaseTool
+ from langchain_core.language_models.base import LanguageModelLike
+ from langchain_core.messages import HumanMessage, AIMessage
+ from langchain_core.runnables import RunnableBinding
+
+ logger = logging.getLogger(__name__)
+
+
+ class ConfigurableAssistant(RunnableBinding):
+     agent_executor_type: str
+     tools: Sequence[BaseTool]
+     llm: LanguageModelLike
+     assistant_message: str
+     interrupt_before_action: bool = False
+     recursion_limit: int = 50
+
+     def __init__(
+         self,
+         *,
+         agent_executor_type: str,
+         tools: Sequence[BaseTool],
+         llm: LanguageModelLike,
+         assistant_message: str,
+         interrupt_before_action: bool = False,
+         recursion_limit: int = 50,
+         kwargs: Optional[Mapping[str, Any]] = None,
+         config: Optional[Mapping[str, Any]] = None,
+         **others: Any,
+     ) -> None:
+         others.pop("bound", None)
+         agent_executor_object = import_class(f'bisheng_langchain.gpts.agent_types.{agent_executor_type}')
+
+         _agent_executor = agent_executor_object(tools, llm, assistant_message, interrupt_before_action)
+         agent_executor = _agent_executor.with_config({"recursion_limit": recursion_limit})
+         super().__init__(
+             agent_executor_type=agent_executor_type,
+             tools=tools,
+             llm=llm,
+             assistant_message=assistant_message,
+             bound=agent_executor,
+             kwargs=kwargs or {},
+             config=config or {},
+         )
+
+
+ class BishengAssistant:
+
+     def __init__(self, yaml_path) -> None:
+         self.yaml_path = yaml_path
+         with open(self.yaml_path, 'r') as f:
+             self.params = yaml.safe_load(f)
+
+         self.assistant_params = self.params['assistant']
+
+         # init assistant prompt
+         prompt_type = self.assistant_params['prompt_type']
+         assistant_message = import_class(f'bisheng_langchain.gpts.prompts.{prompt_type}')
+
+         # init llm or agent
+         llm_params = self.assistant_params['llm']
+         llm_object = import_by_type(_type='llms', name=llm_params['type'])
+         if llm_params['type'] == 'ChatOpenAI' and llm_params['openai_proxy']:
+             llm_params.pop('type')
+             llm = llm_object(
+                 http_client=httpx.Client(proxies=llm_params['openai_proxy']),
+                 http_async_client=httpx.AsyncClient(proxies=llm_params['openai_proxy']),
+                 **llm_params,
+             )
+         else:
+             llm_params.pop('type')
+             llm = llm_object(**llm_params)
+
+         # init tools
+         available_tools = get_all_tool_names()
+         tools = []
+         for tool in self.assistant_params['tools']:
+             tool_type = tool.pop('type')
+             tool_config = tool if tool else {}
+             if tool_type not in available_tools:
+                 raise ValueError(f"Tool type {tool_type} not found in TOOLS")
+             _returned_tools = load_tools({tool_type: tool_config})
+             if isinstance(_returned_tools, list):
+                 tools.extend(_returned_tools)
+             else:
+                 tools.append(_returned_tools)
+
+         # init agent executor
+         agent_executor_params = self.assistant_params['agent_executor']
+         agent_executor_type = agent_executor_params.pop('type')
+         self.assistant = ConfigurableAssistant(
+             agent_executor_type=agent_executor_type,
+             tools=tools,
+             llm=llm,
+             assistant_message=assistant_message,
+             **agent_executor_params
+         )
+
+     def run(self, query, chat_history=[], chat_round=5):
+         if len(chat_history) % 2 != 0:
+             raise ValueError("chat history should be even")
+
+         # limit chat_history to the most recent chat_round turns
+         if len(chat_history) > chat_round * 2:
+             chat_history = chat_history[-chat_round*2:]
+
+         inputs = []
+         for i in range(0, len(chat_history), 2):
+             inputs.append(HumanMessage(content=chat_history[i]))
+             inputs.append(AIMessage(content=chat_history[i+1]))
+         inputs.append(HumanMessage(content=query))
+         result = asyncio.run(self.assistant.ainvoke(inputs))
+         return result
+
+
+ if __name__ == "__main__":
+     from langchain.globals import set_debug
+
+     set_debug(True)
+     # chat_history = []
+     chat_history = ['你好', '你好,有什么可以帮助你吗?', '福蓉科技股价多少?', '福蓉科技(股票代码:300049)的当前股价为48.67元。']
+     query = "去年这个时候的股价是多少?"
+     bisheng_assistant = BishengAssistant("config/base_scene.yaml")
+     result = bisheng_assistant.run(query, chat_history=chat_history)
+     for r in result:
+         print(f'------------------')
+         print(type(r), r)
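
For orientation: BishengAssistant above reads a YAML file whose assistant section supplies prompt_type, llm, tools and agent_executor. A minimal driving sketch, assuming the wheel is installed and an OpenAI key is configured; the file name, the agent-executor name and the concrete field values below are illustrative assumptions, not values shipped in the package:

import yaml

from bisheng_langchain.gpts.assistant import BishengAssistant

# Hypothetical config mirroring the keys read in BishengAssistant.__init__:
# assistant.prompt_type, assistant.llm, assistant.tools, assistant.agent_executor.
config = {
    "assistant": {
        # must name an object exported from bisheng_langchain.gpts.prompts
        "prompt_type": "DEFAULT_SYSTEM_MESSAGE",
        # openai_proxy is read unconditionally for ChatOpenAI, so keep the key even when empty
        "llm": {"type": "ChatOpenAI", "model": "gpt-4-0125-preview", "temperature": 0.01, "openai_proxy": ""},
        "tools": [{"type": "get_current_time"}, {"type": "calculator"}],
        # must name a callable exported from bisheng_langchain.gpts.agent_types (this name is a guess)
        "agent_executor": {"type": "get_openai_functions_agent_executor"},
    }
}

with open("my_assistant.yaml", "w") as f:
    yaml.safe_dump(config, f)

assistant = BishengAssistant("my_assistant.yaml")
print(assistant.run("What time is it right now?"))

Note that run() wraps the whole exchange in asyncio.run, so it has to be called from a synchronous context.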
bisheng_langchain/gpts/auto_optimization.py (new file)
@@ -0,0 +1,130 @@
+ import json
+ import os
+ import re
+
+ import httpx
+ from bisheng_langchain.gpts.prompts import ASSISTANT_PROMPT_OPT, BREIF_DES_PROMPT, OPENDIALOG_PROMPT
+ from langchain_core.language_models.base import LanguageModelLike
+ from langchain_openai.chat_models import ChatOpenAI
+ from loguru import logger
+
+
+ def parse_markdown(input_str: str) -> str:
+     match = re.search(r'```(markdown)?(.*)```', input_str, re.DOTALL)
+     if match is None:
+         out_str = input_str
+     else:
+         out_str = match.group(2)
+
+     out_str = out_str.strip()
+     out_str = out_str.replace('```', '')
+     return out_str
+
+
+ def parse_json(input_str: str) -> str:
+     match = re.search(r'```(json)?(.*)```', input_str, re.DOTALL)
+     if match is None:
+         out_str = input_str
+     else:
+         out_str = match.group(2)
+
+     out_str = out_str.strip()
+     out_str = out_str.replace('```', '')
+     return out_str
+
+
+ def optimize_assistant_prompt(
+     llm: LanguageModelLike,
+     assistant_name: str,
+     assistant_description: str,
+ ) -> str:
+     """optimize assistant prompt
+
+     Args:
+         llm (LanguageModelLike):
+         assistant_name (str):
+         assistant_description (str):
+
+     Returns:
+         assistant_prompt(str):
+     """
+     chain = ({
+         'assistant_name': lambda x: x['assistant_name'],
+         'assistant_description': lambda x: x['assistant_description'],
+     }
+              | ASSISTANT_PROMPT_OPT
+              | llm)
+     chain_output = chain.invoke({
+         'assistant_name': assistant_name,
+         'assistant_description': assistant_description,
+     })
+     response = chain_output.content
+     assistant_prompt = parse_markdown(response)
+     return assistant_prompt
+
+
+ def generate_opening_dialog(
+     llm: LanguageModelLike,
+     description: str,
+ ) -> str:
+     chain = ({
+         'description': lambda x: x['description'],
+     }
+              | OPENDIALOG_PROMPT
+              | llm)
+     time = 0
+     while time <= 3:
+         try:
+             chain_output = chain.invoke({
+                 'description': description,
+             })
+             output = parse_json(chain_output.content)
+             output = json.loads(output)
+             opening_lines = output[0]['开场白']
+             questions = output[0]['问题']
+             break
+         except Exception as e:
+             logger.info(f'第{time}次解析失败, 错误信息: {e}')
+             logger.info(f'模型输出结果为{chain_output.content}。')
+             time += 1
+             opening_lines = ''
+             questions = []
+
+     res = {}
+     res['opening_lines'] = opening_lines
+     res['questions'] = questions
+
+     return res
+
+
+ def generate_breif_description(
+     llm: LanguageModelLike,
+     description: str,
+ ) -> str:
+     chain = ({
+         'description': lambda x: x['description'],
+     }
+              | BREIF_DES_PROMPT
+              | llm)
+     chain_output = chain.invoke({
+         'description': description,
+     })
+     breif_description = chain_output.content
+     breif_description = breif_description.strip()
+     return breif_description
+
+
+ if __name__ == '__main__':
+     httpx_client = httpx.Client(proxies=os.getenv('OPENAI_PROXY'))
+     llm = ChatOpenAI(model='gpt-4-0125-preview', temperature=0.01, http_client=httpx_client)
+     # llm = ChatQWen(model="qwen1.5-72b-chat", temperature=0.01, api_key=os.getenv('QWEN_API_KEY'))
+     assistant_name = '金融分析助手'
+     assistant_description = '1. 分析上市公司最新的年报财报;2. 获取上市公司的最新新闻;'
+     assistant_prompt = optimize_assistant_prompt(llm, assistant_name, assistant_description)
+     # print(assistant_prompt)
+
+     opening_dialog = generate_opening_dialog(llm, assistant_prompt)
+     print(opening_dialog)
+
+     # breif_description = generate_breif_description(llm, assistant_prompt)
+     # print(breif_description)
bisheng_langchain/gpts/auto_tool_selected.py (new file)
@@ -0,0 +1,54 @@
+ from bisheng_langchain.gpts.prompts.select_tools_prompt import HUMAN_MSG, SYS_MSG
+ from langchain.prompts import (ChatPromptTemplate, HumanMessagePromptTemplate,
+                                SystemMessagePromptTemplate)
+ from langchain_core.language_models.base import LanguageModelLike
+ from pydantic import BaseModel
+
+
+ class ToolInfo(BaseModel):
+     tool_name: str
+     tool_description: str
+
+
+ class ToolSelector:
+
+     def __init__(
+         self,
+         llm: LanguageModelLike,
+         tools: list[ToolInfo],
+         system_message: str = SYS_MSG,
+         human_message: str = HUMAN_MSG,
+     ) -> None:
+         self.llm = llm
+         self.tools = tools
+         self.system_message = system_message
+         self.human_message = human_message
+
+     def select(self, task_name: str, task_description: str) -> list[str]:
+         tool_pool = [tool.dict() for tool in self.tools]
+         messages = [
+             SystemMessagePromptTemplate.from_template(self.system_message),
+             HumanMessagePromptTemplate.from_template(self.human_message),
+         ]
+
+         chain = ({
+             'tool_pool': lambda x: x['tool_pool'],
+             'task_name': lambda x: x['task_name'],
+             'task_description': lambda x: x['task_description'],
+         }
+                  | ChatPromptTemplate.from_messages(messages)
+                  | self.llm)
+
+         chain_output = chain.invoke({
+             'tool_pool': tool_pool,
+             'task_name': task_name,
+             'task_description': task_description,
+         })
+
+         try:
+             all_tool_name = set([tool.tool_name for tool in self.tools])
+             output = list(set(eval(chain_output.content)) & all_tool_name)
+             return output
+         except Exception as e:
+             print(e)
+             return []
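
A short usage sketch for ToolSelector, assuming a configured OpenAI key; the tool names and descriptions are illustrative and should match whatever tools are actually registered:

from bisheng_langchain.gpts.auto_tool_selected import ToolInfo, ToolSelector
from langchain_openai.chat_models import ChatOpenAI

llm = ChatOpenAI(model="gpt-4-0125-preview", temperature=0.01)

# Candidate tool pool handed to the selection prompt.
tools = [
    ToolInfo(tool_name="bing_search", tool_description="Search the web with Bing."),
    ToolInfo(tool_name="calculator", tool_description="Evaluate arithmetic expressions."),
    ToolInfo(tool_name="get_current_time", tool_description="Return the current date and time."),
]

selector = ToolSelector(llm=llm, tools=tools)
# Returns only names that appear both in the model reply and in the tool pool.
print(selector.select("stock price lookup", "Find the latest share price of a listed company."))

Because select() parses the model reply with eval and intersects it with the registered names, a malformed reply simply falls through to the empty-list branch.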
bisheng_langchain/gpts/load_tools.py (new file)
@@ -0,0 +1,161 @@
+ import warnings
+ from typing import Any, Callable, Dict, List, Optional, Tuple
+
+ import httpx
+ from bisheng_langchain.gpts.tools.api_tools import ALL_API_TOOLS
+ from bisheng_langchain.gpts.tools.bing_search.tool import BingSearchRun
+ from bisheng_langchain.gpts.tools.calculator.tool import calculator
+ from bisheng_langchain.gpts.tools.code_interpreter.tool import CodeInterpreterTool
+
+ # from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
+ from bisheng_langchain.gpts.tools.dalle_image_generator.tool import (
+     DallEAPIWrapper,
+     DallEImageGenerator,
+ )
+ from bisheng_langchain.gpts.tools.get_current_time.tool import get_current_time
+ from langchain_community.tools.arxiv.tool import ArxivQueryRun
+ from langchain_community.tools.bearly.tool import BearlyInterpreterTool
+ from langchain_community.utilities.arxiv import ArxivAPIWrapper
+ from langchain_community.utilities.bing_search import BingSearchAPIWrapper
+ from langchain_core.callbacks import BaseCallbackManager, Callbacks
+ from langchain_core.language_models import BaseLanguageModel
+ from langchain_core.tools import BaseTool, Tool
+ from mypy_extensions import Arg, KwArg
+
+
+ def _get_current_time() -> BaseTool:
+     return get_current_time
+
+
+ def _get_calculator() -> BaseTool:
+     return calculator
+
+
+ def _get_arxiv() -> BaseTool:
+     return ArxivQueryRun(api_wrapper=ArxivAPIWrapper())
+
+
+ _BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
+     'get_current_time': _get_current_time,
+     'calculator': _get_calculator,
+     'arxiv': _get_arxiv,
+ }
+
+ _LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {}
+
+ _EXTRA_LLM_TOOLS: Dict[
+     str, Tuple[Callable[[Arg(BaseLanguageModel, 'llm'), KwArg(Any)], BaseTool], List[str]]  # noqa  # type: ignore
+ ] = {}
+
+
+ def _get_bing_search(**kwargs: Any) -> BaseTool:
+     return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
+
+
+ def _get_dalle_image_generator(**kwargs: Any) -> Tool:
+     openai_api_key = kwargs.get('openai_api_key')
+     http_async_client = httpx.AsyncClient(proxies=kwargs.get('openai_proxy'))
+     httpc_client = httpx.Client(proxies=kwargs.get('openai_proxy'))
+     return DallEImageGenerator(
+         api_wrapper=DallEAPIWrapper(
+             model='dall-e-3',
+             api_key=openai_api_key,
+             http_client=httpc_client,
+             http_async_client=http_async_client,
+         )
+     )
+
+
+ def _get_bearly_code_interpreter(**kwargs: Any) -> Tool:
+     return BearlyInterpreterTool(**kwargs).as_tool()
+
+
+ def _get_native_code_interpreter(**kwargs: Any) -> Tool:
+     return CodeInterpreterTool(**kwargs).as_tool()
+
+
+ # the second list holds the required parameters, the third list the optional ones
+ _EXTRA_PARAM_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[Optional[str]], List[Optional[str]]]] = {  # type: ignore
+     'dalle_image_generator': (_get_dalle_image_generator, ['openai_api_key', 'openai_proxy'], []),
+     'bing_search': (_get_bing_search, ['bing_subscription_key', 'bing_search_url'], []),
+     'code_interpreter': (_get_native_code_interpreter, ["minio"], ['files']),
+ }
+
+ _API_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {**ALL_API_TOOLS}  # type: ignore
+
+ _ALL_TOOLS = {
+     **_BASE_TOOLS,
+     **_LLM_TOOLS,
+     **_EXTRA_LLM_TOOLS,
+     **_EXTRA_PARAM_TOOLS,
+     **_API_TOOLS,
+ }
+
+
+ def _handle_callbacks(callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks) -> Callbacks:
+     if callback_manager is not None:
+         warnings.warn(
+             'callback_manager is deprecated. Please use callbacks instead.',
+             DeprecationWarning,
+         )
+         if callbacks is not None:
+             raise ValueError('Cannot specify both callback_manager and callbacks arguments.')
+         return callback_manager
+     return callbacks
+
+
+ def load_tools(
+     tool_params: Dict[str, Dict[str, Any]],
+     llm: Optional[BaseLanguageModel] = None,
+     callbacks: Callbacks = None,
+     **kwargs: Any,
+ ) -> List[BaseTool]:
+     tools = []
+     callbacks = _handle_callbacks(callback_manager=kwargs.get('callback_manager'), callbacks=callbacks)
+     for name, params in tool_params.items():
+         if name in _BASE_TOOLS:
+             tools.append(_BASE_TOOLS[name]())
+         elif name in _LLM_TOOLS:
+             if llm is None:
+                 raise ValueError(f'Tool {name} requires an LLM to be provided')
+             tool = _LLM_TOOLS[name](llm)
+             tools.append(tool)
+         elif name in _EXTRA_LLM_TOOLS:
+             if llm is None:
+                 raise ValueError(f'Tool {name} requires an LLM to be provided')
+             _get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
+             missing_keys = set(extra_keys).difference(params)
+             if missing_keys:
+                 raise ValueError(f'Tool {name} requires some parameters that were not ' f'provided: {missing_keys}')
+             sub_kwargs = {k: params[k] for k in extra_keys}
+             tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
+             tools.append(tool)
+         elif name in _EXTRA_PARAM_TOOLS:
+             _get_tool_func, extra_keys, optional_keys = _EXTRA_PARAM_TOOLS[name]
+             missing_keys = set(extra_keys).difference(params)
+             if missing_keys:
+                 raise ValueError(f'Tool {name} requires some parameters that were not ' f'provided: {missing_keys}')
+             extra_kwargs = {k: params[k] for k in extra_keys}
+             optional_kwargs = {k: params[k] for k in optional_keys if k in params}
+             all_kwargs = {**extra_kwargs, **optional_kwargs}
+             tool = _get_tool_func(**all_kwargs)
+             tools.append(tool)
+         elif name in _API_TOOLS:
+             _get_api_tool_func, extra_keys = _API_TOOLS[name]
+             missing_keys = set(extra_keys).difference(params)
+             if missing_keys:
+                 raise ValueError(f'Tool {name} requires some parameters that were not ' f'provided: {missing_keys}')
+             mini_kwargs = {k: params[k] for k in extra_keys}
+             tool = _get_api_tool_func(name=name, **mini_kwargs)
+             tools.append(tool)
+         else:
+             raise ValueError(f'Got unknown tool {name}')
+     if callbacks is not None:
+         for tool in tools:
+             tool.callbacks = callbacks
+     return tools
+
+
+ def get_all_tool_names() -> List[str]:
+     """Get a list of all possible tool names."""
+     return list(_ALL_TOOLS.keys())
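
A sketch of this registry in use; the Bing credentials and endpoint are placeholders, not values from the package:

from bisheng_langchain.gpts.load_tools import get_all_tool_names, load_tools

# Every registered name across _BASE_TOOLS, _EXTRA_PARAM_TOOLS and _API_TOOLS.
print(get_all_tool_names())

# Parameter-free tools take an empty config; parameterised tools must supply their
# required keys (bing_search needs bing_subscription_key and bing_search_url).
tools = load_tools({
    "get_current_time": {},
    "calculator": {},
    "bing_search": {
        "bing_subscription_key": "YOUR_BING_KEY",                          # placeholder
        "bing_search_url": "https://api.bing.microsoft.com/v7.0/search",   # placeholder endpoint
    },
})
print([tool.name for tool in tools])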
bisheng_langchain/gpts/message_types.py (new file)
@@ -0,0 +1,11 @@
+ from typing import Any
+
+ from langchain_core.messages import FunctionMessage, ToolMessage
+
+
+ class LiberalFunctionMessage(FunctionMessage):
+     content: Any
+
+
+ class LiberalToolMessage(ToolMessage):
+     content: Any
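
These two subclasses only relax content to Any, so a tool can return structured output (for example a dict) on the message. A small sketch, assuming a langchain_core version whose stock ToolMessage would reject a bare dict as content:

from bisheng_langchain.gpts.message_types import LiberalToolMessage

# A dict payload rides along unmodified; the base ToolMessage expects a string
# (or a list of string/dict chunks) for content.
msg = LiberalToolMessage(tool_call_id="call_0", content={"code": "300049", "price": 48.67})
print(msg.content["price"])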
bisheng_langchain/gpts/prompts/__init__.py (new file)
@@ -0,0 +1,15 @@
+ from bisheng_langchain.gpts.prompts.assistant_prompt_opt import ASSISTANT_PROMPT_OPT
+ from bisheng_langchain.gpts.prompts.base_prompt import DEFAULT_SYSTEM_MESSAGE
+ from bisheng_langchain.gpts.prompts.breif_description_prompt import BREIF_DES_PROMPT
+ from bisheng_langchain.gpts.prompts.opening_dialog_prompt import OPENDIALOG_PROMPT
+ from bisheng_langchain.gpts.prompts.select_tools_prompt import HUMAN_MSG, SYS_MSG
+
+
+ __all__ = [
+     "DEFAULT_SYSTEM_MESSAGE",
+     "ASSISTANT_PROMPT_OPT",
+     "OPENDIALOG_PROMPT",
+     "BREIF_DES_PROMPT",
+     "SYS_MSG",
+     "HUMAN_MSG",
+ ]
bisheng_langchain/gpts/prompts/assistant_prompt_opt.py (new file)
@@ -0,0 +1,95 @@
+ from langchain_core.prompts import PromptTemplate
+ from langchain_core.prompts.chat import (
+     ChatPromptTemplate,
+     HumanMessagePromptTemplate,
+     SystemMessagePromptTemplate,
+ )
+
+ # e.g. the assistant's identity, the concrete methods and steps for completing its tasks, the tone used when answering, and what issues to watch out for
+ system_template = """你是一个prompt优化大师,你会得到一个助手的名字和简单描述,你需要根据这些信息,为助手生成一个合适的角色描述、详细的技能说明、相关约束信息,输出为markdown格式。你需要按照以下格式进行组织输出内容:
+ ```markdown
+ ## 角色
+ 你是一个[助手的角色],[助手的角色描述]。
+
+ ## 技能
+ 1. [技能 1 的描述]:
+ - [技能 1 的具体内容]。
+ - [技能 1 的具体内容]。
+ 2. [技能 2 的描述]:
+ - [技能 2 的具体内容]。
+ - [技能 2 的具体内容]。
+
+ ## 限制
+ - [限制 1 的描述]。
+ - [限制 2 的描述]。
+ ```
+
+ 以下是一些例子:
+ 示例1:
+ 输入:
+ 助手名字: 金融分析助手
+ 助手描述: 1. 分析上市公司最新的年报财报;2. 获取上市公司的最新新闻;
+
+ 输出:
+ ```markdown
+ ## 角色
+ 你是一个金融分析师,会利用最新的信息和数据来分析公司的财务状况、市场趋势和行业动态,以帮助客户做出明智的投资决策。
+
+ ## 技能
+ 1. 分析上市公司最新的年报财报:
+ - 使用财务分析工具和技巧,对公司的财务报表进行详细的分析和解读。
+ - 评估公司的财务健康状况,包括营收、利润、资产负债表、现金流量等方面。
+ - 分析公司的财务指标,如利润率、偿债能力、周转率等,以评估其盈利能力和风险水平。
+ - 比较公司的财务表现与同行业其他公司的平均水平,以评估其相对竞争力。
+ 2. 获取上市公司的最新新闻:
+ - 使用新闻来源和数据库,定期获取上市公司的最新新闻和公告。
+ - 分析新闻对公司股价和投资者情绪的潜在影响。
+ - 关注公司的重大事件,如合并收购、产品发布、管理层变动等,以及这些事件对公司未来发展的影响。
+ - 结合财务分析和新闻分析,提供对公司的综合评估和投资建议。
+
+ ## 限制
+ - 只讨论与金融分析相关的内容,拒绝回答与金融分析无关的话题。
+ - 所有的输出内容必须按照给定的格式进行组织,不能偏离框架要求。
+ - 分析部分不能超过 100 字。
+ ```
+
+ 示例2:
+ 输入:
+ 助手名字: 前端开发助手
+ 助手描述: 你的角色是前端开发,能帮助我把图片制作成html页面,css使用tailwind.css,ui库使用antd
+
+ 输出:
+ ```markdown
+ # 角色
+ 你是一个前端开发工程师,可以使用 HTML、CSS 和 JavaScript 等技术构建网站和应用程序。
+
+ ## 技能
+ 1. 将图片制作成 HTML 页面
+ - 当用户需要将图片制作成 HTML 页面时,你可以根据用户提供的图片和要求,使用 HTML 和 CSS 等技术构建一个页面。
+ - 在构建页面时,你可以使用 Tailwind CSS 来简化 CSS 样式的编写,并使用 Antd 库来提供丰富的 UI 组件。
+ - 构建完成后,你可以将页面代码返回给用户,以便用户可以将其部署到服务器上或在本地查看。
+
+ 2. 提供前端开发相关的建议和帮助
+ - 当用户需要前端开发相关的建议和帮助时,你可以根据用户的问题,提供相关的建议和帮助。
+ - 你可以提供关于 HTML、CSS、JavaScript 等前端技术的建议和帮助,也可以提供关于前端开发工具和流程的建议和帮助。
+
+ ## 限制
+ - 只讨论与前端开发相关的内容,拒绝回答与前端开发无关的话题。
+ - 所输出的内容必须按照给定的格式进行组织,不能偏离框架要求。
+ ```
+ """
+
+ human_template = """
+ 输入:
+ 助手名字: {assistant_name}
+ 助手描述: {assistant_description}
+
+ 输出:
+ """
+
+
+ messages = [
+     SystemMessagePromptTemplate.from_template(system_template),
+     HumanMessagePromptTemplate.from_template(human_template),
+ ]
+ ASSISTANT_PROMPT_OPT = ChatPromptTemplate.from_messages(messages)
bisheng_langchain/gpts/prompts/base_prompt.py (new file)
@@ -0,0 +1 @@
+ DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant."