flowllm 0.1.2__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowllm/__init__.py +8 -3
- flowllm/app.py +1 -1
- flowllm/config/base.yaml +75 -0
- flowllm/config/fin_supply.yaml +39 -0
- flowllm/config/pydantic_config_parser.py +16 -1
- flowllm/context/__init__.py +2 -0
- flowllm/context/base_context.py +10 -20
- flowllm/context/flow_context.py +45 -2
- flowllm/context/service_context.py +73 -12
- flowllm/embedding_model/openai_compatible_embedding_model.py +1 -2
- flowllm/enumeration/chunk_enum.py +1 -0
- flowllm/flow/__init__.py +9 -0
- flowllm/flow/base_flow.py +44 -11
- flowllm/flow/expression/__init__.py +1 -0
- flowllm/flow/{parser → expression}/expression_parser.py +5 -2
- flowllm/flow/expression/expression_tool_flow.py +25 -0
- flowllm/flow/gallery/__init__.py +1 -8
- flowllm/flow/gallery/mock_tool_flow.py +46 -33
- flowllm/flow/tool_op_flow.py +97 -0
- flowllm/llm/base_llm.py +0 -2
- flowllm/llm/litellm_llm.py +2 -1
- flowllm/op/__init__.py +3 -3
- flowllm/op/akshare/get_ak_a_code_op.py +1 -1
- flowllm/op/akshare/get_ak_a_info_op.py +1 -1
- flowllm/op/base_llm_op.py +3 -2
- flowllm/op/base_op.py +258 -25
- flowllm/op/base_tool_op.py +47 -0
- flowllm/op/gallery/__init__.py +0 -1
- flowllm/op/gallery/mock_op.py +13 -7
- flowllm/op/llm/__init__.py +3 -0
- flowllm/op/llm/react_llm_op.py +105 -0
- flowllm/op/{agent/react_prompt.yaml → llm/react_llm_prompt.yaml} +17 -10
- flowllm/op/llm/simple_llm_op.py +48 -0
- flowllm/op/llm/stream_llm_op.py +61 -0
- flowllm/op/mcp/__init__.py +2 -0
- flowllm/op/mcp/ant_op.py +42 -0
- flowllm/op/mcp/base_sse_mcp_op.py +28 -0
- flowllm/op/parallel_op.py +5 -1
- flowllm/op/search/__init__.py +1 -2
- flowllm/op/search/dashscope_search_op.py +73 -121
- flowllm/op/search/tavily_search_op.py +69 -80
- flowllm/op/sequential_op.py +4 -0
- flowllm/schema/flow_stream_chunk.py +11 -0
- flowllm/schema/message.py +2 -0
- flowllm/schema/service_config.py +8 -3
- flowllm/schema/tool_call.py +53 -4
- flowllm/service/__init__.py +0 -1
- flowllm/service/base_service.py +31 -14
- flowllm/service/http_service.py +46 -37
- flowllm/service/mcp_service.py +17 -23
- flowllm/storage/vector_store/__init__.py +1 -0
- flowllm/storage/vector_store/base_vector_store.py +99 -12
- flowllm/storage/vector_store/chroma_vector_store.py +250 -8
- flowllm/storage/vector_store/es_vector_store.py +291 -35
- flowllm/storage/vector_store/local_vector_store.py +206 -9
- flowllm/storage/vector_store/memory_vector_store.py +509 -0
- flowllm/utils/common_utils.py +54 -0
- flowllm/utils/logger_utils.py +28 -0
- flowllm/utils/miner_u_pdf_processor.py +726 -0
- {flowllm-0.1.2.dist-info → flowllm-0.1.5.dist-info}/METADATA +7 -6
- flowllm-0.1.5.dist-info/RECORD +98 -0
- flowllm/config/default.yaml +0 -77
- flowllm/config/empty.yaml +0 -37
- flowllm/flow/gallery/cmd_flow.py +0 -11
- flowllm/flow/gallery/code_tool_flow.py +0 -30
- flowllm/flow/gallery/dashscope_search_tool_flow.py +0 -34
- flowllm/flow/gallery/deepsearch_tool_flow.py +0 -39
- flowllm/flow/gallery/expression_tool_flow.py +0 -18
- flowllm/flow/gallery/tavily_search_tool_flow.py +0 -30
- flowllm/flow/gallery/terminate_tool_flow.py +0 -30
- flowllm/flow/parser/__init__.py +0 -0
- flowllm/op/agent/__init__.py +0 -0
- flowllm/op/agent/react_op.py +0 -83
- flowllm/op/base_ray_op.py +0 -313
- flowllm/op/code/__init__.py +0 -1
- flowllm/op/code/execute_code_op.py +0 -42
- flowllm/op/gallery/terminate_op.py +0 -29
- flowllm/op/search/dashscope_deep_research_op.py +0 -260
- flowllm/service/cmd_service.py +0 -15
- flowllm-0.1.2.dist-info/RECORD +0 -99
- {flowllm-0.1.2.dist-info → flowllm-0.1.5.dist-info}/WHEEL +0 -0
- {flowllm-0.1.2.dist-info → flowllm-0.1.5.dist-info}/entry_points.txt +0 -0
- {flowllm-0.1.2.dist-info → flowllm-0.1.5.dist-info}/licenses/LICENSE +0 -0
- {flowllm-0.1.2.dist-info → flowllm-0.1.5.dist-info}/top_level.txt +0 -0
flowllm/op/base_tool_op.py
ADDED
@@ -0,0 +1,47 @@
+from abc import ABC, abstractmethod
+
+from loguru import logger
+
+from flowllm.op.base_llm_op import BaseLLMOp
+from flowllm.schema.tool_call import ToolCall
+from flowllm.storage.cache import DataCache
+
+
+class BaseToolOp(BaseLLMOp, ABC):
+
+    def __init__(self,
+                 enable_cache: bool = False,
+                 cache_path: str = "cache",
+                 cache_expire_hours: float = 0.1,
+                 enable_print_output: bool = True,
+                 **kwargs):
+        super().__init__(**kwargs)
+
+        self.enable_cache = enable_cache
+        self.cache_path: str = cache_path
+        self.cache_expire_hours: float = cache_expire_hours
+        self.enable_print_output: bool = enable_print_output
+        self._cache: DataCache | None = None
+
+        self.tool_call: ToolCall = self.build_tool_call()
+        self.input_dict: dict = {}
+        self.output_dict: dict = {}
+
+    @property
+    def cache(self):
+        if self.enable_cache and self._cache is None:
+            self._cache = DataCache(f"{self.cache_path}/{self.name}")
+        return self._cache
+
+    @abstractmethod
+    def build_tool_call(self) -> ToolCall:
+        ...
+
+    def before_execute(self):
+        for key in self.tool_call.input_schema.keys():
+            self.input_dict[key] = self.context.get(key)
+
+    def after_execute(self):
+        self.context.update(self.output_dict)
+        if self.enable_print_output:
+            logger.info(f"{self.name}.output_dict={self.output_dict}")
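
For orientation, a minimal sketch of how this new base class is meant to be subclassed, modeled on the SimpleLLMOp and BaseSSEMcpOp ops added later in this release; EchoToolOp and its schema are illustrative only and not part of the package:

from flowllm.context.service_context import C
from flowllm.op.base_tool_op import BaseToolOp
from flowllm.schema.tool_call import ToolCall


@C.register_op()
class EchoToolOp(BaseToolOp):
    # Declare a single required "query" input; before_execute copies it from
    # the flow context into input_dict, and after_execute pushes output_dict
    # back into the context (logging it when enable_print_output is set).
    def build_tool_call(self) -> ToolCall:
        return ToolCall(**{
            "name": "echo",
            "description": "echo the query back",
            "input_schema": {
                "query": {"type": "str", "description": "text to echo", "required": True}
            }
        })

    async def async_execute(self):
        self.output_dict[f"{self.name}_result"] = self.input_dict["query"]
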
flowllm/op/gallery/__init__.py
CHANGED
flowllm/op/gallery/mock_op.py
CHANGED
@@ -1,8 +1,9 @@
+import asyncio
 import time

 from loguru import logger

-from flowllm.context
+from flowllm.context import C
 from flowllm.op.base_llm_op import BaseLLMOp


@@ -10,8 +11,8 @@ from flowllm.op.base_llm_op import BaseLLMOp
 class Mock1Op(BaseLLMOp):
     def execute(self):
         time.sleep(1)
-        a = self.context.a
-        b = self.context.b
+        a = self.context.get("a", 1)
+        b = self.context.get("b", 2)
         logger.info(f"enter class={self.name}. a={a} b={b}")

         self.context.response.answer = f"{self.name} {a} {b} answer=47"
@@ -28,15 +29,20 @@ class Mock3Op(Mock1Op):


 @C.register_op()
-class Mock4Op(
-
+class Mock4Op(BaseLLMOp):
+    async def async_execute(self):
+        await asyncio.sleep(1)
+        a = self.context.get("a", 1)
+        b = self.context.get("b", 2)
+        logger.info(f"enter class={self.name}. a={a} b={b}")
+        self.context.response.answer = f"{self.name} {a} {b} answer=47"


 @C.register_op()
-class Mock5Op(
+class Mock5Op(Mock4Op):
     ...


 @C.register_op()
-class Mock6Op(
+class Mock6Op(Mock4Op):
     ...
flowllm/op/llm/react_llm_op.py
ADDED
@@ -0,0 +1,105 @@
+import asyncio
+import datetime
+import json
+import time
+from typing import List, Dict
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.op import BaseToolOp
+from flowllm.schema.flow_response import FlowResponse
+from flowllm.schema.message import Message, Role
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_op(name="react_llm_op")
+class ReactLLMOp(BaseToolOp):
+    file_path: str = __file__
+
+    def __init__(self, llm="qwen3_30b_instruct", **kwargs):
+        super().__init__(llm=llm, **kwargs)
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "query_llm",
+            "description": "use this query to query an LLM",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    async def async_execute(self):
+        query: str = self.context.query
+
+        max_steps: int = int(self.op_params.get("max_steps", 10))
+        from flowllm.op import BaseToolOp
+        from flowllm.op.search import DashscopeSearchOp
+
+        tools: List[BaseToolOp] = [DashscopeSearchOp(save_answer=True)]
+        tool_dict: Dict[str, BaseToolOp] = {x.tool_call.name: x for x in tools}
+        for name, tool_call in tool_dict.items():
+            logger.info(f"name={name} "
+                        f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")
+
+        now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        user_prompt = self.prompt_format(prompt_name="role_prompt",
+                                         time=now_time,
+                                         tools=",".join(list(tool_dict.keys())),
+                                         query=query)
+        messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
+        logger.info(f"step.0 user_prompt={user_prompt}")
+
+        for i in range(max_steps):
+            assistant_message: Message = await self.llm.achat(messages, tools=[x.tool_call for x in tools])
+            messages.append(assistant_message)
+            logger.info(f"assistant.round{i}.reasoning_content={assistant_message.reasoning_content}\n"
+                        f"content={assistant_message.content}\n"
+                        f"tool.size={len(assistant_message.tool_calls)}")
+
+            if not assistant_message.tool_calls:
+                break
+
+            for j, tool_call in enumerate(assistant_message.tool_calls):
+                logger.info(f"submit step={i} tool_calls.name={tool_call.name} argument_dict={tool_call.argument_dict}")
+
+                if tool_call.name not in tool_dict:
+                    logger.warning(f"step={i} no tool_call.name={tool_call.name}")
+                    continue
+
+                self.submit_async_task(tool_dict[tool_call.name].copy().async_call,
+                                       context=self.context.copy(**tool_call.argument_dict))
+                time.sleep(1)
+
+            task_results = await self.join_async_task()
+
+            for j, tool_result in enumerate(task_results):
+                tool_call = assistant_message.tool_calls[j]
+                logger.info(f"submit step.index={i}.{j} tool_result={tool_result}")
+                if isinstance(tool_result, FlowResponse):
+                    tool_result = tool_result.answer
+                else:
+                    tool_result = str(tool_result)
+                tool_message = Message(role=Role.TOOL, content=tool_result, tool_call_id=tool_call.id)
+                messages.append(tool_message)
+
+        self.context.response.messages = messages
+        self.context.response.answer = messages[-1].content
+
+
+async def main():
+    C.set_service_config().init_by_service_config()
+    context = FlowContext(query="茅台和五粮现在股价多少?")
+
+    op = ReactLLMOp()
+    result = await op.async_call(context=context)
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
@@ -8,21 +8,28 @@ role_prompt: |

   {query}

-  # write a complete and rigorous report to answer user's questions based on the context.
 next_prompt: |
-  Think based on the current content and the user's question: Is the current context sufficient to answer the user's question?
-
+  Think based on the current content and the user's question: Is the current context sufficient to answer the user's question?
   - If the current context is not sufficient to answer the user's question, consider what information is missing.
   Re-plan and think about how to break down the missing information into subtasks.
   For each subtask, determine what tools and parameters should be used for the query.
   Please first provide the reasoning process, then give the tool call name and parameters.
-
-  - If the current context is sufficient to answer the user's question, use the **terminate** tool.
+  - If the current context is sufficient to answer the user's question, please integrate the context and provide a complete answer to the user's question.

-
-
-
-
-
+role_prompt_zh: |
+  你是一个有用的助手。
+  当前时间是 {time}。
+  请根据用户的问题,主动选择最合适的工具或工具组合,包括 {tools} 等。
+  请先思考如何将问题分解为子任务,每个子任务应使用哪些工具和参数,最后提供工具调用名称和参数。
+  尝试多次使用相同的工具,但使用不同的参数,从多个角度获取信息。
+  请根据用户问题的语言来确定回复的语言。
+
   {query}

+next_prompt_zh: |
+  根据当前内容和用户的问题进行思考:当前上下文是否足以回答用户的问题?
+  - 如果当前上下文不足以回答用户的问题,请考虑缺少哪些信息。
+  重新规划并思考如何将缺失的信息分解为子任务。
+  对于每个子任务,确定应使用哪些工具和参数进行查询。
+  请先提供推理过程,然后给出工具调用名称和参数。
+  - 如果当前上下文足以回答用户的问题,请整合上下文,为用户的问题提供一个完整的答案。
flowllm/op/llm/simple_llm_op.py
ADDED
@@ -0,0 +1,48 @@
+import asyncio
+from typing import List
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.op import BaseToolOp
+from flowllm.schema.message import Message, Role
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_op(name="simple_llm_op")
+class SimpleLLMOp(BaseToolOp):
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "query_llm",
+            "description": "use this query to query an LLM",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    async def async_execute(self):
+        query: str = self.input_dict["query"]
+        logger.info(f"query={query}")
+        messages: List[Message] = [Message(role=Role.USER, content=query)]
+
+        assistant_message: Message = await self.llm.achat(messages)
+        self.context.response.answer = assistant_message.content
+
+
+async def main():
+    C.set_service_config().init_by_service_config()
+    context = FlowContext(query="hello", stream_queue=asyncio.Queue())
+
+    op = SimpleLLMOp()
+    result = await op.async_call(context=context)
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
flowllm/op/llm/stream_llm_op.py
ADDED
@@ -0,0 +1,61 @@
+import asyncio
+from typing import List
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.enumeration.chunk_enum import ChunkEnum
+from flowllm.op import BaseToolOp
+from flowllm.schema.message import Message, Role
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_op(name="stream_llm_op")
+class StreamLLMOp(BaseToolOp):
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "query_llm",
+            "description": "use this query to query an LLM",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    async def async_execute(self):
+        query: str = self.input_dict["query"]
+        logger.info(f"query={query}")
+        messages: List[Message] = [Message(role=Role.USER, content=query)]
+
+        async for chunk, chunk_type in self.llm.astream_chat(messages):  # noqa
+            if chunk_type == ChunkEnum.ANSWER:
+                await self.context.add_stream_answer(chunk)
+
+        await self.context.add_stream_done()
+
+
+async def main():
+    C.set_service_config().init_by_service_config()
+    context = FlowContext(query="hello, introduce yourself.", stream_queue=asyncio.Queue())
+
+    op = StreamLLMOp()
+    task = asyncio.create_task(op.async_call(context=context))
+
+    while True:
+        stream_chunk = await context.stream_queue.get()
+        if stream_chunk.done:
+            print("\nend")
+            break
+        else:
+            print(stream_chunk.chunk, end="")
+
+    await task
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
flowllm/op/mcp/ant_op.py
ADDED
@@ -0,0 +1,42 @@
+import asyncio
+import json
+import os
+
+from flowllm.context import FlowContext, C
+from flowllm.op.mcp.base_sse_mcp_op import BaseSSEMcpOp
+
+
+@C.register_op()
+class AntSearchOp(BaseSSEMcpOp):
+
+    def __init__(self, **kwargs):
+        host = os.getenv("FLOW_MCP_HOSTS", "").split(",")[0]
+        super().__init__(host=host, tool_name="search", **kwargs)
+
+
+@C.register_op()
+class AntInvestmentOp(BaseSSEMcpOp):
+
+    def __init__(self, **kwargs):
+        host = os.getenv("FLOW_MCP_HOSTS", "").split(",")[0]
+        super().__init__(host=host, tool_name="investment_analysis", **kwargs)
+
+
+async def async_main():
+    op = AntSearchOp()
+    context = FlowContext(query="阿里巴巴怎么样?", entity="阿里巴巴")
+    await op.async_call(context=context)
+    print(json.dumps(op.tool_call.simple_input_dump(), ensure_ascii=False))
+    print(context.response.answer)
+
+    op = AntInvestmentOp()
+    context = FlowContext(entity="阿里巴巴", analysis_category="股票")
+    await op.async_call(context=context)
+    print(json.dumps(op.tool_call.simple_input_dump(), ensure_ascii=False))
+    print(context.response.answer)
+
+
+if __name__ == "__main__":
+    C.prepare_sse_mcp().set_service_config().init_by_service_config()
+
+    asyncio.run(async_main())
flowllm/op/mcp/base_sse_mcp_op.py
ADDED
@@ -0,0 +1,28 @@
+from fastmcp import Client
+from mcp.types import CallToolResult
+
+from flowllm.context import C
+from flowllm.op import BaseToolOp
+from flowllm.schema.tool_call import ToolCall
+
+
+class BaseSSEMcpOp(BaseToolOp):
+
+    def __init__(self, host: str = "", tool_name: str = "", **kwargs):
+        self.host: str = host
+        self.tool_name: str = tool_name
+        super().__init__(**kwargs)
+
+    def build_tool_call(self) -> ToolCall:
+        key = f"{self.host}/{self.tool_name}"
+        assert key in C.sse_mcp_dict, \
+            f"host={self.host} tool_name={self.tool_name} not found in mcp_tool_call_dict"
+        return C.sse_mcp_dict[key]
+
+    def default_execute(self):
+        self.context.response.answer = self.output_dict[f"{self.name}_result"] = f"{self.name} execute failed!"
+
+    async def async_execute(self):
+        async with Client(f"{self.host}/sse/") as client:
+            result: CallToolResult = await client.call_tool(self.tool_name, arguments=self.input_dict)
+            self.context.response.answer = self.output_dict[f"{self.name}_result"] = result.content[0].text
flowllm/op/parallel_op.py
CHANGED
@@ -12,9 +12,13 @@ class ParallelOp(BaseOp):
     def execute(self):
         for op in self.ops:
             self.submit_task(op.__call__, context=self.context)
-
         self.join_task(task_desc="parallel execution")

+    async def async_execute(self):
+        for op in self.ops:
+            self.submit_async_task(op.async_call, context=self.context)
+        return await self.join_async_task()
+
     def __or__(self, op: BaseOp):
         if isinstance(op, ParallelOp):
             self.ops.extend(op.ops)
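
For readers skimming the hunk above: async_execute is the coroutine counterpart of the threaded execute path. A rough, self-contained sketch of the same fan-out/join idea using plain asyncio.gather (not the package's submit_async_task/join_async_task helpers) looks like this:

import asyncio

async def run_parallel(ops, context):
    # Schedule every child op's async_call concurrently and wait for all of
    # them, mirroring what ParallelOp.async_execute does with its task helpers.
    return await asyncio.gather(*(op.async_call(context=context) for op in ops))
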
flowllm/op/search/__init__.py
CHANGED