flowllm 0.1.3__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowllm/__init__.py +4 -3
- flowllm/app.py +2 -1
- flowllm/config/base.yaml +75 -0
- flowllm/config/fin_supply.yaml +39 -0
- flowllm/config/pydantic_config_parser.py +16 -1
- flowllm/context/__init__.py +2 -0
- flowllm/context/base_context.py +10 -20
- flowllm/context/flow_context.py +45 -2
- flowllm/context/service_context.py +69 -10
- flowllm/embedding_model/openai_compatible_embedding_model.py +1 -2
- flowllm/enumeration/chunk_enum.py +1 -0
- flowllm/flow/__init__.py +9 -0
- flowllm/flow/base_flow.py +44 -13
- flowllm/flow/expression/__init__.py +1 -0
- flowllm/flow/{parser → expression}/expression_parser.py +5 -2
- flowllm/flow/expression/expression_tool_flow.py +25 -0
- flowllm/flow/gallery/__init__.py +1 -8
- flowllm/flow/gallery/mock_tool_flow.py +46 -28
- flowllm/flow/tool_op_flow.py +97 -0
- flowllm/llm/base_llm.py +0 -2
- flowllm/op/__init__.py +3 -4
- flowllm/op/akshare/get_ak_a_code_op.py +1 -1
- flowllm/op/akshare/get_ak_a_info_op.py +1 -1
- flowllm/op/base_op.py +232 -16
- flowllm/op/base_tool_op.py +47 -0
- flowllm/op/gallery/__init__.py +0 -1
- flowllm/op/gallery/mock_op.py +13 -7
- flowllm/op/llm/__init__.py +3 -0
- flowllm/op/{agent/react_v2_op.py → llm/react_llm_op.py} +43 -24
- flowllm/op/llm/simple_llm_op.py +48 -0
- flowllm/op/llm/stream_llm_op.py +61 -0
- flowllm/op/mcp/__init__.py +2 -0
- flowllm/op/mcp/ant_op.py +42 -0
- flowllm/op/mcp/base_sse_mcp_op.py +28 -0
- flowllm/op/parallel_op.py +5 -1
- flowllm/op/search/__init__.py +1 -2
- flowllm/op/search/dashscope_search_op.py +73 -128
- flowllm/op/search/tavily_search_op.py +64 -82
- flowllm/op/sequential_op.py +4 -0
- flowllm/schema/flow_stream_chunk.py +11 -0
- flowllm/schema/service_config.py +8 -3
- flowllm/schema/tool_call.py +46 -1
- flowllm/service/__init__.py +0 -1
- flowllm/service/base_service.py +31 -14
- flowllm/service/http_service.py +45 -36
- flowllm/service/mcp_service.py +17 -23
- flowllm/storage/vector_store/__init__.py +1 -0
- flowllm/storage/vector_store/base_vector_store.py +99 -15
- flowllm/storage/vector_store/chroma_vector_store.py +250 -8
- flowllm/storage/vector_store/es_vector_store.py +288 -32
- flowllm/storage/vector_store/local_vector_store.py +206 -9
- flowllm/storage/vector_store/memory_vector_store.py +509 -0
- flowllm/utils/common_utils.py +55 -1
- flowllm/utils/miner_u_pdf_processor.py +726 -0
- {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/METADATA +7 -6
- flowllm-0.1.6.dist-info/RECORD +98 -0
- flowllm/config/default.yaml +0 -77
- flowllm/config/empty.yaml +0 -37
- flowllm/flow/gallery/cmd_flow.py +0 -11
- flowllm/flow/gallery/code_tool_flow.py +0 -30
- flowllm/flow/gallery/dashscope_search_tool_flow.py +0 -34
- flowllm/flow/gallery/deepsearch_tool_flow.py +0 -39
- flowllm/flow/gallery/expression_tool_flow.py +0 -18
- flowllm/flow/gallery/tavily_search_tool_flow.py +0 -30
- flowllm/flow/gallery/terminate_tool_flow.py +0 -30
- flowllm/flow/parser/__init__.py +0 -0
- flowllm/op/agent/__init__.py +0 -1
- flowllm/op/agent/react_v1_op.py +0 -109
- flowllm/op/agent/react_v1_prompt.yaml +0 -54
- flowllm/op/base_ray_op.py +0 -313
- flowllm/op/code/__init__.py +0 -1
- flowllm/op/code/execute_code_op.py +0 -42
- flowllm/op/gallery/terminate_op.py +0 -29
- flowllm/op/search/dashscope_deep_research_op.py +0 -267
- flowllm/service/cmd_service.py +0 -15
- flowllm-0.1.3.dist-info/RECORD +0 -102
- /flowllm/op/{agent/react_v2_prompt.yaml → llm/react_llm_prompt.yaml} +0 -0
- {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/WHEEL +0 -0
- {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/entry_points.txt +0 -0
- {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/top_level.txt +0 -0
flowllm/op/search/tavily_search_op.py
CHANGED
@@ -1,109 +1,91 @@
+import asyncio
 import json
 import os
-import
-from typing import
+from functools import partial
+from typing import Union, List
 
-from loguru import logger
 from tavily import TavilyClient
 
-from flowllm.context
-from flowllm.
-from flowllm.
-from flowllm.storage.cache.data_cache import DataCache
+from flowllm.context import FlowContext, C
+from flowllm.op.base_tool_op import BaseToolOp
+from flowllm.schema.tool_call import ToolCall
 
 
 @C.register_op()
-class TavilySearchOp(
-    def __init__(self,
-                 enable_print: bool = True,
-                 enable_cache: bool = True,
-                 cache_path: str = "./tavily_search_cache",
-                 cache_expire_hours: float = 0.1,
-                 topic: Literal["general", "news", "finance"] = "general",
-                 max_retries: int = 3,
-                 return_only_content: bool = True,
-                 **kwargs):
+class TavilySearchOp(BaseToolOp):
+    def __init__(self, save_answer: bool = False, **kwargs):
         super().__init__(**kwargs)
-
-        self.
-
-
-
-
-
-
-
-
+        self.save_answer = save_answer
+        self._client: TavilyClient | None = None
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "web_search",
+            "description": "Use search keywords to retrieve relevant information from the internet. If there are multiple search keywords, please use each keyword separately to call this tool.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
 
     @property
-    def
-        if self.
-            self.
-        return self.
+    def client(self):
+        if self._client is None:
+            self._client = TavilyClient(api_key=os.environ["FLOW_TAVILY_API_KEY"])
+        return self._client
 
-    def
-
-        logger.info("response=\n" + json.dumps(response, indent=2, ensure_ascii=False))
+    def default_execute(self):
+        self.output_dict["tavily_search_result"] = "tavily search failed!"
 
-
+    async def search(self, query: str):
+        loop = asyncio.get_event_loop()
+        func = partial(self.client.search, query=query)
+        task = loop.run_in_executor(executor=C.thread_pool, func=func)  # noqa
+        return await task
 
-    def
-
-
+    async def extract(self, urls: Union[List[str], str]):
+        loop = asyncio.get_event_loop()
+        func = partial(self.client.extract, urls=urls, format="text")
+        task = loop.run_in_executor(executor=C.thread_pool, func=func)  # noqa
+        return await task
+
+    async def async_execute(self):
+        query: str = self.input_dict["query"]
 
-        # Check cache first
         if self.enable_cache and self.cache:
             cached_result = self.cache.load(query)
             if cached_result:
-
-                if self.return_only_content:
-                    self.context.tavily_search_result = json.dumps(final_result, ensure_ascii=False, indent=2)
-                else:
-                    self.context.tavily_search_result = final_result
+                self.output_dict["tavily_search_result"] = json.dumps(cached_result, ensure_ascii=False, indent=2)
                 return
 
-
-
-
-        url_info_dict = {item["url"]: item for item in response["results"]}
-        response_extract = self._client.extract(urls=[item["url"] for item in response["results"]],
-                                                format="text")
-
-        final_result = {}
-        for item in response_extract["results"]:
-            url = item["url"]
-            final_result[url] = url_info_dict[url]
-            final_result[url]["raw_content"] = item["raw_content"]
-
-        # Cache the result if enabled
-        if self.enable_cache and self.cache:
-            self.cache.save(query, final_result, expire_hours=self.cache_expire_hours)
-
-        final_result = self.post_process(final_result)
-
-        if self.return_only_content:
-            self.context.tavily_search_result = json.dumps(final_result, ensure_ascii=False, indent=2)
-        else:
-            self.context.tavily_search_result = final_result
-        return
+        response = await self.search(query=query)
+        url_info_dict = {item["url"]: item for item in response["results"]}
+        response_extract = await self.extract(urls=[item["url"] for item in response["results"]])
 
-
-
+        final_result = {}
+        for item in response_extract["results"]:
+            url = item["url"]
+            final_result[url] = url_info_dict[url]
+            final_result[url]["raw_content"] = item["raw_content"]
 
-        self.
+        if self.enable_cache and self.cache is not None:
+            self.cache.save(query, final_result, expire_hours=self.cache_expire_hours)
 
+        self.output_dict["tavily_search_result"] = json.dumps(final_result, ensure_ascii=False, indent=2)
+        if self.save_answer:
+            self.context.response.answer = self.output_dict["tavily_search_result"]
 
-
+async def async_main():
+    C.set_service_config().init_by_service_config()
 
-C.set_default_service_config().init_by_service_config()
+    op = TavilySearchOp()
+    context = FlowContext(query="what is AI?")
+    await op.async_call(context=context)
+    print(context.tavily_search_result)
 
-
-
-op(context=context)
-print(context.tavily_search_result)
+
+if __name__ == "__main__":
+    asyncio.run(async_main())
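The rewritten `TavilySearchOp` above replaces a synchronous `execute` with `async_execute`, awaiting the blocking Tavily client through `functools.partial` plus `run_in_executor`. A minimal, self-contained sketch of that offloading pattern — `blocking_search` and `thread_pool` are stand-ins for `TavilyClient.search` and `C.thread_pool`, not flowllm APIs:

```python
import asyncio
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial

# Stand-in for C.thread_pool, the pool shared by the service context.
thread_pool = ThreadPoolExecutor(max_workers=4)


def blocking_search(query: str) -> dict:
    """Stand-in for TavilyClient.search: a blocking network call."""
    time.sleep(0.1)
    return {"results": [{"url": "https://example.com", "query": query}]}


async def search(query: str) -> dict:
    # Same shape as the op's search(): wrap the blocking call with partial,
    # then await it on the executor so the event loop stays free.
    # (The op uses asyncio.get_event_loop(); get_running_loop() is equivalent here.)
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(thread_pool, partial(blocking_search, query=query))


async def main():
    # Two searches now overlap instead of running back to back.
    print(await asyncio.gather(search("what is AI?"), search("what is ML?")))


if __name__ == "__main__":
    asyncio.run(main())
```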
flowllm/op/sequential_op.py
CHANGED
@@ -13,6 +13,10 @@ class SequentialOp(BaseOp):
         for op in self.ops:
             op.__call__(self.context)
 
+    async def async_execute(self):
+        for op in self.ops:
+            await op.async_call(self.context)
+
     def __rshift__(self, op: BaseOp):
         if isinstance(op, SequentialOp):
             self.ops.extend(op.ops)
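The new `async_execute` keeps the sequential contract: each op is awaited to completion before the next starts (unlike `asyncio.gather`, which would interleave them). A small stand-alone illustration — the `Op` class here is a hypothetical stand-in, not flowllm's `BaseOp`:

```python
import asyncio


class Op:
    """Hypothetical stand-in for a flowllm op exposing an async_call coroutine."""

    def __init__(self, name: str):
        self.name = name

    async def async_call(self, context: dict):
        await asyncio.sleep(0.01)  # pretend to do async work
        context.setdefault("trace", []).append(self.name)


async def main():
    context: dict = {}
    for op in [Op("a"), Op("b"), Op("c")]:
        await op.async_call(context)  # strictly one after another
    print(context["trace"])  # always ['a', 'b', 'c']


asyncio.run(main())
```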
flowllm/schema/flow_stream_chunk.py
ADDED
@@ -0,0 +1,11 @@
+from pydantic import Field, BaseModel
+
+from flowllm.enumeration.chunk_enum import ChunkEnum
+
+
+class FlowStreamChunk(BaseModel):
+    flow_id: str = Field(default="")
+    chunk_type: ChunkEnum = Field(default=ChunkEnum.ANSWER)
+    chunk: str | bytes = Field(default="")
+    done: bool = Field(default=False)
+    metadata: dict = Field(default_factory=dict)
flowllm/schema/service_config.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Dict
+from typing import Dict, List
 
 from pydantic import BaseModel, Field
 
@@ -6,7 +6,7 @@ from flowllm.schema.tool_call import ToolCall
 
 
 class MCPConfig(BaseModel):
-    transport: str = Field(default="", description="stdio/http/sse
+    transport: str = Field(default="", description="stdio/http/sse")
     host: str = Field(default="0.0.0.0")
     port: int = Field(default=8001)
 
@@ -25,11 +25,14 @@ class CmdConfig(BaseModel):
 
 class FlowConfig(ToolCall):
     flow_content: str = Field(default="")
-
+    use_async: bool = Field(default=True)
+    service_type: str = Field(default="http+mcp", description="http+mcp/http/mcp")
+    stream: bool = Field(default=False)
 
 class OpConfig(BaseModel):
     backend: str = Field(default="")
     language: str = Field(default="")
+    max_retries: int = Field(default=1)
     raise_exception: bool = Field(default=True)
     prompt_path: str = Field(default="")
     llm: str = Field(default="")
 
@@ -61,6 +64,8 @@ class ServiceConfig(BaseModel):
     language: str = Field(default="")
     thread_pool_max_workers: int = Field(default=16)
     ray_max_workers: int = Field(default=1)
+    import_config: str = Field(default="", description="Import the configuration in the same path as the base")
+    disabled_flows: List[str] = Field(default_factory=list)
 
     cmd: CmdConfig = Field(default_factory=CmdConfig)
     mcp: MCPConfig = Field(default_factory=MCPConfig)
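To see what the new `FlowConfig` fields control, here is a sketch with a minimal stand-in model (the real class extends `ToolCall`, and the `flow_content` expression below is purely illustrative): `service_type` gates which service registers a flow, and `stream` selects the SSE path in `HttpService`.

```python
from pydantic import BaseModel, Field


class FlowConfig(BaseModel):  # stand-in; the real FlowConfig extends ToolCall
    flow_content: str = Field(default="")
    use_async: bool = Field(default=True)
    service_type: str = Field(default="http+mcp", description="http+mcp/http/mcp")
    stream: bool = Field(default=False)


# "search_op >> summarize_op" is a hypothetical flow expression.
flow = FlowConfig(flow_content="search_op >> summarize_op", service_type="http", stream=True)
print("http" in flow.service_type)  # True: the HTTP service would register it
print("mcp" in flow.service_type)   # False: the MCP service would skip it
```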
flowllm/schema/tool_call.py
CHANGED
@@ -1,7 +1,8 @@
 import json
 from typing import Dict, List
 
-from
+from mcp.types import Tool
+from pydantic import BaseModel, Field, model_validator
 
 
 class ParamAttrs(BaseModel):
@@ -64,6 +65,17 @@ class ToolCall(BaseModel):
     input_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
     output_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
 
+    @model_validator(mode="before")
+    @classmethod
+    def init_tool_call(cls, data: dict):
+        tool_type = data.get("type", "")
+        tool_type_dict = data.get(tool_type, {})
+
+        for key in ["name", "arguments"]:
+            if key not in data:
+                data[key] = tool_type_dict.get(key, "")
+        return data
+
     @property
     def argument_dict(self) -> dict:
         return json.loads(self.arguments)
@@ -120,3 +132,36 @@ class ToolCall(BaseModel):
         raise NotImplementedError(f"version {version} not supported")
 
         return self
+
+    @classmethod
+    def from_mcp_tool(cls, tool: Tool) -> "ToolCall":
+        input_schema = {}
+        properties = tool.inputSchema["properties"]
+        required = tool.inputSchema["required"]
+        for name, attr_dict in properties.items():
+            param_attrs = ParamAttrs()
+
+            if name in required:
+                param_attrs.required = True
+            param_attrs.type = attr_dict.get("type", "str")
+            param_attrs.description = attr_dict.get("description", "")
+            if "enum" in attr_dict:
+                param_attrs.enum = attr_dict["enum"]
+            input_schema[name] = param_attrs
+
+        return cls(name=tool.name,
+                   description=tool.description,
+                   input_schema=input_schema)
+
+
+if __name__ == "__main__":
+    tool_call = ToolCall(**{
+        "id": "call_0fb6077ad56f4647b0b04a",
+        "function": {
+            "arguments": "{\"symbol\": \"ZETA\"}",
+            "name": "get_stock_info"
+        },
+        "type": "function",
+        "index": 0
+    })
+    print(tool_call.simple_output_dump())
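The new `from_mcp_tool` converts an MCP `Tool`'s JSON schema into the flat `input_schema` above; note it indexes `inputSchema["required"]` directly, so it assumes that key is present. A sketch with a hand-built `Tool` (the conversion lines are commented so the snippet runs without flowllm installed):

```python
from mcp.types import Tool

tool = Tool(
    name="web_search",
    description="Search the web.",
    inputSchema={
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "search keyword"}
        },
        "required": ["query"],  # from_mcp_tool assumes this key exists
    },
)

# With flowllm installed:
# from flowllm.schema.tool_call import ToolCall
# tool_call = ToolCall.from_mcp_tool(tool)
# assert tool_call.input_schema["query"].required is True
```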
flowllm/service/__init__.py
CHANGED
flowllm/service/base_service.py
CHANGED
@@ -1,4 +1,4 @@
-from abc import
+from abc import ABC
 from typing import Dict, Optional
 
 from loguru import logger
@@ -6,16 +6,19 @@ from pydantic import create_model, Field
 
 from flowllm.config.pydantic_config_parser import PydanticConfigParser
 from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
 from flowllm.schema.flow_request import FlowRequest
 from flowllm.schema.service_config import ServiceConfig
 from flowllm.schema.tool_call import ParamAttrs
-from flowllm.utils.common_utils import snake_to_camel
+from flowllm.utils.common_utils import snake_to_camel, print_banner
 
 
 class BaseService(ABC):
     TYPE_MAPPING = {
         "str": str,
+        "string": str,
         "int": int,
+        "integer": int,
         "float": float,
         "bool": bool,
         "list": list,
@@ -24,10 +27,16 @@ class BaseService(ABC):
 
     def __init__(self, service_config: ServiceConfig):
         self.service_config = service_config
-
         self.mcp_config = self.service_config.mcp
         self.http_config = self.service_config.http
+
+    def __enter__(self):
+        C.prepare_sse_mcp()
         C.init_by_service_config(self.service_config)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        C.stop_by_service_config()
 
     @classmethod
     def get_service(cls, *args, parser: type[PydanticConfigParser] = PydanticConfigParser) -> "BaseService":
@@ -49,20 +58,28 @@ class BaseService(ABC):
 
         return create_model(f"{snake_to_camel(flow_name)}Model", __base__=FlowRequest, **fields)
 
-    def
+    def integrate_flow(self, tool_flow: BaseToolFlow):
+        ...
+
+    def integrate_stream_flow(self, tool_flow: BaseToolFlow):
         ...
 
-    def
+    def integrate_flows(self):
         for tool_flow_name in C.tool_flow_names:
-
-
+            tool_flow: BaseToolFlow = C.get_tool_flow(tool_flow_name)
+            if tool_flow.stream:
+                self.integrate_stream_flow(tool_flow)
+                logger.info(f"integrate stream_endpoint={tool_flow_name}")
 
-
-
+            else:
+                self.integrate_flow(tool_flow)
+                logger.info(f"integrate endpoint={tool_flow_name}")
 
-    def
-
+    def __call__(self, logo: str = ""):
+        self.integrate_flows()
+        if logo:
+            print_banner(name=logo, service_config=self.service_config, width=400)
+        self.execute()
 
-
-
-        ...
+    def execute(self):
+        ...
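The service lifecycle is now: construct (side-effect free), enter the context manager to initialize the shared service context, call the service to integrate flows and run, and tear down on exit. A self-contained sketch of that shape — `MiniService` is a stand-in, not a flowllm import:

```python
class MiniService:
    """Stand-in mirroring the BaseService lifecycle from the diff above."""

    def __enter__(self):
        print("init service context")  # ~ C.init_by_service_config(...)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print("stop service context")  # ~ C.stop_by_service_config()

    def integrate_flows(self):
        print("register endpoints")    # ~ integrate_flow / integrate_stream_flow

    def execute(self):
        print("serve forever")         # ~ uvicorn.run / mcp.run in the real services

    def __call__(self, logo: str = ""):
        self.integrate_flows()
        if logo:
            print(f"=== {logo} ===")   # ~ print_banner(...)
        self.execute()


with MiniService() as service:
    service(logo="FlowLLM")
```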
flowllm/service/http_service.py
CHANGED
@@ -1,14 +1,16 @@
 import asyncio
-from
+from typing import AsyncGenerator
 
 import uvicorn
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
 from loguru import logger
 
 from flowllm.context.service_context import C
 from flowllm.flow.base_tool_flow import BaseToolFlow
 from flowllm.schema.flow_response import FlowResponse
+from flowllm.schema.flow_stream_chunk import FlowStreamChunk
 from flowllm.service.base_service import BaseService
 
 
@@ -18,60 +20,67 @@ class HttpService(BaseService):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.app = FastAPI(title="FlowLLM", description="HTTP API for FlowLLM")
+        self.app.add_middleware(CORSMiddleware,
+                                allow_origins=["*"],
+                                allow_credentials=True,
+                                allow_methods=["*"],
+                                allow_headers=["*"])
 
-        # Add CORS middleware
-        self.app.add_middleware(
-            CORSMiddleware,
-            allow_origins=["*"],
-            allow_credentials=True,
-            allow_methods=["*"],
-            allow_headers=["*"],
-        )
-
-        # Add health check endpoint
         self.app.get("/health")(self.health_check)
 
     @staticmethod
     def health_check():
         return {"status": "healthy"}
 
-    def
-
-
+    def integrate_flow(self, tool_flow: BaseToolFlow):
+        if "http" not in tool_flow.service_type:
+            return
+
+        request_model = self._create_pydantic_model(tool_flow.name, tool_flow.tool_call.input_schema)
 
         async def execute_endpoint(request: request_model) -> FlowResponse:
-
-            response: FlowResponse = await loop.run_in_executor(
-                executor=C.thread_pool,
-                func=partial(tool_flow.__call__, **request.model_dump()))  # noqa
+            return await tool_flow(**request.model_dump())
 
-
+        self.app.post(f"/{tool_flow.name}", response_model=FlowResponse)(execute_endpoint)
 
-
-
+    @staticmethod
+    def gen_stream_response(queue: asyncio.Queue):
+        async def generate_stream() -> AsyncGenerator[bytes, None]:
+            while True:
+                stream_chunk: FlowStreamChunk = await queue.get()
+                if stream_chunk.done:
+                    yield f"data:[DONE]\n\n".encode('utf-8')
+                    break
+                else:
+                    yield f"data:{stream_chunk.model_dump_json()}\n\n".encode("utf-8")
 
-
-        super().integrate_tool_flows()
+        return StreamingResponse(generate_stream(), media_type="text/event-stream")
 
-
-
+    def integrate_stream_flow(self, tool_flow: BaseToolFlow):
+        if "http" not in tool_flow.service_type:
+            return
 
-
-        tool_flow_schemas = []
-        for name, tool_flow in C.tool_flow_dict.items():
-            assert isinstance(tool_flow, BaseToolFlow)
-            tool_flow_schemas.append(tool_flow.tool_call.simple_input_dump())
-        return tool_flow_schemas
+        request_model = self._create_pydantic_model(tool_flow.name, tool_flow.tool_call.input_schema)
 
-
+        async def execute_stream_endpoint(request: request_model) -> StreamingResponse:
+            stream_queue = asyncio.Queue()
+            asyncio.create_task(tool_flow(stream_queue=stream_queue, **request.model_dump()))
+            return self.gen_stream_response(stream_queue)
+
+        self.app.post(f"/{tool_flow.name}")(execute_stream_endpoint)
+
+    def integrate_flows(self):
+        super().integrate_flows()
+
+        async def execute_endpoint() -> list:
+            loop = asyncio.get_event_loop()
+            return await loop.run_in_executor(executor=C.thread_pool, func=C.list_flow_schemas)  # noqa
 
         endpoint_path = "list"
-        self.app.get("/"
+        self.app.get(f"/{endpoint_path}", response_model=list)(execute_endpoint)
         logger.info(f"integrate endpoint={endpoint_path}")
 
-    def
-        self.integrate_tool_flows()
-
+    def execute(self):
         uvicorn.run(self.app,
                     host=self.http_config.host,
                     port=self.http_config.port,
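The streaming endpoint decouples producer and consumer with an `asyncio.Queue`: the flow task puts chunks while the response generator drains them until it sees `done`, then emits a `data:[DONE]` sentinel. A FastAPI-free sketch of that handshake, with plain dicts standing in for `FlowStreamChunk`:

```python
import asyncio
import json


async def run_flow(queue: asyncio.Queue):
    # Producer: plays the role of the tool_flow task filling the stream queue.
    for token in ["Hel", "lo"]:
        await queue.put({"chunk": token, "done": False})
    await queue.put({"chunk": "", "done": True})


async def generate_stream(queue: asyncio.Queue):
    # Consumer: same loop shape as gen_stream_response's generator.
    while True:
        chunk = await queue.get()
        if chunk["done"]:
            yield b"data:[DONE]\n\n"
            break
        yield f"data:{json.dumps(chunk)}\n\n".encode("utf-8")


async def main():
    queue: asyncio.Queue = asyncio.Queue()
    asyncio.create_task(run_flow(queue))        # ~ asyncio.create_task(tool_flow(...))
    async for frame in generate_stream(queue):  # ~ StreamingResponse body
        print(frame)


asyncio.run(main())
```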
flowllm/service/mcp_service.py
CHANGED
@@ -1,12 +1,9 @@
-import asyncio
-from functools import partial
-
 from fastmcp import FastMCP
 from fastmcp.tools import FunctionTool
 
-from
-from
-from
+from .base_service import BaseService
+from ..context.service_context import C
+from ..flow.base_tool_flow import BaseToolFlow
 
 
 @C.register_service("mcp")
@@ -16,32 +13,29 @@ class MCPService(BaseService):
         super().__init__(*args, **kwargs)
         self.mcp = FastMCP(name="FlowLLM")
 
-    def
-
-
+    def integrate_flow(self, tool_flow: BaseToolFlow):
+        if "mcp" not in tool_flow.service_type:
+            return
+
+        request_model = self._create_pydantic_model(tool_flow.name, tool_flow.tool_call.input_schema)
 
-        async def
-
-            loop = asyncio.get_event_loop()
-            response = await loop.run_in_executor(
-                executor=C.thread_pool,
-                func=partial(tool_flow.__call__, **request.model_dump()))  # noqa
+        async def execute_tool(**kwargs) -> str:
+            response = await tool_flow(**request_model(**kwargs).model_dump())
             return response.answer
 
+        # tool_flow.tool_call.name
         tool = FunctionTool(name=tool_flow.name,  # noqa
                             description=tool_flow.tool_call.description,  # noqa
-                            fn=
+                            fn=execute_tool,
                             parameters=tool_flow.tool_call.input_schema)
         self.mcp.add_tool(tool)
 
-    def
-        self.integrate_tool_flows()
-
+    def execute(self):
         if self.mcp_config.transport == "sse":
-            self.mcp.run(transport="sse", host=self.mcp_config.host, port=self.mcp_config.port)
-
-            self.mcp.run(transport="http", host=self.mcp_config.host, port=self.mcp_config.port)
+            self.mcp.run(transport="sse", host=self.mcp_config.host, port=self.mcp_config.port, show_banner=False)
+        elif self.mcp_config.transport == "http":
+            self.mcp.run(transport="http", host=self.mcp_config.host, port=self.mcp_config.port, show_banner=False)
         elif self.mcp_config.transport == "stdio":
-            self.mcp.run(transport="stdio")
+            self.mcp.run(transport="stdio", show_banner=False)
        else:
             raise ValueError(f"unsupported mcp transport: {self.mcp_config.transport}")