flowllm 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (141)
  1. flowllm/__init__.py +21 -0
  2. flowllm/app.py +15 -0
  3. flowllm/client/__init__.py +25 -0
  4. flowllm/client/async_http_client.py +81 -0
  5. flowllm/client/http_client.py +81 -0
  6. flowllm/client/mcp_client.py +133 -0
  7. flowllm/client/sync_mcp_client.py +116 -0
  8. flowllm/config/__init__.py +1 -0
  9. flowllm/config/default.yaml +77 -0
  10. flowllm/config/empty.yaml +37 -0
  11. flowllm/config/pydantic_config_parser.py +242 -0
  12. flowllm/context/base_context.py +79 -0
  13. flowllm/context/flow_context.py +16 -0
  14. llmflow/op/prompt_mixin.py → flowllm/context/prompt_handler.py +25 -14
  15. flowllm/context/registry.py +30 -0
  16. flowllm/context/service_context.py +147 -0
  17. flowllm/embedding_model/__init__.py +1 -0
  18. {llmflow → flowllm}/embedding_model/base_embedding_model.py +93 -2
  19. {llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py +71 -13
  20. flowllm/flow/__init__.py +1 -0
  21. flowllm/flow/base_flow.py +72 -0
  22. flowllm/flow/base_tool_flow.py +15 -0
  23. flowllm/flow/gallery/__init__.py +8 -0
  24. flowllm/flow/gallery/cmd_flow.py +11 -0
  25. flowllm/flow/gallery/code_tool_flow.py +30 -0
  26. flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  27. flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  28. flowllm/flow/gallery/expression_tool_flow.py +18 -0
  29. flowllm/flow/gallery/mock_tool_flow.py +67 -0
  30. flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  31. flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  32. flowllm/flow/parser/expression_parser.py +171 -0
  33. flowllm/llm/__init__.py +2 -0
  34. {llmflow → flowllm}/llm/base_llm.py +100 -18
  35. flowllm/llm/litellm_llm.py +455 -0
  36. flowllm/llm/openai_compatible_llm.py +439 -0
  37. flowllm/op/__init__.py +11 -0
  38. llmflow/op/react/react_v1_op.py → flowllm/op/agent/react_op.py +17 -22
  39. flowllm/op/akshare/__init__.py +3 -0
  40. flowllm/op/akshare/get_ak_a_code_op.py +108 -0
  41. flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
  42. flowllm/op/akshare/get_ak_a_info_op.py +140 -0
  43. flowllm/op/base_llm_op.py +64 -0
  44. flowllm/op/base_op.py +148 -0
  45. flowllm/op/base_ray_op.py +313 -0
  46. flowllm/op/code/__init__.py +1 -0
  47. flowllm/op/code/execute_code_op.py +42 -0
  48. flowllm/op/gallery/__init__.py +2 -0
  49. flowllm/op/gallery/mock_op.py +42 -0
  50. flowllm/op/gallery/terminate_op.py +29 -0
  51. flowllm/op/parallel_op.py +23 -0
  52. flowllm/op/search/__init__.py +3 -0
  53. flowllm/op/search/dashscope_deep_research_op.py +260 -0
  54. flowllm/op/search/dashscope_search_op.py +179 -0
  55. flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  56. flowllm/op/search/tavily_search_op.py +102 -0
  57. flowllm/op/sequential_op.py +21 -0
  58. flowllm/schema/flow_request.py +12 -0
  59. flowllm/schema/flow_response.py +12 -0
  60. flowllm/schema/message.py +35 -0
  61. flowllm/schema/service_config.py +72 -0
  62. flowllm/schema/tool_call.py +118 -0
  63. {llmflow → flowllm}/schema/vector_node.py +1 -0
  64. flowllm/service/__init__.py +3 -0
  65. flowllm/service/base_service.py +68 -0
  66. flowllm/service/cmd_service.py +15 -0
  67. flowllm/service/http_service.py +79 -0
  68. flowllm/service/mcp_service.py +47 -0
  69. flowllm/storage/__init__.py +1 -0
  70. flowllm/storage/cache/__init__.py +1 -0
  71. flowllm/storage/cache/cache_data_handler.py +104 -0
  72. flowllm/storage/cache/data_cache.py +375 -0
  73. flowllm/storage/vector_store/__init__.py +3 -0
  74. flowllm/storage/vector_store/base_vector_store.py +44 -0
  75. {llmflow → flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
  76. {llmflow → flowllm/storage}/vector_store/es_vector_store.py +11 -11
  77. llmflow/vector_store/file_vector_store.py → flowllm/storage/vector_store/local_vector_store.py +110 -11
  78. flowllm/utils/common_utils.py +52 -0
  79. flowllm/utils/fetch_url.py +117 -0
  80. flowllm/utils/llm_utils.py +28 -0
  81. flowllm/utils/ridge_v2.py +54 -0
  82. {llmflow → flowllm}/utils/timer.py +5 -4
  83. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/METADATA +45 -388
  84. flowllm-0.1.2.dist-info/RECORD +99 -0
  85. flowllm-0.1.2.dist-info/entry_points.txt +2 -0
  86. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/licenses/LICENSE +1 -1
  87. flowllm-0.1.2.dist-info/top_level.txt +1 -0
  88. flowllm-0.1.0.dist-info/RECORD +0 -66
  89. flowllm-0.1.0.dist-info/entry_points.txt +0 -3
  90. flowllm-0.1.0.dist-info/top_level.txt +0 -1
  91. llmflow/app.py +0 -53
  92. llmflow/config/config_parser.py +0 -80
  93. llmflow/config/mock_config.yaml +0 -58
  94. llmflow/embedding_model/__init__.py +0 -5
  95. llmflow/enumeration/agent_state.py +0 -8
  96. llmflow/llm/__init__.py +0 -5
  97. llmflow/llm/openai_compatible_llm.py +0 -283
  98. llmflow/mcp_server.py +0 -110
  99. llmflow/op/__init__.py +0 -10
  100. llmflow/op/base_op.py +0 -125
  101. llmflow/op/mock_op.py +0 -40
  102. llmflow/op/vector_store/__init__.py +0 -13
  103. llmflow/op/vector_store/recall_vector_store_op.py +0 -48
  104. llmflow/op/vector_store/update_vector_store_op.py +0 -28
  105. llmflow/op/vector_store/vector_store_action_op.py +0 -46
  106. llmflow/pipeline/pipeline.py +0 -94
  107. llmflow/pipeline/pipeline_context.py +0 -37
  108. llmflow/schema/app_config.py +0 -69
  109. llmflow/schema/experience.py +0 -144
  110. llmflow/schema/message.py +0 -68
  111. llmflow/schema/request.py +0 -32
  112. llmflow/schema/response.py +0 -29
  113. llmflow/service/__init__.py +0 -0
  114. llmflow/service/llmflow_service.py +0 -96
  115. llmflow/tool/__init__.py +0 -9
  116. llmflow/tool/base_tool.py +0 -80
  117. llmflow/tool/code_tool.py +0 -43
  118. llmflow/tool/dashscope_search_tool.py +0 -162
  119. llmflow/tool/mcp_tool.py +0 -77
  120. llmflow/tool/tavily_search_tool.py +0 -109
  121. llmflow/tool/terminate_tool.py +0 -23
  122. llmflow/utils/__init__.py +0 -0
  123. llmflow/utils/common_utils.py +0 -17
  124. llmflow/utils/file_handler.py +0 -25
  125. llmflow/utils/http_client.py +0 -156
  126. llmflow/utils/op_utils.py +0 -102
  127. llmflow/utils/registry.py +0 -33
  128. llmflow/vector_store/__init__.py +0 -7
  129. llmflow/vector_store/base_vector_store.py +0 -136
  130. {llmflow → flowllm/context}/__init__.py +0 -0
  131. {llmflow/config → flowllm/enumeration}/__init__.py +0 -0
  132. {llmflow → flowllm}/enumeration/chunk_enum.py +0 -0
  133. {llmflow → flowllm}/enumeration/http_enum.py +0 -0
  134. {llmflow → flowllm}/enumeration/role.py +0 -0
  135. {llmflow/enumeration → flowllm/flow/parser}/__init__.py +0 -0
  136. {llmflow/op/react → flowllm/op/agent}/__init__.py +0 -0
  137. /llmflow/op/react/react_v1_prompt.yaml → /flowllm/op/agent/react_prompt.yaml +0 -0
  138. {llmflow/pipeline → flowllm/schema}/__init__.py +0 -0
  139. {llmflow/schema → flowllm/utils}/__init__.py +0 -0
  140. {llmflow → flowllm}/utils/singleton.py +0 -0
  141. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/WHEEL +0 -0
flowllm/op/search/tavily_search_op.py
@@ -0,0 +1,102 @@
+ import json
+ import os
+ import time
+ from typing import Literal
+
+ from loguru import logger
+ from tavily import TavilyClient
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.op.base_op import BaseOp
+ from flowllm.storage.cache.data_cache import DataCache
+
+
+ @C.register_op()
+ class TavilySearchOp(BaseOp):
+     def __init__(self,
+                  enable_print: bool = True,
+                  enable_cache: bool = False,
+                  cache_path: str = "./web_search_cache",
+                  cache_expire_hours: float = 0.1,
+                  topic: Literal["general", "news", "finance"] = "general",
+                  max_retries: int = 3,
+                  return_only_content: bool = True,
+                  **kwargs):
+         super().__init__(**kwargs)
+
+         self.enable_print = enable_print
+         self.enable_cache = enable_cache
+         self.cache_expire_hours = cache_expire_hours
+         self.topic = topic
+         self.max_retries = max_retries
+         self.return_only_content = return_only_content
+
+         # Initialize DataCache if caching is enabled
+         self.cache = DataCache(cache_path) if self.enable_cache else None
+         self._client = TavilyClient(api_key=os.getenv("FLOW_TAVILY_API_KEY", ""))
+
+     def post_process(self, response):
+         if self.enable_print:
+             logger.info("response=\n" + json.dumps(response, indent=2, ensure_ascii=False))
+
+         return response
+
+     def execute(self):
+         # Get query from context
+         query: str = self.context.query
+
+         # Check cache first
+         if self.enable_cache and self.cache:
+             cached_result = self.cache.load(query)
+             if cached_result:
+                 final_result = self.post_process(cached_result)
+                 if self.return_only_content:
+                     self.context.tavily_search_result = json.dumps(final_result, ensure_ascii=False, indent=2)
+                 else:
+                     self.context.tavily_search_result = final_result
+                 return
+
+         for i in range(self.max_retries):
+             try:
+                 response = self._client.search(query=query, topic=self.topic)
+                 url_info_dict = {item["url"]: item for item in response["results"]}
+                 response_extract = self._client.extract(urls=[item["url"] for item in response["results"]],
+                                                         format="text")
+
+                 final_result = {}
+                 for item in response_extract["results"]:
+                     url = item["url"]
+                     final_result[url] = url_info_dict[url]
+                     final_result[url]["raw_content"] = item["raw_content"]
+
+                 # Cache the result if enabled
+                 if self.enable_cache and self.cache:
+                     self.cache.save(query, final_result, expire_hours=self.cache_expire_hours)
+
+                 final_result = self.post_process(final_result)
+
+                 if self.return_only_content:
+                     self.context.tavily_search_result = json.dumps(final_result, ensure_ascii=False, indent=2)
+                 else:
+                     self.context.tavily_search_result = final_result
+                 return
+
+             except Exception as e:
+                 logger.exception(f"tavily search with query={query} encounter error with e={e.args}")
+                 time.sleep(i + 1)
+
+         self.context.tavily_search_result = "tavily search failed!"
+
+
+ if __name__ == "__main__":
+     from flowllm.utils.common_utils import load_env
+
+     load_env()
+
+     C.set_default_service_config().init_by_service_config()
+
+     op = TavilySearchOp(enable_cache=True)
+     context = FlowContext(query="A股医药为什么一直涨")  # "Why do A-share pharma stocks keep rising?"
+     op(context=context)
+     print(context.tavily_search_result)
flowllm/op/sequential_op.py
@@ -0,0 +1,21 @@
+ from typing import List
+
+ from flowllm.op.base_op import BaseOp
+
+
+ class SequentialOp(BaseOp):
+
+     def __init__(self, ops: List[BaseOp], **kwargs):
+         super().__init__(**kwargs)
+         self.ops = ops
+
+     def execute(self):
+         for op in self.ops:
+             op.__call__(self.context)
+
+     def __rshift__(self, op: BaseOp):
+         if isinstance(op, SequentialOp):
+             self.ops.extend(op.ops)
+         else:
+             self.ops.append(op)
+         return self
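Usage sketch (editorial, not part of the diff): SequentialOp runs its ops in order against a shared context, and `>>` appends further ops, flattening other SequentialOp instances. EchoOp is an invented BaseOp subclass; the sketch assumes BaseOp subclasses can be constructed without arguments, expose the shared context as self.context, and accept the `op(context=...)` call convention seen in tavily_search_op's __main__ block.

    from flowllm.context.flow_context import FlowContext
    from flowllm.op.base_op import BaseOp
    from flowllm.op.sequential_op import SequentialOp

    class EchoOp(BaseOp):
        def execute(self):
            # self.context is assumed to hold the FlowContext passed at call time
            print("query:", self.context.query)

    seq = SequentialOp(ops=[EchoOp(), EchoOp()]) >> EchoOp()  # __rshift__ appends, so seq now holds 3 ops
    seq(context=FlowContext(query="hello"))                   # runs each op in order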
flowllm/schema/flow_request.py
@@ -0,0 +1,12 @@
+ from typing import List
+
+ from pydantic import Field, BaseModel
+
+ from flowllm.schema.message import Message
+
+
+ class FlowRequest(BaseModel, extra="allow"):
+     query: str = Field(default="")
+     messages: List[Message] = Field(default_factory=list)
+     workspace_id: str = Field(default="")
+     metadata: dict = Field(default_factory=dict)
flowllm/schema/flow_response.py
@@ -0,0 +1,12 @@
+ from typing import List
+
+ from pydantic import Field, BaseModel
+
+ from flowllm.schema.message import Message
+
+
+ class FlowResponse(BaseModel):
+     answer: str = Field(default="")
+     messages: List[Message] = Field(default_factory=list)
+     success: bool = Field(default=True)
+     metadata: dict = Field(default_factory=dict)
flowllm/schema/message.py
@@ -0,0 +1,35 @@
+ from typing import List
+
+ from pydantic import BaseModel, Field
+
+ from flowllm.enumeration.role import Role
+ from flowllm.schema.tool_call import ToolCall
+
+
+ class Message(BaseModel):
+     role: Role = Field(default=Role.USER)
+     content: str | bytes = Field(default="")
+     reasoning_content: str = Field(default="")
+     tool_calls: List[ToolCall] = Field(default_factory=list)
+     tool_call_id: str = Field(default="")
+     metadata: dict = Field(default_factory=dict)
+
+     def simple_dump(self, add_reason_content: bool = True) -> dict:
+         result: dict
+         if self.content:
+             result = {"role": self.role.value, "content": self.content}
+         elif add_reason_content and self.reasoning_content:
+             result = {"role": self.role.value, "content": self.reasoning_content}
+         else:
+             result = {"role": self.role.value, "content": ""}
+
+         if self.tool_calls:
+             result["tool_calls"] = [x.simple_output_dump() for x in self.tool_calls]
+         return result
+
+
+ class Trajectory(BaseModel):
+     task_id: str = Field(default="")
+     messages: List[Message] = Field(default_factory=list)
+     score: float = Field(default=0.0)
+     metadata: dict = Field(default_factory=dict)
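For orientation (editorial sketch, not part of the diff): simple_dump prefers content over reasoning_content and appends tool_calls only when present. A small illustration, assuming Role.ASSISTANT exists in flowllm.enumeration.role and serializes to "assistant":

    from flowllm.enumeration.role import Role
    from flowllm.schema.message import Message

    msg = Message(role=Role.ASSISTANT, reasoning_content="step-by-step thoughts")
    print(msg.simple_dump())                          # {'role': 'assistant', 'content': 'step-by-step thoughts'}
    print(msg.simple_dump(add_reason_content=False))  # {'role': 'assistant', 'content': ''}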
flowllm/schema/service_config.py
@@ -0,0 +1,72 @@
+ from typing import Dict
+
+ from pydantic import BaseModel, Field
+
+ from flowllm.schema.tool_call import ToolCall
+
+
+ class MCPConfig(BaseModel):
+     transport: str = Field(default="", description="stdio/http/sse/streamable-http")
+     host: str = Field(default="0.0.0.0")
+     port: int = Field(default=8001)
+
+
+ class HttpConfig(BaseModel):
+     host: str = Field(default="0.0.0.0")
+     port: int = Field(default=8001)
+     timeout_keep_alive: int = Field(default=3600)
+     limit_concurrency: int = Field(default=64)
+
+
+ class CmdConfig(BaseModel):
+     flow: str = Field(default="")
+     params: dict = Field(default_factory=dict)
+
+
+ class FlowConfig(ToolCall):
+     flow_content: str = Field(default="")
+     service_type: str = Field(default="all", description="all/http/mcp/cmd")
+
+ class OpConfig(BaseModel):
+     backend: str = Field(default="")
+     language: str = Field(default="")
+     raise_exception: bool = Field(default=True)
+     prompt_path: str = Field(default="")
+     llm: str = Field(default="")
+     embedding_model: str = Field(default="")
+     vector_store: str = Field(default="")
+     params: dict = Field(default_factory=dict)
+
+
+ class LLMConfig(BaseModel):
+     backend: str = Field(default="")
+     model_name: str = Field(default="")
+     params: dict = Field(default_factory=dict)
+
+
+ class EmbeddingModelConfig(BaseModel):
+     backend: str = Field(default="")
+     model_name: str = Field(default="")
+     params: dict = Field(default_factory=dict)
+
+
+ class VectorStoreConfig(BaseModel):
+     backend: str = Field(default="")
+     embedding_model: str = Field(default="")
+     params: dict = Field(default_factory=dict)
+
+
+ class ServiceConfig(BaseModel):
+     backend: str = Field(default="")
+     language: str = Field(default="")
+     thread_pool_max_workers: int = Field(default=16)
+     ray_max_workers: int = Field(default=1)
+
+     cmd: CmdConfig = Field(default_factory=CmdConfig)
+     mcp: MCPConfig = Field(default_factory=MCPConfig)
+     http: HttpConfig = Field(default_factory=HttpConfig)
+     flow: Dict[str, FlowConfig] = Field(default_factory=dict)
+     op: Dict[str, OpConfig] = Field(default_factory=dict)
+     llm: Dict[str, LLMConfig] = Field(default_factory=dict)
+     embedding_model: Dict[str, EmbeddingModelConfig] = Field(default_factory=dict)
+     vector_store: Dict[str, VectorStoreConfig] = Field(default_factory=dict)
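ServiceConfig nests the transport and component configs, so an entire service can be described from a plain dict and pydantic coerces nested dicts into the typed sub-configs. A hypothetical example (backend and model names invented for illustration):

    from flowllm.schema.service_config import ServiceConfig

    config = ServiceConfig(
        backend="http",
        llm={"default": {"backend": "openai_compatible", "model_name": "qwen-max"}},
    )
    print(config.http.port)                  # 8001, from HttpConfig defaults
    print(config.llm["default"].model_name)  # qwen-max, coerced into LLMConfig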
flowllm/schema/tool_call.py
@@ -0,0 +1,118 @@
+ import json
+ from typing import Dict, List
+
+ from pydantic import BaseModel, Field
+
+
+ class ParamAttrs(BaseModel):
+     type: str = Field(default="str", description="tool parameter type")
+     description: str = Field(default="", description="tool parameter description")
+     required: bool = Field(default=True, description="tool parameter required")
+     enum: List[str] | None = Field(default=None, description="tool parameter enum")
+
+     def simple_dump(self) -> dict:
+         result: dict = {
+             "type": self.type,
+             "description": self.description,
+         }
+
+         if self.enum:
+             result["enum"] = self.enum
+
+         return result
+
+ class ToolCall(BaseModel):
+     """
+     input:
+     {
+         "type": "function",
+         "function": {
+             "name": "get_current_weather",
+             "description": "It is very useful when you want to check the weather of a specified city.",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     "location": {
+                         "type": "string",
+                         "description": "Cities or counties, such as Beijing, Hangzhou, Yuhang District, etc.",
+                     }
+                 },
+                 "required": ["location"]
+             }
+         }
+     }
+     output:
+     {
+         "index": 0,
+         "id": "call_6596dafa2a6a46f7a217da",
+         "function": {
+             "arguments": "{\"location\": \"Beijing\"}",
+             "name": "get_current_weather"
+         },
+         "type": "function",
+     }
+     """
+
+     index: int = Field(default=0)
+     id: str = Field(default="")
+     type: str = Field(default="function")
+     name: str = Field(default="")
+
+     arguments: dict = Field(default_factory=dict, description="tool execution arguments")
+
+     description: str = Field(default="")
+     input_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
+     output_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
+
+     def simple_input_dump(self, version: str = "v1") -> dict:
+         if version == "v1":
+             required_list = [name for name, tool_param in self.input_schema.items() if tool_param.required]
+             properties = {name: tool_param.simple_dump() for name, tool_param in self.input_schema.items()}
+
+             return {
+                 "type": self.type,
+                 self.type: {
+                     "name": self.name,
+                     "description": self.description,
+                     "parameters": {
+                         "type": "object",
+                         "properties": properties,
+                         "required": required_list
+                     },
+                 },
+             }
+
+         else:
+             raise NotImplementedError(f"version {version} not supported")
+
+     def simple_output_dump(self, version: str = "v1") -> dict:
+         if version == "v1":
+             return {
+                 "index": self.index,
+                 "id": self.id,
+                 self.type: {
+                     "arguments": json.dumps(self.arguments, ensure_ascii=False),
+                     "name": self.name
+                 },
+                 "type": self.type,
+             }
+         else:
+             raise NotImplementedError(f"version {version} not supported")
+
+     def update_by_output(self, data: dict, version: str = "v1"):
+         if version == "v1":
+             self.index = data.get("index", 0)
+             self.id = data.get("id", "")
+             tool_type = data.get("type", "")
+             tool_type_dict = data.get(tool_type, {})
+             if tool_type_dict:
+                 name = tool_type_dict.get("name", "")
+                 arguments = tool_type_dict.get("arguments", "")
+                 if name:
+                     self.name = name
+                 if arguments:
+                     self.arguments = json.loads(arguments)
+         else:
+             raise NotImplementedError(f"version {version} not supported")
+
+         return self
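For reference (editorial sketch, not part of the diff): simple_input_dump renders a ToolCall into the OpenAI-style function schema shown in the docstring, and update_by_output parses a model's tool-call output back into the object. Values below are invented for illustration:

    from flowllm.schema.tool_call import ParamAttrs, ToolCall

    tool = ToolCall(
        name="get_current_weather",
        description="Check the weather of a specified city.",
        input_schema={"location": ParamAttrs(type="str", description="City name")},
    )
    print(tool.simple_input_dump())
    # {'type': 'function', 'function': {'name': 'get_current_weather', ...,
    #  'parameters': {'type': 'object', 'properties': {...}, 'required': ['location']}}}

    tool.update_by_output({"id": "call_1", "type": "function",
                           "function": {"name": "get_current_weather",
                                        "arguments": "{\"location\": \"Beijing\"}"}})
    print(tool.arguments)  # {'location': 'Beijing'}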
flowllm/schema/vector_node.py
@@ -1,5 +1,6 @@
  from typing import List
  from uuid import uuid4
+
  from pydantic import BaseModel, Field
  
  
flowllm/service/__init__.py
@@ -0,0 +1,3 @@
+ from .cmd_service import CmdService
+ from .http_service import HttpService
+ from .mcp_service import MCPService
flowllm/service/base_service.py
@@ -0,0 +1,68 @@
+ from abc import abstractmethod, ABC
+ from typing import Dict, Optional
+
+ from loguru import logger
+ from pydantic import create_model, Field
+
+ from flowllm.config.pydantic_config_parser import PydanticConfigParser
+ from flowllm.context.service_context import C
+ from flowllm.schema.flow_request import FlowRequest
+ from flowllm.schema.service_config import ServiceConfig
+ from flowllm.schema.tool_call import ParamAttrs
+ from flowllm.utils.common_utils import snake_to_camel
+
+
+ class BaseService(ABC):
+     TYPE_MAPPING = {
+         "str": str,
+         "int": int,
+         "float": float,
+         "bool": bool,
+         "list": list,
+         "dict": dict,
+     }
+
+     def __init__(self, service_config: ServiceConfig):
+         self.service_config = service_config
+
+         self.mcp_config = self.service_config.mcp
+         self.http_config = self.service_config.http
+         C.init_by_service_config(self.service_config)
+
+     @classmethod
+     def get_service(cls, *args, parser: type[PydanticConfigParser] = PydanticConfigParser) -> "BaseService":
+         config_parser = parser(ServiceConfig)
+         service_config: ServiceConfig = config_parser.parse_args(*args)
+         service_cls = C.resolve_service(service_config.backend)
+         return service_cls(service_config)
+
+     def _create_pydantic_model(self, flow_name: str, input_schema: Dict[str, ParamAttrs]):
+         fields = {}
+
+         for param_name, param_config in input_schema.items():
+             field_type = self.TYPE_MAPPING.get(param_config.type, str)
+
+             if not param_config.required:
+                 fields[param_name] = (Optional[field_type], Field(default=None, description=param_config.description))
+             else:
+                 fields[param_name] = (field_type, Field(default=..., description=param_config.description))
+
+         return create_model(f"{snake_to_camel(flow_name)}Model", __base__=FlowRequest, **fields)
+
+     def integrate_tool_flow(self, tool_flow_name: str):
+         ...
+
+     def integrate_tool_flows(self):
+         for tool_flow_name in C.tool_flow_names:
+             self.integrate_tool_flow(tool_flow_name)
+             logger.info(f"integrate flow_endpoint={tool_flow_name}")
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         ...
+
+     @abstractmethod
+     def __call__(self):
+         ...
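To make _create_pydantic_model concrete (editorial sketch, names invented): it turns a flow's ParamAttrs map into a request model derived from FlowRequest, marking non-required parameters Optional. For a hypothetical "web_search" flow, the generated model would be roughly equivalent to this hand-written one, assuming snake_to_camel does the obvious conversion:

    from typing import Optional
    from pydantic import Field
    from flowllm.schema.flow_request import FlowRequest

    class WebSearchModel(FlowRequest):  # name from snake_to_camel("web_search") + "Model"
        query: str = Field(default=..., description="search query")             # required param
        top_k: Optional[int] = Field(default=None, description="result count")  # optional param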
flowllm/service/cmd_service.py
@@ -0,0 +1,15 @@
+ from loguru import logger
+
+ from flowllm.context.service_context import C
+ from flowllm.flow.gallery import CmdFlow
+ from flowllm.service.base_service import BaseService
+
+
+ @C.register_service("cmd")
+ class CmdService(BaseService):
+
+     def __call__(self):
+         flow = CmdFlow(flow=self.service_config.cmd.flow)
+         response = flow.__call__(**self.service_config.cmd.params)
+         if response.answer:
+             logger.info(f"final_answer={response.answer}")
flowllm/service/http_service.py
@@ -0,0 +1,79 @@
+ import asyncio
+ from functools import partial
+
+ import uvicorn
+ from fastapi import FastAPI
+ from fastapi.middleware.cors import CORSMiddleware
+ from loguru import logger
+
+ from flowllm.context.service_context import C
+ from flowllm.flow.base_tool_flow import BaseToolFlow
+ from flowllm.schema.flow_response import FlowResponse
+ from flowllm.service.base_service import BaseService
+
+
+ @C.register_service("http")
+ class HttpService(BaseService):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.app = FastAPI(title="FlowLLM", description="HTTP API for FlowLLM")
+
+         # Add CORS middleware
+         self.app.add_middleware(
+             CORSMiddleware,
+             allow_origins=["*"],
+             allow_credentials=True,
+             allow_methods=["*"],
+             allow_headers=["*"],
+         )
+
+         # Add health check endpoint
+         self.app.get("/health")(self.health_check)
+
+     @staticmethod
+     def health_check():
+         return {"status": "healthy"}
+
+     def integrate_tool_flow(self, tool_flow_name: str):
+         tool_flow: BaseToolFlow = C.get_tool_flow(tool_flow_name)
+         request_model = self._create_pydantic_model(tool_flow_name, tool_flow.tool_call.input_schema)
+
+         async def execute_endpoint(request: request_model) -> FlowResponse:
+             loop = asyncio.get_event_loop()
+             response: FlowResponse = await loop.run_in_executor(
+                 executor=C.thread_pool,
+                 func=partial(tool_flow.__call__, **request.model_dump()))  # noqa
+
+             return response
+
+         endpoint_path = f"/{tool_flow.name}"
+         self.app.post(endpoint_path, response_model=FlowResponse)(execute_endpoint)
+
+     def integrate_tool_flows(self):
+         super().integrate_tool_flows()
+
+         async def execute_endpoint() -> list:
+             loop = asyncio.get_event_loop()
+
+             def list_tool_flows() -> list:
+                 tool_flow_schemas = []
+                 for name, tool_flow in C.tool_flow_dict.items():
+                     assert isinstance(tool_flow, BaseToolFlow)
+                     tool_flow_schemas.append(tool_flow.tool_call.simple_input_dump())
+                 return tool_flow_schemas
+
+             return await loop.run_in_executor(executor=C.thread_pool, func=list_tool_flows)  # noqa
+
+         endpoint_path = "/list"
+         self.app.get(endpoint_path, response_model=list)(execute_endpoint)
+         logger.info(f"integrate endpoint={endpoint_path}")
+
+     def __call__(self):
+         self.integrate_tool_flows()
+
+         uvicorn.run(self.app,
+                     host=self.http_config.host,
+                     port=self.http_config.port,
+                     timeout_keep_alive=self.http_config.timeout_keep_alive,
+                     limit_concurrency=self.http_config.limit_concurrency)
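Once HttpService is running, each registered tool flow is exposed as POST /<flow_name> returning a FlowResponse, plus a GET /list endpoint for the tool schemas. A hypothetical client session (editorial sketch; the flow name is invented, host and port are the HttpConfig defaults):

    import requests

    base = "http://localhost:8001"
    print(requests.get(f"{base}/list").json())              # tool schemas via simple_input_dump
    resp = requests.post(f"{base}/tavily_search",           # hypothetical flow name
                         json={"query": "flowllm"})
    print(resp.json()["answer"])                            # FlowResponse.answer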
flowllm/service/mcp_service.py
@@ -0,0 +1,47 @@
+ import asyncio
+ from functools import partial
+
+ from fastmcp import FastMCP
+ from fastmcp.tools import FunctionTool
+
+ from flowllm.context.service_context import C
+ from flowllm.flow.base_tool_flow import BaseToolFlow
+ from flowllm.service.base_service import BaseService
+
+
+ @C.register_service("mcp")
+ class MCPService(BaseService):
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.mcp = FastMCP(name="FlowLLM")
+
+     def integrate_tool_flow(self, tool_flow_name: str):
+         tool_flow: BaseToolFlow = C.get_tool_flow(tool_flow_name)
+         request_model = self._create_pydantic_model(tool_flow_name, tool_flow.tool_call.input_schema)
+
+         async def execute_flow_async(**kwargs) -> str:
+             request: request_model = request_model(**kwargs)
+             loop = asyncio.get_event_loop()
+             response = await loop.run_in_executor(
+                 executor=C.thread_pool,
+                 func=partial(tool_flow.__call__, **request.model_dump()))  # noqa
+             return response.answer
+
+         tool = FunctionTool(name=tool_flow.name,  # noqa
+                             description=tool_flow.tool_call.description,  # noqa
+                             fn=execute_flow_async,
+                             parameters=tool_flow.tool_call.input_schema)
+         self.mcp.add_tool(tool)
+
+     def __call__(self):
+         self.integrate_tool_flows()
+
+         if self.mcp_config.transport == "sse":
+             self.mcp.run(transport="sse", host=self.mcp_config.host, port=self.mcp_config.port)
+         elif self.mcp_config.transport == "http":
+             self.mcp.run(transport="http", host=self.mcp_config.host, port=self.mcp_config.port)
+         elif self.mcp_config.transport == "stdio":
+             self.mcp.run(transport="stdio")
+         else:
+             raise ValueError(f"unsupported mcp transport: {self.mcp_config.transport}")
flowllm/storage/__init__.py
@@ -0,0 +1 @@
+ from flowllm.storage import vector_store
flowllm/storage/cache/__init__.py
@@ -0,0 +1 @@
+ from .data_cache import DataCache