flowllm 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. flowllm/__init__.py +15 -6
  2. flowllm/app.py +4 -14
  3. flowllm/client/__init__.py +25 -0
  4. flowllm/client/async_http_client.py +81 -0
  5. flowllm/client/http_client.py +81 -0
  6. flowllm/client/mcp_client.py +133 -0
  7. flowllm/client/sync_mcp_client.py +116 -0
  8. flowllm/config/__init__.py +1 -0
  9. flowllm/config/{default_config.yaml → default.yaml} +3 -8
  10. flowllm/config/empty.yaml +37 -0
  11. flowllm/config/pydantic_config_parser.py +17 -17
  12. flowllm/context/base_context.py +27 -7
  13. flowllm/context/flow_context.py +6 -18
  14. flowllm/context/registry.py +5 -1
  15. flowllm/context/service_context.py +81 -37
  16. flowllm/embedding_model/__init__.py +1 -1
  17. flowllm/embedding_model/base_embedding_model.py +91 -0
  18. flowllm/embedding_model/openai_compatible_embedding_model.py +63 -5
  19. flowllm/flow/__init__.py +1 -0
  20. flowllm/flow/base_flow.py +72 -0
  21. flowllm/flow/base_tool_flow.py +15 -0
  22. flowllm/flow/gallery/__init__.py +8 -0
  23. flowllm/flow/gallery/cmd_flow.py +11 -0
  24. flowllm/flow/gallery/code_tool_flow.py +30 -0
  25. flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  26. flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  27. flowllm/flow/gallery/expression_tool_flow.py +18 -0
  28. flowllm/flow/gallery/mock_tool_flow.py +67 -0
  29. flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  30. flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  31. flowllm/flow/parser/__init__.py +0 -0
  32. flowllm/{flow_engine/simple_flow_engine.py → flow/parser/expression_parser.py} +25 -67
  33. flowllm/llm/__init__.py +2 -1
  34. flowllm/llm/base_llm.py +94 -4
  35. flowllm/llm/litellm_llm.py +455 -0
  36. flowllm/llm/openai_compatible_llm.py +205 -5
  37. flowllm/op/__init__.py +11 -3
  38. flowllm/op/agent/__init__.py +0 -0
  39. flowllm/op/agent/react_op.py +83 -0
  40. flowllm/op/agent/react_prompt.yaml +28 -0
  41. flowllm/op/akshare/__init__.py +3 -0
  42. flowllm/op/akshare/get_ak_a_code_op.py +14 -22
  43. flowllm/op/akshare/get_ak_a_info_op.py +17 -20
  44. flowllm/op/{llm_base_op.py → base_llm_op.py} +6 -5
  45. flowllm/op/base_op.py +14 -35
  46. flowllm/op/base_ray_op.py +313 -0
  47. flowllm/op/code/__init__.py +1 -0
  48. flowllm/op/code/execute_code_op.py +42 -0
  49. flowllm/op/gallery/__init__.py +2 -0
  50. flowllm/op/{mock_op.py → gallery/mock_op.py} +4 -4
  51. flowllm/op/gallery/terminate_op.py +29 -0
  52. flowllm/op/parallel_op.py +2 -9
  53. flowllm/op/search/__init__.py +3 -0
  54. flowllm/op/search/dashscope_deep_research_op.py +260 -0
  55. flowllm/op/search/dashscope_search_op.py +179 -0
  56. flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  57. flowllm/op/search/tavily_search_op.py +102 -0
  58. flowllm/op/sequential_op.py +1 -9
  59. flowllm/schema/flow_request.py +12 -0
  60. flowllm/schema/service_config.py +12 -16
  61. flowllm/schema/tool_call.py +13 -5
  62. flowllm/schema/vector_node.py +1 -0
  63. flowllm/service/__init__.py +3 -2
  64. flowllm/service/base_service.py +50 -41
  65. flowllm/service/cmd_service.py +15 -0
  66. flowllm/service/http_service.py +34 -42
  67. flowllm/service/mcp_service.py +13 -11
  68. flowllm/storage/cache/__init__.py +1 -0
  69. flowllm/storage/cache/cache_data_handler.py +104 -0
  70. flowllm/{utils/dataframe_cache.py → storage/cache/data_cache.py} +136 -92
  71. flowllm/storage/vector_store/__init__.py +3 -3
  72. flowllm/storage/vector_store/es_vector_store.py +1 -2
  73. flowllm/storage/vector_store/local_vector_store.py +0 -1
  74. flowllm/utils/common_utils.py +9 -21
  75. flowllm/utils/fetch_url.py +16 -12
  76. flowllm/utils/llm_utils.py +28 -0
  77. flowllm/utils/ridge_v2.py +54 -0
  78. {flowllm-0.1.1.dist-info → flowllm-0.1.2.dist-info}/METADATA +43 -390
  79. flowllm-0.1.2.dist-info/RECORD +99 -0
  80. flowllm-0.1.2.dist-info/entry_points.txt +2 -0
  81. flowllm/flow_engine/__init__.py +0 -1
  82. flowllm/flow_engine/base_flow_engine.py +0 -34
  83. flowllm-0.1.1.dist-info/RECORD +0 -62
  84. flowllm-0.1.1.dist-info/entry_points.txt +0 -4
  85. {flowllm-0.1.1.dist-info → flowllm-0.1.2.dist-info}/WHEEL +0 -0
  86. {flowllm-0.1.1.dist-info → flowllm-0.1.2.dist-info}/licenses/LICENSE +0 -0
  87. {flowllm-0.1.1.dist-info → flowllm-0.1.2.dist-info}/top_level.txt +0 -0
flowllm/context/flow_context.py
@@ -2,27 +2,15 @@ import uuid
 
 from flowllm.context.base_context import BaseContext
 from flowllm.schema.flow_response import FlowResponse
-from flowllm.schema.service_config import ServiceConfig
 
 
 class FlowContext(BaseContext):
 
-    def __init__(self, flow_id: str = uuid.uuid4().hex, **kwargs):
+    def __init__(self,
+                 flow_id: str = uuid.uuid4().hex,
+                 response: FlowResponse = None,
+                 **kwargs):
         super().__init__(**kwargs)
-        self.flow_id: str = flow_id
-
-    @property
-    def service_config(self) -> ServiceConfig:
-        return self._data.get("service_config")
-
-    @service_config.setter
-    def service_config(self, service_config: ServiceConfig):
-        self._data["service_config"] = service_config
 
-    @property
-    def response(self) -> FlowResponse:
-        return self._data.get("response")
-
-    @response.setter
-    def response(self, response: FlowResponse):
-        self._data["response"] = response
+        self.flow_id: str = flow_id
+        self.response: FlowResponse = FlowResponse() if response is None else response
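With this change a FlowContext always carries a FlowResponse from construction, so ops no longer need property/setter plumbing before writing results. A minimal usage sketch, assuming BaseContext exposes constructor kwargs as attributes (as the gallery flows below read them):

    from flowllm.context.flow_context import FlowContext

    # kwargs become context attributes; `response` is created automatically
    context = FlowContext(query="what is AI?")
    context.response.answer = "..."
    print(context.flow_id, context.response.success)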
flowllm/context/registry.py
@@ -6,13 +6,17 @@ from flowllm.utils.common_utils import camel_to_snake
 
 class Registry(BaseContext):
 
-    def __init__(self, registry_name: str, enable_log: bool = True, **kwargs):
+    def __init__(self, registry_name: str, enable_log: bool = True, register_flow_module: bool = True, **kwargs):
         super().__init__(**kwargs)
         self.registry_name: str = registry_name
         self.enable_log: bool = enable_log
+        self.register_flow_module: bool = register_flow_module
 
     def register(self, name: str = ""):
         def decorator(cls):
+            if not self.register_flow_module and cls.__module__.startswith("flowllm"):
+                return cls
+
             class_name = name if name else camel_to_snake(cls.__name__)
             if self.enable_log:
                 if class_name in self._data:
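The new register_flow_module flag lets an embedding application suppress registration of flowllm's own built-in classes (service_context.py below sets it from the FLOW_USE_FRAMEWORK environment variable). A hedged sketch of the decorator's behavior:

    from flowllm.context.registry import Registry

    registry = Registry("op", register_flow_module=False)

    @registry.register()
    class DemoOp:  # defined outside the flowllm package, so it is registered
        pass

    # assumption: the registered name defaults to camel_to_snake(cls.__name__)
    print("demo_op" in registry)  # True; flowllm-internal classes would be skipped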
flowllm/context/service_context.py
@@ -1,9 +1,15 @@
+import os
 import uuid
 from concurrent.futures import ThreadPoolExecutor
-from typing import Dict
+from inspect import isclass
+from typing import Dict, List
+
+import ray
+from loguru import logger
 
 from flowllm.context.base_context import BaseContext
 from flowllm.context.registry import Registry
+from flowllm.schema.service_config import ServiceConfig, EmbeddingModelConfig
 from flowllm.utils.singleton import singleton
 
 
@@ -12,41 +18,80 @@ class ServiceContext(BaseContext):
 
     def __init__(self, service_id: str = uuid.uuid4().hex, **kwargs):
         super().__init__(**kwargs)
-        self.service_id: str = service_id
-        self.registry_dict: Dict[str, Registry] = \
-            {k: Registry(k) for k in ["embedding_model", "llm", "vector_store", "op", "flow_engine", "service"]}
-
-    @property
-    def language(self) -> str:
-        return self._data.get("language", "")
-
-    @language.setter
-    def language(self, value: str):
-        self._data["language"] = value
-
-    @property
-    def thread_pool(self) -> ThreadPoolExecutor:
-        return self._data["thread_pool"]
 
-    @thread_pool.setter
-    def thread_pool(self, thread_pool: ThreadPoolExecutor):
-        self._data["thread_pool"] = thread_pool
+        self.service_id: str = service_id
+        self.service_config: ServiceConfig | None = None
+        self.language: str = ""
+        self.thread_pool: ThreadPoolExecutor | None = None
+        self.vector_store_dict: dict = {}
+
+        self.registry_dict: Dict[str, Registry] = {}
+        use_framework: bool = os.environ.get("FLOW_USE_FRAMEWORK", "").lower() == "true"
+        for key in ["embedding_model", "llm", "vector_store", "op", "tool_flow", "service"]:
+            enable_log = True
+            register_flow_module = True
+
+            if use_framework:
+                enable_log = False
+                if key in ["op", "tool_flow"]:
+                    register_flow_module = False
+            self.registry_dict[key] = Registry(key, enable_log=enable_log, register_flow_module=register_flow_module)
+
+        self.tool_flow_dict: dict = {}
+
+    def set_default_service_config(self):
+        from flowllm.config.pydantic_config_parser import PydanticConfigParser
+
+        config_parser = PydanticConfigParser(ServiceConfig)
+        self.service_config = config_parser.parse_args("config=default")
+        return self
+
+    def init_by_service_config(self, service_config: ServiceConfig = None):
+        if service_config:
+            self.service_config = service_config
+
+        self.language = self.service_config.language
+        self.thread_pool = ThreadPoolExecutor(max_workers=self.service_config.thread_pool_max_workers)
+        if self.service_config.ray_max_workers > 1:
+            ray.init(num_cpus=self.service_config.ray_max_workers)
+
+        # add vector store
+        for name, config in self.service_config.vector_store.items():
+            vector_store_cls = self.resolve_vector_store(config.backend)
+            embedding_model_config: EmbeddingModelConfig = self.service_config.embedding_model[config.embedding_model]
+            embedding_model_cls = self.resolve_embedding_model(embedding_model_config.backend)
+            embedding_model = embedding_model_cls(model_name=embedding_model_config.model_name,
+                                                  **embedding_model_config.params)
+            self.vector_store_dict[name] = vector_store_cls(embedding_model=embedding_model, **config.params)
+
+        from flowllm.flow.base_tool_flow import BaseToolFlow
+        from flowllm.flow.gallery import ExpressionToolFlow
+
+        # add tool flow cls
+        for name, tool_flow_cls in self.registry_dict["tool_flow"].items():
+            if not isclass(tool_flow_cls):
+                continue
+
+            tool_flow: BaseToolFlow = tool_flow_cls()
+            self.tool_flow_dict[tool_flow.name] = tool_flow
+            logger.info(f"add diy tool_flow: {tool_flow.name}")
+
+        # add tool flow config
+        for name, flow_config in self.service_config.flow.items():
+            flow_config.name = name
+            tool_flow: BaseToolFlow = ExpressionToolFlow(flow_config=flow_config)
+            self.tool_flow_dict[tool_flow.name] = tool_flow
+            logger.info(f"add expression tool_flow:{tool_flow.name}")
 
     def get_vector_store(self, name: str = "default"):
-        vector_store_dict: dict = self._data["vector_store_dict"]
-        if name not in vector_store_dict:
-            raise KeyError(f"vector store {name} not found")
-
-        return vector_store_dict[name]
+        return self.vector_store_dict[name]
 
-    def set_vector_store(self, name: str, vector_store):
-        if "vector_store_dict" not in self._data:
-            self.set_vector_stores({})
+    def get_tool_flow(self, name: str = "default"):
+        return self.tool_flow_dict[name]
 
-        self._data["vector_store_dict"][name] = vector_store
-
-    def set_vector_stores(self, vector_store_dict: dict):
-        self._data["vector_store_dict"] = vector_store_dict
+    @property
+    def tool_flow_names(self) -> List[str]:
+        return sorted(self.tool_flow_dict.keys())
 
     """
     register models
@@ -64,8 +109,8 @@ class ServiceContext(BaseContext):
     def register_op(self, name: str = ""):
         return self.registry_dict["op"].register(name=name)
 
-    def register_flow_engine(self, name: str = ""):
-        return self.registry_dict["flow_engine"].register(name=name)
+    def register_tool_flow(self, name: str = ""):
+        return self.registry_dict["tool_flow"].register(name=name)
 
     def register_service(self, name: str = ""):
         return self.registry_dict["service"].register(name=name)
@@ -90,14 +135,13 @@ class ServiceContext(BaseContext):
         assert name in self.registry_dict["op"], f"op={name} not found!"
         return self.registry_dict["op"][name]
 
-    def resolve_flow_engine(self, name: str):
-        assert name in self.registry_dict["flow_engine"], f"flow={name} not found!"
-        return self.registry_dict["flow_engine"][name]
+    def resolve_tool_flow(self, name: str):
+        assert name in self.registry_dict["tool_flow"], f"tool_flow={name} not found!"
+        return self.registry_dict["tool_flow"][name]
 
     def resolve_service(self, name: str):
         assert name in self.registry_dict["service"], f"service={name} not found!"
         return self.registry_dict["service"][name]
 
 
-
 C = ServiceContext()
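The old flow_engine registry is gone: the singleton C now owns the service config, vector stores, and a tool_flow_dict populated both from registered flow classes and from expression flows defined in config. A bootstrap sketch using only the methods shown above:

    from flowllm.context.service_context import C

    # parse config/default.yaml, then build the thread pool, vector stores and tool flows
    C.set_default_service_config().init_by_service_config()

    print(C.tool_flow_names)  # sorted names of class-based and expression flows
    flow = C.get_tool_flow(C.tool_flow_names[0])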
flowllm/embedding_model/__init__.py
@@ -1 +1 @@
-from flowllm.embedding_model.openai_compatible_embedding_model import OpenAICompatibleEmbeddingModel
+from .openai_compatible_embedding_model import OpenAICompatibleEmbeddingModel
flowllm/embedding_model/base_embedding_model.py
@@ -1,3 +1,4 @@
+import asyncio
 from abc import ABC
 from typing import List
 
@@ -37,6 +38,21 @@ class BaseEmbeddingModel(BaseModel, ABC):
         """
         raise NotImplementedError
 
+    async def _get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Abstract async method to get embeddings from the model.
+
+        This method must be implemented by concrete subclasses to provide
+        the actual async embedding functionality.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) corresponding to the input text(s)
+        """
+        raise NotImplementedError
+
     def get_embeddings(self, input_text: str | List[str]):
         """
         Get embeddings with retry logic and error handling.
@@ -64,6 +80,33 @@ class BaseEmbeddingModel(BaseModel, ABC):
         # Return None if all retries failed and raise_exception is False
         return None
 
+    async def get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Get embeddings asynchronously with retry logic and error handling.
+
+        This method wraps the _get_embeddings_async method with automatic retry
+        functionality in case of failures.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) or None if all retries failed and raise_exception is False
+        """
+        # Retry loop with exponential backoff potential
+        for i in range(self.max_retries):
+            try:
+                return await self._get_embeddings_async(input_text)
+
+            except Exception as e:
+                logger.exception(f"embedding model name={self.model_name} encounter error with e={e.args}")
+                # If this is the last retry and raise_exception is True, re-raise the exception
+                if i == self.max_retries - 1 and self.raise_exception:
+                    raise e
+
+        # Return None if all retries failed and raise_exception is False
+        return None
+
     def get_node_embeddings(self, nodes: VectorNode | List[VectorNode]):
         """
         Generate embeddings for VectorNode objects and update their vector fields.
@@ -102,3 +145,51 @@
 
         else:
             raise TypeError(f"unsupported type={type(nodes)}")
+
+    async def get_node_embeddings_async(self, nodes: VectorNode | List[VectorNode]):
+        """
+        Generate embeddings asynchronously for VectorNode objects and update their vector fields.
+
+        This method handles both single nodes and lists of nodes, with automatic
+        batching for efficient processing of large node lists.
+
+        Args:
+            nodes: Single VectorNode or list of VectorNode objects to embed
+
+        Returns:
+            The same node(s) with updated vector fields containing embeddings
+
+        Raises:
+            RuntimeError: If unsupported node type is provided
+        """
+        # Handle single VectorNode
+        if isinstance(nodes, VectorNode):
+            nodes.vector = await self.get_embeddings_async(nodes.content)
+            return nodes
+
+        # Handle list of VectorNodes with batch processing
+        elif isinstance(nodes, list):
+            # Process nodes in batches to respect max_batch_size limits
+            batch_tasks = []
+            for i in range(0, len(nodes), self.max_batch_size):
+                batch_nodes = nodes[i:i + self.max_batch_size]
+                batch_content = [node.content for node in batch_nodes]
+                batch_tasks.append(self.get_embeddings_async(batch_content))
+
+            # Execute all batch tasks concurrently
+            batch_results = await asyncio.gather(*batch_tasks)
+
+            # Flatten the results
+            embeddings = [emb for batch_result in batch_results for emb in batch_result]
+
+            # Validate that we got the expected number of embeddings
+            if len(embeddings) != len(nodes):
+                logger.warning(f"embeddings.size={len(embeddings)} <> nodes.size={len(nodes)}")
+            else:
+                # Assign embeddings to corresponding nodes
+                for node, embedding in zip(nodes, embeddings):
+                    node.vector = embedding
+            return nodes
+
+        else:
+            raise TypeError(f"unsupported type={type(nodes)}")
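The async additions mirror the sync API one-for-one: the same retry contract, plus asyncio.gather so node batches are embedded concurrently instead of sequentially. A usage sketch, assuming VectorNode accepts content as a constructor argument:

    import asyncio

    from flowllm.embedding_model import OpenAICompatibleEmbeddingModel
    from flowllm.schema.vector_node import VectorNode

    async def main():
        model = OpenAICompatibleEmbeddingModel(model_name="text-embedding-v4", dimensions=64)
        nodes = [VectorNode(content=text) for text in ["hello", "world"]]
        nodes = await model.get_node_embeddings_async(nodes)  # batches run concurrently
        print(len(nodes[0].vector))

    asyncio.run(main())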
flowllm/embedding_model/openai_compatible_embedding_model.py
@@ -1,7 +1,7 @@
 import os
 from typing import Literal, List
 
-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI
 from pydantic import Field, PrivateAttr, model_validator
 
 from flowllm.context.service_context import C
@@ -26,21 +26,23 @@ class OpenAICompatibleEmbeddingModel(BaseEmbeddingModel):
     dimensions: int = Field(default=1024, description="Dimensionality of the embedding vectors")
     encoding_format: Literal["float", "base64"] = Field(default="float", description="Encoding format for embeddings")
 
-    # Private OpenAI client instance
+    # Private OpenAI client instances
     _client: OpenAI = PrivateAttr()
+    _async_client: AsyncOpenAI = PrivateAttr()
 
     @model_validator(mode="after")
     def init_client(self):
         """
-        Initialize the OpenAI client after model validation.
+        Initialize the OpenAI clients after model validation.
 
         This method is called automatically after Pydantic model validation
-        to set up the OpenAI client with the provided API key and base URL.
+        to set up both sync and async OpenAI clients with the provided API key and base URL.
 
         Returns:
             self: The model instance for method chaining
         """
         self._client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        self._async_client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
         return self
 
     def _get_embeddings(self, input_text: str | List[str]):
@@ -78,6 +80,41 @@ class OpenAICompatibleEmbeddingModel(BaseEmbeddingModel):
         else:
             raise RuntimeError(f"unsupported type={type(input_text)}")
 
+    async def _get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Get embeddings asynchronously from the OpenAI-compatible API.
+
+        This method implements the abstract _get_embeddings_async method from BaseEmbeddingModel
+        by calling the OpenAI-compatible embeddings API asynchronously.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) corresponding to the input text(s)
+
+        Raises:
+            RuntimeError: If unsupported input type is provided
+        """
+        completion = await self._async_client.embeddings.create(
+            model=self.model_name,
+            input=input_text,
+            dimensions=self.dimensions,
+            encoding_format=self.encoding_format
+        )
+
+        if isinstance(input_text, str):
+            return completion.data[0].embedding
+
+        elif isinstance(input_text, list):
+            result_emb = [[] for _ in range(len(input_text))]
+            for emb in completion.data:
+                result_emb[emb.index] = emb.embedding
+            return result_emb
+
+        else:
+            raise RuntimeError(f"unsupported type={type(input_text)}")
+
 
 def main():
     from flowllm.utils.common_utils import load_env
@@ -91,5 +128,26 @@ def main():
     print(res2)
 
 
+async def async_main():
+    from flowllm.utils.common_utils import load_env
+
+    load_env()
+    model = OpenAICompatibleEmbeddingModel(dimensions=64, model_name="text-embedding-v4")
+
+    # Test async single text embedding
+    res1 = await model.get_embeddings_async(
+        "The clothes are of good quality and look good, definitely worth the wait. I love them.")
+
+    # Test async batch text embedding
+    res2 = await model.get_embeddings_async(["aa", "bb"])
+
+    print("Async results:")
+    print(res1)
+    print(res2)
+
+
 if __name__ == "__main__":
-    main()
+    # main()
+    import asyncio
+
+    asyncio.run(async_main())
flowllm/flow/__init__.py
@@ -0,0 +1 @@
+from . import gallery
flowllm/flow/base_flow.py
@@ -0,0 +1,72 @@
+from abc import ABC, abstractmethod
+from typing import Optional
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.op.base_op import BaseOp
+from flowllm.op.parallel_op import ParallelOp
+from flowllm.op.sequential_op import SequentialOp
+from flowllm.schema.flow_response import FlowResponse
+from flowllm.utils.common_utils import camel_to_snake
+
+
+class BaseFlow(ABC):
+
+    def __init__(self, name: str = "", **kwargs):
+        self.name: str = name or camel_to_snake(self.__class__.__name__)
+        self.flow_params: dict = kwargs
+
+        self.flow_op: Optional[BaseOp] = self.build_flow()
+        self.print_flow()
+
+    @abstractmethod
+    def build_flow(self):
+        ...
+
+    def print_flow(self):
+        assert self.flow_op is not None, "flow_content is not parsed!"
+        logger.info(f"---------- start print flow={self.name} ----------")
+        self._print_operation_tree(self.flow_op, indent=0)
+        logger.info(f"---------- end print flow={self.name} ----------")
+
+    def _print_operation_tree(self, op: BaseOp, indent: int):
+        """
+        Recursively print the operation tree structure.
+
+        Args:
+            op: The operation to print
+            indent: Current indentation level
+        """
+        prefix = " " * indent
+        if isinstance(op, SequentialOp):
+            logger.info(f"{prefix}Sequential Execution:")
+            for i, sub_op in enumerate(op.ops):
+                logger.info(f"{prefix} Step {i + 1}:")
+                self._print_operation_tree(sub_op, indent + 2)
+
+        elif isinstance(op, ParallelOp):
+            logger.info(f"{prefix}Parallel Execution:")
+            for i, sub_op in enumerate(op.ops):
+                logger.info(f"{prefix} Branch {i + 1}:")
+                self._print_operation_tree(sub_op, indent + 2)
+
+        else:
+            logger.info(f"{prefix}Operation: {op.name}")
+
+    def return_callback(self, context: FlowContext):
+        return context.response
+
+    def __call__(self, **kwargs) -> FlowResponse:
+        context = FlowContext(**kwargs)
+        logger.info(f"request.params={kwargs}")
+
+        try:
+            self.flow_op(context=context)
+
+        except Exception as e:
+            logger.exception(f"flow_name={self.name} encounter error={e.args}")
+            context.response.success = False
+            context.response.answer = str(e.args)
+
+        return self.return_callback(context=context)
flowllm/flow/base_tool_flow.py
@@ -0,0 +1,15 @@
+from abc import ABC, abstractmethod
+
+from flowllm.flow.base_flow import BaseFlow
+from flowllm.schema.tool_call import ToolCall
+
+
+class BaseToolFlow(BaseFlow, ABC):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.tool_call: ToolCall = self.build_tool_call()
+
+    @abstractmethod
+    def build_tool_call(self) -> ToolCall:
+        ...
flowllm/flow/gallery/__init__.py
@@ -0,0 +1,8 @@
+from .cmd_flow import CmdFlow
+from .code_tool_flow import CodeToolFlow
+from .dashscope_search_tool_flow import DashscopeSearchToolFlow
+from .deepsearch_tool_flow import DeepSearchToolFlow
+from .expression_tool_flow import ExpressionToolFlow
+from .mock_tool_flow import MockToolFlow
+from .tavily_search_tool_flow import TavilySearchToolFlow
+from .terminate_tool_flow import TerminateToolFlow
flowllm/flow/gallery/cmd_flow.py
@@ -0,0 +1,11 @@
+from flowllm.flow.base_flow import BaseFlow
+from flowllm.flow.parser.expression_parser import ExpressionParser
+
+
+class CmdFlow(BaseFlow):
+
+    def build_flow(self):
+        flow: str = self.flow_params["flow"]
+        assert flow, "add `flow=<op_flow>` in cmd!"
+        parser = ExpressionParser(flow)
+        return parser.parse_flow()
flowllm/flow/gallery/code_tool_flow.py
@@ -0,0 +1,30 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.code.execute_code_op import ExecuteCodeOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class CodeToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return ExecuteCodeOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "python_execute",
+            "description": "Execute python code can be used in scenarios such as analysis or calculation, and the final result can be printed using the `print` function.",
+            "input_schema": {
+                "code": {
+                    "type": "str",
+                    "description": "code to be executed. Please do not execute any matplotlib code here.",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.code_result
+        return context.response
+
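Because BaseToolFlow builds its ToolCall at construction time, a registered flow is both a callable op pipeline and a tool schema the MCP service can expose. A short sketch, assuming ExecuteCodeOp writes its output to context.code_result as return_callback implies:

    from flowllm.flow.gallery import CodeToolFlow

    flow = CodeToolFlow()
    print(flow.tool_call.name)  # "python_execute"

    response = flow(code="print(1 + 1)")  # kwargs land on the FlowContext
    print(response.answer)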
flowllm/flow/gallery/dashscope_search_tool_flow.py
@@ -0,0 +1,34 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.search import DashscopeSearchOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class DashscopeSearchToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return DashscopeSearchOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "web_search",
+            "description": "Use search keywords to retrieve relevant information from the internet. If there are multiple search keywords, please use each keyword separately to call this tool.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.dashscope_search_result
+        return context.response
+
+
+if __name__ == "__main__":
+    flow = DashscopeSearchToolFlow()
+    flow(query="what is AI?")
flowllm/flow/gallery/deepsearch_tool_flow.py
@@ -0,0 +1,39 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.search.dashscope_deep_research_op import DashscopeDeepResearchOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class DeepSearchToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return DashscopeDeepResearchOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "deep_search",
+            "description": "Perform deep research on a topic using Dashscope's qwen-deep-research model. This tool will conduct multi-phase research including model questioning, web research, and result generation.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "Research topic or question",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.dashscope_deep_research_result
+        return context.response
+
+
+if __name__ == "__main__":
+    from flowllm.utils.common_utils import load_env
+
+    load_env()
+
+    flow = DeepSearchToolFlow()
+    result = flow(query="Is China's electrolytic aluminum industry worth investing in? Which targets are worth investing in, and compare the strengths and weaknesses of each target.")
+    print(result.answer)
flowllm/flow/gallery/expression_tool_flow.py
@@ -0,0 +1,18 @@
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.flow.parser.expression_parser import ExpressionParser
+from flowllm.schema.service_config import FlowConfig
+from flowllm.schema.tool_call import ToolCall
+
+
+class ExpressionToolFlow(BaseToolFlow):
+
+    def __init__(self, flow_config: FlowConfig = None, **kwargs):
+        self.flow_config: FlowConfig = flow_config
+        super().__init__(name=flow_config.name, **kwargs)
+
+    def build_flow(self):
+        parser = ExpressionParser(self.flow_config.flow_content)
+        return parser.parse_flow()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**self.flow_config.model_dump())
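ExpressionToolFlow is the bridge between config-defined flows and tool calls: init_by_service_config wraps each entry of service_config.flow in one, and the FlowConfig is dumped straight into a ToolCall. A hypothetical construction, where the single-op expression syntax and any FlowConfig fields beyond name/flow_content are assumptions:

    from flowllm.flow.gallery import ExpressionToolFlow
    from flowllm.schema.service_config import FlowConfig

    # hypothetical expression; ExpressionParser defines the real grammar
    config = FlowConfig(name="my_search_flow", flow_content="dashscope_search_op")
    flow = ExpressionToolFlow(flow_config=config)
    print(flow.tool_call.name)  # "my_search_flow"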