flowllm 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowllm/__init__.py +21 -0
- flowllm/app.py +15 -0
- flowllm/client/__init__.py +25 -0
- flowllm/client/async_http_client.py +81 -0
- flowllm/client/http_client.py +81 -0
- flowllm/client/mcp_client.py +133 -0
- flowllm/client/sync_mcp_client.py +116 -0
- flowllm/config/__init__.py +1 -0
- flowllm/config/default.yaml +77 -0
- flowllm/config/empty.yaml +37 -0
- flowllm/config/pydantic_config_parser.py +242 -0
- flowllm/context/base_context.py +79 -0
- flowllm/context/flow_context.py +16 -0
- llmflow/op/prompt_mixin.py → flowllm/context/prompt_handler.py +25 -14
- flowllm/context/registry.py +30 -0
- flowllm/context/service_context.py +147 -0
- flowllm/embedding_model/__init__.py +1 -0
- {llmflow → flowllm}/embedding_model/base_embedding_model.py +93 -2
- {llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py +71 -13
- flowllm/flow/__init__.py +1 -0
- flowllm/flow/base_flow.py +72 -0
- flowllm/flow/base_tool_flow.py +15 -0
- flowllm/flow/gallery/__init__.py +8 -0
- flowllm/flow/gallery/cmd_flow.py +11 -0
- flowllm/flow/gallery/code_tool_flow.py +30 -0
- flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
- flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
- flowllm/flow/gallery/expression_tool_flow.py +18 -0
- flowllm/flow/gallery/mock_tool_flow.py +67 -0
- flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
- flowllm/flow/gallery/terminate_tool_flow.py +30 -0
- flowllm/flow/parser/expression_parser.py +171 -0
- flowllm/llm/__init__.py +2 -0
- {llmflow → flowllm}/llm/base_llm.py +100 -18
- flowllm/llm/litellm_llm.py +455 -0
- flowllm/llm/openai_compatible_llm.py +439 -0
- flowllm/op/__init__.py +11 -0
- llmflow/op/react/react_v1_op.py → flowllm/op/agent/react_op.py +17 -22
- flowllm/op/akshare/__init__.py +3 -0
- flowllm/op/akshare/get_ak_a_code_op.py +108 -0
- flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
- flowllm/op/akshare/get_ak_a_info_op.py +140 -0
- flowllm/op/base_llm_op.py +64 -0
- flowllm/op/base_op.py +148 -0
- flowllm/op/base_ray_op.py +313 -0
- flowllm/op/code/__init__.py +1 -0
- flowllm/op/code/execute_code_op.py +42 -0
- flowllm/op/gallery/__init__.py +2 -0
- flowllm/op/gallery/mock_op.py +42 -0
- flowllm/op/gallery/terminate_op.py +29 -0
- flowllm/op/parallel_op.py +23 -0
- flowllm/op/search/__init__.py +3 -0
- flowllm/op/search/dashscope_deep_research_op.py +260 -0
- flowllm/op/search/dashscope_search_op.py +179 -0
- flowllm/op/search/dashscope_search_prompt.yaml +13 -0
- flowllm/op/search/tavily_search_op.py +102 -0
- flowllm/op/sequential_op.py +21 -0
- flowllm/schema/flow_request.py +12 -0
- flowllm/schema/flow_response.py +12 -0
- flowllm/schema/message.py +35 -0
- flowllm/schema/service_config.py +72 -0
- flowllm/schema/tool_call.py +118 -0
- {llmflow → flowllm}/schema/vector_node.py +1 -0
- flowllm/service/__init__.py +3 -0
- flowllm/service/base_service.py +68 -0
- flowllm/service/cmd_service.py +15 -0
- flowllm/service/http_service.py +79 -0
- flowllm/service/mcp_service.py +47 -0
- flowllm/storage/__init__.py +1 -0
- flowllm/storage/cache/__init__.py +1 -0
- flowllm/storage/cache/cache_data_handler.py +104 -0
- flowllm/storage/cache/data_cache.py +375 -0
- flowllm/storage/vector_store/__init__.py +3 -0
- flowllm/storage/vector_store/base_vector_store.py +44 -0
- {llmflow → flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
- {llmflow → flowllm/storage}/vector_store/es_vector_store.py +11 -11
- llmflow/vector_store/file_vector_store.py → flowllm/storage/vector_store/local_vector_store.py +110 -11
- flowllm/utils/common_utils.py +52 -0
- flowllm/utils/fetch_url.py +117 -0
- flowllm/utils/llm_utils.py +28 -0
- flowllm/utils/ridge_v2.py +54 -0
- {llmflow → flowllm}/utils/timer.py +5 -4
- {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/METADATA +45 -388
- flowllm-0.1.2.dist-info/RECORD +99 -0
- flowllm-0.1.2.dist-info/entry_points.txt +2 -0
- {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/licenses/LICENSE +1 -1
- flowllm-0.1.2.dist-info/top_level.txt +1 -0
- flowllm-0.1.0.dist-info/RECORD +0 -66
- flowllm-0.1.0.dist-info/entry_points.txt +0 -3
- flowllm-0.1.0.dist-info/top_level.txt +0 -1
- llmflow/app.py +0 -53
- llmflow/config/config_parser.py +0 -80
- llmflow/config/mock_config.yaml +0 -58
- llmflow/embedding_model/__init__.py +0 -5
- llmflow/enumeration/agent_state.py +0 -8
- llmflow/llm/__init__.py +0 -5
- llmflow/llm/openai_compatible_llm.py +0 -283
- llmflow/mcp_server.py +0 -110
- llmflow/op/__init__.py +0 -10
- llmflow/op/base_op.py +0 -125
- llmflow/op/mock_op.py +0 -40
- llmflow/op/vector_store/__init__.py +0 -13
- llmflow/op/vector_store/recall_vector_store_op.py +0 -48
- llmflow/op/vector_store/update_vector_store_op.py +0 -28
- llmflow/op/vector_store/vector_store_action_op.py +0 -46
- llmflow/pipeline/pipeline.py +0 -94
- llmflow/pipeline/pipeline_context.py +0 -37
- llmflow/schema/app_config.py +0 -69
- llmflow/schema/experience.py +0 -144
- llmflow/schema/message.py +0 -68
- llmflow/schema/request.py +0 -32
- llmflow/schema/response.py +0 -29
- llmflow/service/__init__.py +0 -0
- llmflow/service/llmflow_service.py +0 -96
- llmflow/tool/__init__.py +0 -9
- llmflow/tool/base_tool.py +0 -80
- llmflow/tool/code_tool.py +0 -43
- llmflow/tool/dashscope_search_tool.py +0 -162
- llmflow/tool/mcp_tool.py +0 -77
- llmflow/tool/tavily_search_tool.py +0 -109
- llmflow/tool/terminate_tool.py +0 -23
- llmflow/utils/__init__.py +0 -0
- llmflow/utils/common_utils.py +0 -17
- llmflow/utils/file_handler.py +0 -25
- llmflow/utils/http_client.py +0 -156
- llmflow/utils/op_utils.py +0 -102
- llmflow/utils/registry.py +0 -33
- llmflow/vector_store/__init__.py +0 -7
- llmflow/vector_store/base_vector_store.py +0 -136
- {llmflow → flowllm/context}/__init__.py +0 -0
- {llmflow/config → flowllm/enumeration}/__init__.py +0 -0
- {llmflow → flowllm}/enumeration/chunk_enum.py +0 -0
- {llmflow → flowllm}/enumeration/http_enum.py +0 -0
- {llmflow → flowllm}/enumeration/role.py +0 -0
- {llmflow/enumeration → flowllm/flow/parser}/__init__.py +0 -0
- {llmflow/op/react → flowllm/op/agent}/__init__.py +0 -0
- /llmflow/op/react/react_v1_prompt.yaml → /flowllm/op/agent/react_prompt.yaml +0 -0
- {llmflow/pipeline → flowllm/schema}/__init__.py +0 -0
- {llmflow/schema → flowllm/utils}/__init__.py +0 -0
- {llmflow → flowllm}/utils/singleton.py +0 -0
- {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/WHEEL +0 -0
{llmflow → flowllm}/embedding_model/base_embedding_model.py
CHANGED

```diff
@@ -1,10 +1,11 @@
+import asyncio
 from abc import ABC
 from typing import List
 
 from loguru import logger
 from pydantic import BaseModel, Field
 
-from
+from flowllm.schema.vector_node import VectorNode
 
 
 class BaseEmbeddingModel(BaseModel, ABC):
@@ -37,6 +38,21 @@ class BaseEmbeddingModel(BaseModel, ABC):
         """
         raise NotImplementedError
 
+    async def _get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Abstract async method to get embeddings from the model.
+
+        This method must be implemented by concrete subclasses to provide
+        the actual async embedding functionality.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) corresponding to the input text(s)
+        """
+        raise NotImplementedError
+
     def get_embeddings(self, input_text: str | List[str]):
         """
         Get embeddings with retry logic and error handling.
@@ -64,6 +80,33 @@ class BaseEmbeddingModel(BaseModel, ABC):
         # Return None if all retries failed and raise_exception is False
         return None
 
+    async def get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Get embeddings asynchronously with retry logic and error handling.
+
+        This method wraps the _get_embeddings_async method with automatic retry
+        functionality in case of failures.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) or None if all retries failed and raise_exception is False
+        """
+        # Retry loop with exponential backoff potential
+        for i in range(self.max_retries):
+            try:
+                return await self._get_embeddings_async(input_text)
+
+            except Exception as e:
+                logger.exception(f"embedding model name={self.model_name} encounter error with e={e.args}")
+                # If this is the last retry and raise_exception is True, re-raise the exception
+                if i == self.max_retries - 1 and self.raise_exception:
+                    raise e
+
+        # Return None if all retries failed and raise_exception is False
+        return None
+
     def get_node_embeddings(self, nodes: VectorNode | List[VectorNode]):
         """
         Generate embeddings for VectorNode objects and update their vector fields.
@@ -101,4 +144,52 @@ class BaseEmbeddingModel(BaseModel, ABC):
             return nodes
 
         else:
-            raise
+            raise TypeError(f"unsupported type={type(nodes)}")
+
+    async def get_node_embeddings_async(self, nodes: VectorNode | List[VectorNode]):
+        """
+        Generate embeddings asynchronously for VectorNode objects and update their vector fields.
+
+        This method handles both single nodes and lists of nodes, with automatic
+        batching for efficient processing of large node lists.
+
+        Args:
+            nodes: Single VectorNode or list of VectorNode objects to embed
+
+        Returns:
+            The same node(s) with updated vector fields containing embeddings
+
+        Raises:
+            RuntimeError: If unsupported node type is provided
+        """
+        # Handle single VectorNode
+        if isinstance(nodes, VectorNode):
+            nodes.vector = await self.get_embeddings_async(nodes.content)
+            return nodes
+
+        # Handle list of VectorNodes with batch processing
+        elif isinstance(nodes, list):
+            # Process nodes in batches to respect max_batch_size limits
+            batch_tasks = []
+            for i in range(0, len(nodes), self.max_batch_size):
+                batch_nodes = nodes[i:i + self.max_batch_size]
+                batch_content = [node.content for node in batch_nodes]
+                batch_tasks.append(self.get_embeddings_async(batch_content))
+
+            # Execute all batch tasks concurrently
+            batch_results = await asyncio.gather(*batch_tasks)
+
+            # Flatten the results
+            embeddings = [emb for batch_result in batch_results for emb in batch_result]
+
+            # Validate that we got the expected number of embeddings
+            if len(embeddings) != len(nodes):
+                logger.warning(f"embeddings.size={len(embeddings)} <> nodes.size={len(nodes)}")
+            else:
+                # Assign embeddings to corresponding nodes
+                for node, embedding in zip(nodes, embeddings):
+                    node.vector = embedding
+            return nodes
+
+        else:
+            raise TypeError(f"unsupported type={type(nodes)}")
```
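For orientation, here is a minimal sketch of how the new async node-embedding API above might be driven. It is not taken from the package: it assumes `VectorNode` can be constructed from `content` alone and that the `FLOW_EMBEDDING_API_KEY`/`FLOW_EMBEDDING_BASE_URL` environment variables (introduced in the next file) point at a working endpoint.

```python
# Hedged usage sketch for BaseEmbeddingModel.get_node_embeddings_async.
# Assumes VectorNode(content=...) is a valid constructor and that the
# FLOW_EMBEDDING_* environment variables are configured.
import asyncio

from flowllm.embedding_model.openai_compatible_embedding_model import OpenAICompatibleEmbeddingModel
from flowllm.schema.vector_node import VectorNode


async def embed_corpus():
    model = OpenAICompatibleEmbeddingModel(dimensions=64, model_name="text-embedding-v4")
    nodes = [VectorNode(content=f"document {i}") for i in range(100)]

    # Contents are grouped into ceil(100 / max_batch_size) batches and the
    # batches are embedded concurrently via asyncio.gather (see diff above).
    nodes = await model.get_node_embeddings_async(nodes)
    print(len(nodes), len(nodes[0].vector))


asyncio.run(embed_corpus())
```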
{llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py
CHANGED

```diff
@@ -1,15 +1,14 @@
 import os
 from typing import Literal, List
 
-from
-from openai import OpenAI
+from openai import OpenAI, AsyncOpenAI
 from pydantic import Field, PrivateAttr, model_validator
 
-from
-from
+from flowllm.context.service_context import C
+from flowllm.embedding_model.base_embedding_model import BaseEmbeddingModel
 
 
-@
+@C.register_embedding_model("openai_compatible")
 class OpenAICompatibleEmbeddingModel(BaseEmbeddingModel):
     """
     OpenAI-compatible embedding model implementation.
@@ -19,29 +18,31 @@ class OpenAICompatibleEmbeddingModel(BaseEmbeddingModel):
     other services that follow the same interface.
     """
     # API configuration fields
-    api_key: str = Field(default_factory=lambda: os.getenv("
+    api_key: str = Field(default_factory=lambda: os.getenv("FLOW_EMBEDDING_API_KEY"),
                          description="API key for authentication")
-    base_url: str = Field(default_factory=lambda: os.getenv("
+    base_url: str = Field(default_factory=lambda: os.getenv("FLOW_EMBEDDING_BASE_URL"),
                           description="Base URL for the API endpoint")
     model_name: str = Field(default="", description="Name of the embedding model to use")
     dimensions: int = Field(default=1024, description="Dimensionality of the embedding vectors")
     encoding_format: Literal["float", "base64"] = Field(default="float", description="Encoding format for embeddings")
 
-    # Private OpenAI client
+    # Private OpenAI client instances
     _client: OpenAI = PrivateAttr()
+    _async_client: AsyncOpenAI = PrivateAttr()
 
     @model_validator(mode="after")
     def init_client(self):
         """
-        Initialize the OpenAI
+        Initialize the OpenAI clients after model validation.
 
         This method is called automatically after Pydantic model validation
-        to set up
+        to set up both sync and async OpenAI clients with the provided API key and base URL.
 
         Returns:
             self: The model instance for method chaining
         """
         self._client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+        self._async_client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
         return self
 
     def _get_embeddings(self, input_text: str | List[str]):
@@ -79,9 +80,46 @@ class OpenAICompatibleEmbeddingModel(BaseEmbeddingModel):
         else:
             raise RuntimeError(f"unsupported type={type(input_text)}")
 
+    async def _get_embeddings_async(self, input_text: str | List[str]):
+        """
+        Get embeddings asynchronously from the OpenAI-compatible API.
+
+        This method implements the abstract _get_embeddings_async method from BaseEmbeddingModel
+        by calling the OpenAI-compatible embeddings API asynchronously.
+
+        Args:
+            input_text: Single text string or list of text strings to embed
+
+        Returns:
+            Embedding vector(s) corresponding to the input text(s)
+
+        Raises:
+            RuntimeError: If unsupported input type is provided
+        """
+        completion = await self._async_client.embeddings.create(
+            model=self.model_name,
+            input=input_text,
+            dimensions=self.dimensions,
+            encoding_format=self.encoding_format
+        )
+
+        if isinstance(input_text, str):
+            return completion.data[0].embedding
+
+        elif isinstance(input_text, list):
+            result_emb = [[] for _ in range(len(input_text))]
+            for emb in completion.data:
+                result_emb[emb.index] = emb.embedding
+            return result_emb
+
+        else:
+            raise RuntimeError(f"unsupported type={type(input_text)}")
+
 
 def main():
-
+    from flowllm.utils.common_utils import load_env
+
+    load_env()
     model = OpenAICompatibleEmbeddingModel(dimensions=64, model_name="text-embedding-v4")
     res1 = model.get_embeddings(
         "The clothes are of good quality and look good, definitely worth the wait. I love them.")
@@ -90,6 +128,26 @@ def main():
     print(res2)
 
 
+async def async_main():
+    from flowllm.utils.common_utils import load_env
+
+    load_env()
+    model = OpenAICompatibleEmbeddingModel(dimensions=64, model_name="text-embedding-v4")
+
+    # Test async single text embedding
+    res1 = await model.get_embeddings_async(
+        "The clothes are of good quality and look good, definitely worth the wait. I love them.")
+
+    # Test async batch text embedding
+    res2 = await model.get_embeddings_async(["aa", "bb"])
+
+    print("Async results:")
+    print(res1)
+    print(res2)
+
+
 if __name__ == "__main__":
-    main()
-
+    # main()
+    import asyncio
+
+    asyncio.run(async_main())
```
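Two details of this file are easy to miss. First, credentials are now read from `FLOW_EMBEDDING_API_KEY` and `FLOW_EMBEDDING_BASE_URL` (the 0.1.0 variable names are truncated in this diff, so set the new ones). Second, `_get_embeddings_async` reassembles batch results by `emb.index` rather than trusting response order, so outputs always line up with inputs. A hedged setup sketch, with placeholder values:

```python
# Hedged setup sketch: export the 0.1.2 credential variables before
# constructing the model. Key and URL values here are placeholders.
import os

os.environ["FLOW_EMBEDDING_API_KEY"] = "sk-..."                   # placeholder
os.environ["FLOW_EMBEDDING_BASE_URL"] = "https://example.com/v1"  # placeholder

from flowllm.embedding_model.openai_compatible_embedding_model import OpenAICompatibleEmbeddingModel

model = OpenAICompatibleEmbeddingModel(dimensions=64, model_name="text-embedding-v4")
print(model.get_embeddings("hello")[:4])  # first few dims of one embedding
```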
flowllm/flow/__init__.py
ADDED

```diff
@@ -0,0 +1 @@
+from . import gallery
```
flowllm/flow/base_flow.py
ADDED

```diff
@@ -0,0 +1,72 @@
+from abc import ABC, abstractmethod
+from typing import Optional
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.op.base_op import BaseOp
+from flowllm.op.parallel_op import ParallelOp
+from flowllm.op.sequential_op import SequentialOp
+from flowllm.schema.flow_response import FlowResponse
+from flowllm.utils.common_utils import camel_to_snake
+
+
+class BaseFlow(ABC):
+
+    def __init__(self, name: str = "", **kwargs):
+        self.name: str = name or camel_to_snake(self.__class__.__name__)
+        self.flow_params: dict = kwargs
+
+        self.flow_op: Optional[BaseOp] = self.build_flow()
+        self.print_flow()
+
+    @abstractmethod
+    def build_flow(self):
+        ...
+
+    def print_flow(self):
+        assert self.flow_op is not None, "flow_content is not parsed!"
+        logger.info(f"---------- start print flow={self.name} ----------")
+        self._print_operation_tree(self.flow_op, indent=0)
+        logger.info(f"---------- end print flow={self.name} ----------")
+
+    def _print_operation_tree(self, op: BaseOp, indent: int):
+        """
+        Recursively print the operation tree structure.
+
+        Args:
+            op: The operation to print
+            indent: Current indentation level
+        """
+        prefix = " " * indent
+        if isinstance(op, SequentialOp):
+            logger.info(f"{prefix}Sequential Execution:")
+            for i, sub_op in enumerate(op.ops):
+                logger.info(f"{prefix} Step {i + 1}:")
+                self._print_operation_tree(sub_op, indent + 2)
+
+        elif isinstance(op, ParallelOp):
+            logger.info(f"{prefix}Parallel Execution:")
+            for i, sub_op in enumerate(op.ops):
+                logger.info(f"{prefix} Branch {i + 1}:")
+                self._print_operation_tree(sub_op, indent + 2)
+
+        else:
+            logger.info(f"{prefix}Operation: {op.name}")
+
+    def return_callback(self, context: FlowContext):
+        return context.response
+
+    def __call__(self, **kwargs) -> FlowResponse:
+        context = FlowContext(**kwargs)
+        logger.info(f"request.params={kwargs}")
+
+        try:
+            self.flow_op(context=context)
+
+        except Exception as e:
+            logger.exception(f"flow_name={self.name} encounter error={e.args}")
+            context.response.success = False
+            context.response.answer = str(e.args)
+
+        return self.return_callback(context=context)
```
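`BaseFlow` reduces a subclass to one method: `build_flow` returns an op (or op composition), and `__call__` wraps execution in a `FlowContext`, folding exceptions into `response.success` and `response.answer`. A hedged sketch of the smallest possible subclass, using one of the mock ops this release ships:

```python
# Minimal BaseFlow subclass sketch. Mock1Op is a real op from this release;
# the rest follows the BaseFlow contract shown in the diff above.
from flowllm.flow.base_flow import BaseFlow
from flowllm.op.gallery import Mock1Op


class HelloFlow(BaseFlow):

    def build_flow(self):
        # Any BaseOp works here; ops can also be composed with >> (sequential)
        # and | (parallel), as MockToolFlow does further down.
        return Mock1Op()


response = HelloFlow()(query="hi")  # kwargs become the FlowContext
print(response.success, response.answer)
```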
flowllm/flow/base_tool_flow.py
ADDED

```diff
@@ -0,0 +1,15 @@
+from abc import ABC, abstractmethod
+
+from flowllm.flow.base_flow import BaseFlow
+from flowllm.schema.tool_call import ToolCall
+
+
+class BaseToolFlow(BaseFlow, ABC):
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.tool_call: ToolCall = self.build_tool_call()
+
+    @abstractmethod
+    def build_tool_call(self) -> ToolCall:
+        ...
```
flowllm/flow/gallery/__init__.py
ADDED

```diff
@@ -0,0 +1,8 @@
+from .cmd_flow import CmdFlow
+from .code_tool_flow import CodeToolFlow
+from .dashscope_search_tool_flow import DashscopeSearchToolFlow
+from .deepsearch_tool_flow import DeepSearchToolFlow
+from .expression_tool_flow import ExpressionToolFlow
+from .mock_tool_flow import MockToolFlow
+from .tavily_search_tool_flow import TavilySearchToolFlow
+from .terminate_tool_flow import TerminateToolFlow
```
flowllm/flow/gallery/cmd_flow.py
ADDED

```diff
@@ -0,0 +1,11 @@
+from flowllm.flow.base_flow import BaseFlow
+from flowllm.flow.parser.expression_parser import ExpressionParser
+
+
+class CmdFlow(BaseFlow):
+
+    def build_flow(self):
+        flow: str = self.flow_params["flow"]
+        assert flow, "add `flow=<op_flow>` in cmd!"
+        parser = ExpressionParser(flow)
+        return parser.parse_flow()
```
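`CmdFlow` defers entirely to the expression parser: whatever string arrives as the `flow=` parameter becomes the op graph. The grammar itself lives in `flowllm/flow/parser/expression_parser.py` (+171 lines, not shown in this diff), so the expression below is an assumption that mirrors the `>>`/`|` op operators used elsewhere in this release:

```python
# Hedged sketch: build a flow from a textual op expression. The exact
# expression grammar is defined by ExpressionParser; "mock1_op>>mock2_op"
# is an assumed example, not taken from the package docs.
from flowllm.flow.gallery import CmdFlow

flow = CmdFlow(flow="mock1_op>>mock2_op")  # kwargs land in self.flow_params
response = flow()
print(response.success)
```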
flowllm/flow/gallery/code_tool_flow.py
ADDED

```diff
@@ -0,0 +1,30 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.code.execute_code_op import ExecuteCodeOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class CodeToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return ExecuteCodeOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "python_execute",
+            "description": "Execute python code can be used in scenarios such as analysis or calculation, and the final result can be printed using the `print` function.",
+            "input_schema": {
+                "code": {
+                    "type": "str",
+                    "description": "code to be executed. Please do not execute any matplotlib code here.",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.code_result
+        return context.response
+
```
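Invoking the flow directly would look roughly like this; the `code=` keyword is an assumption based on the tool's `input_schema`, and `context.code_result` is where `return_callback` expects `ExecuteCodeOp` to leave its output:

```python
# Hedged usage sketch for CodeToolFlow; the code= kwarg mirrors the
# input_schema above and is assumed to reach ExecuteCodeOp via the context.
from flowllm.flow.gallery import CodeToolFlow

flow = CodeToolFlow()
response = flow(code="print(21 * 2)")
print(response.answer)  # populated from context.code_result
```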
flowllm/flow/gallery/dashscope_search_tool_flow.py
ADDED

```diff
@@ -0,0 +1,34 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.search import DashscopeSearchOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class DashscopeSearchToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return DashscopeSearchOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "web_search",
+            "description": "Use search keywords to retrieve relevant information from the internet. If there are multiple search keywords, please use each keyword separately to call this tool.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.dashscope_search_result
+        return context.response
+
+
+if __name__ == "__main__":
+    flow = DashscopeSearchToolFlow()
+    flow(query="what is AI?")
```
flowllm/flow/gallery/deepsearch_tool_flow.py
ADDED

```diff
@@ -0,0 +1,39 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.search.dashscope_deep_research_op import DashscopeDeepResearchOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class DeepSearchToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return DashscopeDeepResearchOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "deep_search",
+            "description": "Perform deep research on a topic using Dashscope's qwen-deep-research model. This tool will conduct multi-phase research including model questioning, web research, and result generation.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "Research topic or question",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.dashscope_deep_research_result
+        return context.response
+
+
+if __name__ == "__main__":
+    from flowllm.utils.common_utils import load_env
+
+    load_env()
+
+    flow = DeepSearchToolFlow()
+    result = flow(query="中国电解铝行业值得投资吗,有哪些值得投资的标的,各个标的之间需要对比优劣势")
+    print(result.answer)
```
flowllm/flow/gallery/expression_tool_flow.py
ADDED

```diff
@@ -0,0 +1,18 @@
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.flow.parser.expression_parser import ExpressionParser
+from flowllm.schema.service_config import FlowConfig
+from flowllm.schema.tool_call import ToolCall
+
+
+class ExpressionToolFlow(BaseToolFlow):
+
+    def __init__(self, flow_config: FlowConfig = None, **kwargs):
+        self.flow_config: FlowConfig = flow_config
+        super().__init__(name=flow_config.name, **kwargs)
+
+    def build_flow(self):
+        parser = ExpressionParser(self.flow_config.flow_content)
+        return parser.parse_flow()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**self.flow_config.model_dump())
```
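`ExpressionToolFlow` is the bridge between YAML config and tools: each `FlowConfig` (schema in `flowllm/schema/service_config.py`) is parsed into an op graph, and its fields are dumped straight into a `ToolCall`. A hedged construction sketch; any `FlowConfig` field beyond `name` and `flow_content` is an assumption here:

```python
# Hedged sketch: wrap a config-defined expression as a tool flow.
# FlowConfig's full field set lives in flowllm/schema/service_config.py;
# "description" is assumed because ToolCall(**model_dump()) would carry it.
from flowllm.flow.gallery import ExpressionToolFlow
from flowllm.schema.service_config import FlowConfig

config = FlowConfig(
    name="my_flow",
    description="assumed field",        # assumption
    flow_content="mock1_op>>mock2_op",  # assumed expression grammar
)
flow = ExpressionToolFlow(flow_config=config)
print(flow.tool_call.name)  # "my_flow"
```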
flowllm/flow/gallery/mock_tool_flow.py
ADDED

```diff
@@ -0,0 +1,67 @@
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.gallery import Mock1Op, Mock2Op, Mock3Op, Mock4Op, Mock5Op, Mock6Op
+from flowllm.schema.tool_call import ToolCall, ParamAttrs
+
+
+@C.register_tool_flow()
+class MockToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        mock1_op = Mock1Op()
+        mock2_op = Mock2Op()
+        mock3_op = Mock3Op()
+        mock4_op = Mock4Op()
+        mock5_op = Mock5Op()
+        mock6_op = Mock6Op()
+
+        op = mock1_op >> ((mock4_op >> mock2_op) | mock5_op) >> (mock3_op | mock6_op)
+        return op
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "index": 0,
+            "id": "call_mock_tool_12345",
+            "type": "function",
+            "name": "mock_data_processor",
+            "description": "A mock tool that processes data through multiple operations and returns structured results",
+            "arguments": {
+                "input_data": "sample_data",
+                "processing_mode": "advanced",
+                "output_format": "json"
+            },
+            "input_schema": {
+                "input_data": ParamAttrs(
+                    type="string",
+                    description="The input data to be processed",
+                    required=True
+                ),
+                "processing_mode": ParamAttrs(
+                    type="string",
+                    description="Processing mode: basic, advanced, or expert",
+                    required=False
+                ),
+                "output_format": ParamAttrs(
+                    type="string",
+                    description="Output format: json, xml, or plain",
+                    required=False
+                )
+            },
+            "output_schema": {
+                "result": ParamAttrs(
+                    type="object",
+                    description="Processed result data",
+                    required=True
+                ),
+                "status": ParamAttrs(
+                    type="string",
+                    description="Processing status: success, warning, or error",
+                    required=True
+                ),
+                "metadata": ParamAttrs(
+                    type="object",
+                    description="Additional metadata about the processing",
+                    required=False
+                )
+            }
+        })
```
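The one-liner in `build_flow` is the clearest demonstration of the op-composition operators in this release. Assuming `>>` produces a `SequentialOp` and `|` a `ParallelOp` (which is what `BaseFlow._print_operation_tree` is written to walk), the expression expands to a tree like the following; the explicit constructor signatures are assumptions:

```python
# Hedged expansion of:
#   mock1_op >> ((mock4_op >> mock2_op) | mock5_op) >> (mock3_op | mock6_op)
# Assumes SequentialOp/ParallelOp accept an ops list, consistent with the
# `op.ops` attribute iterated by BaseFlow._print_operation_tree.
from flowllm.op.gallery import Mock1Op, Mock2Op, Mock3Op, Mock4Op, Mock5Op, Mock6Op
from flowllm.op.parallel_op import ParallelOp
from flowllm.op.sequential_op import SequentialOp

op = SequentialOp(ops=[
    Mock1Op(),                                                 # step 1
    ParallelOp(ops=[SequentialOp(ops=[Mock4Op(), Mock2Op()]),  # step 2, branch 1
                    Mock5Op()]),                               # step 2, branch 2
    ParallelOp(ops=[Mock3Op(), Mock6Op()]),                    # step 3, two branches
])
```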
flowllm/flow/gallery/tavily_search_tool_flow.py
ADDED

```diff
@@ -0,0 +1,30 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.search import TavilySearchOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class TavilySearchToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return TavilySearchOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "web_search",
+            "description": "Use search keywords to retrieve relevant information from the internet. If there are multiple search keywords, please use each keyword separately to call this tool.",
+            "input_schema": {
+                "query": {
+                    "type": "str",
+                    "description": "search keyword",
+                    "required": True
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.tavily_search_result
+        return context.response
+
```
flowllm/flow/gallery/terminate_tool_flow.py
ADDED

```diff
@@ -0,0 +1,30 @@
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.flow.base_tool_flow import BaseToolFlow
+from flowllm.op.gallery.terminate_op import TerminateOp
+from flowllm.schema.tool_call import ToolCall
+
+
+@C.register_tool_flow()
+class TerminateToolFlow(BaseToolFlow):
+
+    def build_flow(self):
+        return TerminateOp()
+
+    def build_tool_call(self) -> ToolCall:
+        return ToolCall(**{
+            "name": "terminate",
+            "description": "If you can answer the user's question based on the context, be sure to use the **terminate** tool.",
+            "input_schema": {
+                "status": {
+                    "type": "str",
+                    "description": "Please determine whether the user's question has been completed. (success / failure)",
+                    "required": True,
+                    "enum": ["success", "failure"],
+                }
+            }
+        })
+
+    def return_callback(self, context: FlowContext):
+        context.response.answer = context.terminate_answer
+        return context.response
```