flowllm 0.1.2__tar.gz → 0.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. {flowllm-0.1.2 → flowllm-0.1.3}/PKG-INFO +1 -1
  2. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/__init__.py +5 -1
  3. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/service_context.py +5 -3
  4. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/base_flow.py +4 -2
  5. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/mock_tool_flow.py +0 -5
  6. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/terminate_tool_flow.py +1 -1
  7. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/llm/litellm_llm.py +2 -1
  8. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/__init__.py +1 -0
  9. flowllm-0.1.3/flowllm/op/agent/__init__.py +1 -0
  10. flowllm-0.1.2/flowllm/op/agent/react_op.py → flowllm-0.1.3/flowllm/op/agent/react_v1_op.py +37 -11
  11. flowllm-0.1.2/flowllm/op/agent/react_prompt.yaml → flowllm-0.1.3/flowllm/op/agent/react_v1_prompt.yaml +26 -0
  12. flowllm-0.1.3/flowllm/op/agent/react_v2_op.py +86 -0
  13. flowllm-0.1.3/flowllm/op/agent/react_v2_prompt.yaml +35 -0
  14. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/base_llm_op.py +3 -2
  15. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/base_op.py +26 -9
  16. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/dashscope_deep_research_op.py +9 -2
  17. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/dashscope_search_op.py +9 -2
  18. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/tavily_search_op.py +10 -3
  19. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/message.py +2 -0
  20. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/tool_call.py +7 -3
  21. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/http_service.py +2 -2
  22. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/base_vector_store.py +3 -0
  23. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/es_vector_store.py +3 -3
  24. flowllm-0.1.3/flowllm/utils/logger_utils.py +28 -0
  25. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/PKG-INFO +1 -1
  26. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/SOURCES.txt +5 -2
  27. {flowllm-0.1.2 → flowllm-0.1.3}/pyproject.toml +1 -1
  28. flowllm-0.1.2/flowllm/utils/__init__.py +0 -0
  29. {flowllm-0.1.2 → flowllm-0.1.3}/LICENSE +0 -0
  30. {flowllm-0.1.2 → flowllm-0.1.3}/README.md +0 -0
  31. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/app.py +0 -0
  32. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/client/__init__.py +0 -0
  33. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/client/async_http_client.py +0 -0
  34. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/client/http_client.py +0 -0
  35. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/client/mcp_client.py +0 -0
  36. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/client/sync_mcp_client.py +0 -0
  37. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/config/__init__.py +0 -0
  38. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/config/default.yaml +0 -0
  39. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/config/empty.yaml +0 -0
  40. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/config/pydantic_config_parser.py +0 -0
  41. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/__init__.py +0 -0
  42. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/base_context.py +0 -0
  43. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/flow_context.py +0 -0
  44. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/prompt_handler.py +0 -0
  45. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/registry.py +0 -0
  46. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/embedding_model/__init__.py +0 -0
  47. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/embedding_model/base_embedding_model.py +0 -0
  48. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/embedding_model/openai_compatible_embedding_model.py +0 -0
  49. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/enumeration/__init__.py +0 -0
  50. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/enumeration/chunk_enum.py +0 -0
  51. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/enumeration/http_enum.py +0 -0
  52. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/enumeration/role.py +0 -0
  53. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/__init__.py +0 -0
  54. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/base_tool_flow.py +0 -0
  55. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/__init__.py +0 -0
  56. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/cmd_flow.py +0 -0
  57. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/code_tool_flow.py +0 -0
  58. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/dashscope_search_tool_flow.py +0 -0
  59. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/deepsearch_tool_flow.py +0 -0
  60. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/expression_tool_flow.py +0 -0
  61. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/tavily_search_tool_flow.py +0 -0
  62. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/parser/__init__.py +0 -0
  63. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/parser/expression_parser.py +0 -0
  64. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/llm/__init__.py +0 -0
  65. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/llm/base_llm.py +0 -0
  66. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/llm/openai_compatible_llm.py +0 -0
  67. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/akshare/__init__.py +0 -0
  68. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/akshare/get_ak_a_code_op.py +0 -0
  69. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/akshare/get_ak_a_code_prompt.yaml +0 -0
  70. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/akshare/get_ak_a_info_op.py +0 -0
  71. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/base_ray_op.py +0 -0
  72. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/code/__init__.py +0 -0
  73. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/code/execute_code_op.py +0 -0
  74. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/gallery/__init__.py +0 -0
  75. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/gallery/mock_op.py +0 -0
  76. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/gallery/terminate_op.py +0 -0
  77. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/parallel_op.py +0 -0
  78. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/__init__.py +0 -0
  79. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/dashscope_search_prompt.yaml +0 -0
  80. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/sequential_op.py +0 -0
  81. {flowllm-0.1.2/flowllm/op/agent → flowllm-0.1.3/flowllm/schema}/__init__.py +0 -0
  82. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/flow_request.py +0 -0
  83. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/flow_response.py +0 -0
  84. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/service_config.py +0 -0
  85. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/vector_node.py +0 -0
  86. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/__init__.py +0 -0
  87. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/base_service.py +0 -0
  88. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/cmd_service.py +0 -0
  89. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/mcp_service.py +0 -0
  90. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/__init__.py +0 -0
  91. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/cache/__init__.py +0 -0
  92. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/cache/cache_data_handler.py +0 -0
  93. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/cache/data_cache.py +0 -0
  94. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/__init__.py +0 -0
  95. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/chroma_vector_store.py +0 -0
  96. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/local_vector_store.py +0 -0
  97. {flowllm-0.1.2/flowllm/schema → flowllm-0.1.3/flowllm/utils}/__init__.py +0 -0
  98. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/common_utils.py +0 -0
  99. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/fetch_url.py +0 -0
  100. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/llm_utils.py +0 -0
  101. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/ridge_v2.py +0 -0
  102. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/singleton.py +0 -0
  103. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm/utils/timer.py +0 -0
  104. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/dependency_links.txt +0 -0
  105. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/entry_points.txt +0 -0
  106. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/requires.txt +0 -0
  107. {flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/top_level.txt +0 -0
  108. {flowllm-0.1.2 → flowllm-0.1.3}/setup.cfg +0 -0
  109. {flowllm-0.1.2 → flowllm-0.1.3}/test/test_cache.py +0 -0
  110. {flowllm-0.1.2 → flowllm-0.1.3}/test/test_config.py +0 -0
  111. {flowllm-0.1.2 → flowllm-0.1.3}/test/test_dashscope_llm.py +0 -0
  112. {flowllm-0.1.2 → flowllm-0.1.3}/test/test_dataframe_cache.py +0 -0
  113. {flowllm-0.1.2 → flowllm-0.1.3}/test/test_simple_flow.py +0 -0
{flowllm-0.1.2 → flowllm-0.1.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flowllm
-Version: 0.1.2
+Version: 0.1.3
 Summary: A flexible framework for building LLM-powered flows and mcp services
 Author-email: FlowLLM Team <flowllm@example.com>
 Maintainer-email: FlowLLM Team <flowllm@example.com>
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/__init__.py
@@ -1,5 +1,9 @@
 import os
 
+from flowllm.utils.logger_utils import init_logger
+
+init_logger()
+
 from flowllm.utils.common_utils import load_env
 
 load_env()
@@ -17,5 +21,5 @@ from flowllm import service
 from flowllm.context.service_context import C
 from flowllm.op import BaseOp, BaseRayOp, BaseLLMOp
 
-__version__ = "0.1.2"
+__version__ = "0.1.3"
 
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/context/service_context.py
@@ -39,10 +39,12 @@ class ServiceContext(BaseContext):
 
         self.tool_flow_dict: dict = {}
 
-    def set_default_service_config(self):
-        from flowllm.config.pydantic_config_parser import PydanticConfigParser
+    def set_default_service_config(self, parser=None):
+        if parser is None:
+            from flowllm.config.pydantic_config_parser import PydanticConfigParser
+            parser = PydanticConfigParser
 
-        config_parser = PydanticConfigParser(ServiceConfig)
+        config_parser = parser(ServiceConfig)
         self.service_config = config_parser.parse_args("config=default")
         return self
 
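The new optional `parser` argument makes the config parser injectable instead of hard-wired to PydanticConfigParser. A minimal sketch of a caller swapping in a custom parser class (`MyConfigParser` is hypothetical; the class is instantiated as `parser(ServiceConfig)` and must expose `parse_args`):

    from flowllm.context.service_context import C
    from flowllm.config.pydantic_config_parser import PydanticConfigParser

    class MyConfigParser(PydanticConfigParser):
        # hypothetical subclass; could e.g. read config from another source
        pass

    C.set_default_service_config()                       # falls back to PydanticConfigParser
    C.set_default_service_config(parser=MyConfigParser)  # injected parser class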
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/base_flow.py
@@ -17,7 +17,7 @@ class BaseFlow(ABC):
         self.name: str = name or camel_to_snake(self.__class__.__name__)
         self.flow_params: dict = kwargs
 
-        self.flow_op: Optional[BaseOp] = self.build_flow()
+        self.flow_op: BaseOp = self.build_flow()
         self.print_flow()
 
     @abstractmethod
@@ -55,6 +55,7 @@ class BaseFlow(ABC):
         logger.info(f"{prefix}Operation: {op.name}")
 
     def return_callback(self, context: FlowContext):
+        logger.info(f"context.response={context.response.model_dump_json()}")
         return context.response
 
     def __call__(self, **kwargs) -> FlowResponse:
@@ -62,7 +63,8 @@ class BaseFlow(ABC):
         logger.info(f"request.params={kwargs}")
 
         try:
-            self.flow_op(context=context)
+            flow_op = self.build_flow()
+            flow_op(context=context)
 
         except Exception as e:
             logger.exception(f"flow_name={self.name} encounter error={e.args}")
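Note that `__call__` now rebuilds the op graph on every request, so each invocation runs on fresh op instances rather than the shared `self.flow_op` (which is still built once, for `print_flow`). A minimal sketch of the contract, assuming a `BaseOp` subclass only needs to implement `execute()`; `EchoOp` and `EchoFlow` are hypothetical:

    from flowllm.flow.base_flow import BaseFlow
    from flowllm.op.base_op import BaseOp

    class EchoOp(BaseOp):
        def execute(self):
            # echo the query back as the answer
            self.context.response.answer = self.context.query

    class EchoFlow(BaseFlow):
        def build_flow(self) -> BaseOp:
            return EchoOp()  # called at construction and again per __call__

    response = EchoFlow()(query="ping")  # each request gets its own EchoOp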
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/mock_tool_flow.py
@@ -25,11 +25,6 @@ class MockToolFlow(BaseToolFlow):
             "type": "function",
             "name": "mock_data_processor",
             "description": "A mock tool that processes data through multiple operations and returns structured results",
-            "arguments": {
-                "input_data": "sample_data",
-                "processing_mode": "advanced",
-                "output_format": "json"
-            },
             "input_schema": {
                 "input_data": ParamAttrs(
                     type="string",
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/flow/gallery/terminate_tool_flow.py
@@ -18,7 +18,7 @@ class TerminateToolFlow(BaseToolFlow):
         "input_schema": {
             "status": {
                 "type": "str",
-                "description": "Please determine whether the user's question has been completed. (success / failure)",
+                "description": "If the user's question can be answered, return success, otherwise return failure.",
                 "required": True,
                 "enum": ["success", "failure"],
             }
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/llm/litellm_llm.py
@@ -2,7 +2,6 @@ import asyncio
 import os
 from typing import List, Dict
 
-from litellm import completion, acompletion
 from loguru import logger
 from pydantic import Field, PrivateAttr, model_validator
 
@@ -98,6 +97,7 @@ class LiteLLMBaseLLM(BaseLLM):
         Yields:
             Tuple of (chunk_content, ChunkEnum) for each streaming piece
         """
+        from litellm import completion
         for i in range(self.max_retries):
             try:
                 # Prepare parameters for LiteLLM
@@ -200,6 +200,7 @@ class LiteLLMBaseLLM(BaseLLM):
         Yields:
             Tuple of (chunk_content, ChunkEnum) for each streaming piece
         """
+        from litellm import acompletion
         for i in range(self.max_retries):
             try:
                 # Prepare parameters for LiteLLM
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/__init__.py
@@ -9,3 +9,4 @@ from . import akshare
 from . import code
 from . import gallery
 from . import search
+from . import agent
flowllm-0.1.3/flowllm/op/agent/__init__.py
@@ -0,0 +1 @@
+from .react_v1_op import ReactV1Op
flowllm-0.1.2/flowllm/op/agent/react_op.py → flowllm-0.1.3/flowllm/op/agent/react_v1_op.py
@@ -1,32 +1,45 @@
 import datetime
+import json
 import time
 from typing import List, Dict
 
 from loguru import logger
 
-from flowllm import C, BaseLLMOp
-from flowllm.flow.base_tool_flow import BaseToolFlow
-from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow, TerminateToolFlow
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.op.base_llm_op import BaseLLMOp
+from flowllm.schema.flow_response import FlowResponse
 from flowllm.schema.message import Message, Role
 
 
 @C.register_op()
-class ReactOp(BaseLLMOp):
-    # TODO: test react op
+class ReactV1Op(BaseLLMOp):
     file_path: str = __file__
 
     def execute(self):
         query: str = self.context.query
 
         max_steps: int = int(self.op_params.get("max_steps", 10))
+        from flowllm.flow.base_tool_flow import BaseToolFlow
+        from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow, TerminateToolFlow
+
         tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow(), TerminateToolFlow()]
-        tool_dict: Dict[str, BaseToolFlow] = {x.name: x for x in tools}
+
+        """
+        NOTE: x.tool_call.name != x.name
+        `x.tool_call.name` is the tool's name; `x.name` is the flow's name (a unique service name).
+        """
+        tool_dict: Dict[str, BaseToolFlow] = {x.tool_call.name: x for x in tools}
+        for name, tool_call in tool_dict.items():
+            logger.info(f"name={name} "
+                        f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")
+
         now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         has_terminate_tool = False
 
         user_prompt = self.prompt_format(prompt_name="role_prompt",
                                          time=now_time,
-                                         tools=",".join([x.name for x in tools]),
+                                         tools=",".join(list(tool_dict.keys())),
                                          query=query)
         messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
         logger.info(f"step.0 user_prompt={user_prompt}")
@@ -59,6 +72,7 @@ class ReactOp(BaseLLMOp):
                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} argument_dict={tool_call.argument_dict}")
 
                 if tool_call.name not in tool_dict:
+                    logger.warning(f"step={i} no tool_call.name={tool_call.name}")
                     continue
 
                 self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
@@ -68,7 +82,10 @@
             user_content_list = []
             for tool_result, tool_call in zip(self.join_task(), assistant_message.tool_calls):
                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} tool_result={tool_result}")
-                assert isinstance(tool_result, str)
+                if isinstance(tool_result, FlowResponse):
+                    tool_result = tool_result.answer
+                else:
+                    tool_result = str(tool_result)
                 user_content_list.append(f"<tool_response>\n{tool_result}\n</tool_response>")
             user_content_list.append(self.prompt_format(prompt_name="next_prompt"))
             assistant_message.tool_calls.clear()
@@ -76,8 +93,17 @@
 
             else:
                 assistant_message.tool_calls.clear()
-                messages.append(Message(role=Role.USER, content=self.prompt_format(prompt_name="final_prompt")))
+                query = self.prompt_format(prompt_name="final_prompt", query=query)
+                messages.append(Message(role=Role.USER, content=query))
 
         # Store results in context instead of response
-        self.context.messages = messages
-        self.context.answer = messages[-1].content
+        self.context.response.messages = messages
+        self.context.response.answer = messages[-1].content
+
+
+if __name__ == "__main__":
+    C.set_default_service_config().init_by_service_config()
+    context = FlowContext(query="茅台和五粮现在股价多少?")
+
+    op = ReactV1Op()
+    op(context=context)
flowllm-0.1.2/flowllm/op/agent/react_prompt.yaml → flowllm-0.1.3/flowllm/op/agent/react_v1_prompt.yaml
@@ -26,3 +26,29 @@ final_prompt: |
   # User's Question
   {query}
 
+
+
+
+role_prompt_zh: |
+  你是一个有用的助手。
+  当前时间是 {time}。
+  请根据用户的问题，主动选择最合适的工具或工具组合，包括 {tools} 等。
+  请先思考如何将问题分解为子任务，每个子任务应使用哪些工具和参数，最后提供工具调用名称和参数。
+  尝试多次使用相同的工具，但使用不同的参数，从多个角度获取信息。
+  请根据用户问题的语言来确定回复的语言。
+
+  {query}
+
+next_prompt_zh: |
+  根据当前内容和用户的问题进行思考：当前上下文是否足以回答用户的问题？
+  - 如果当前上下文不足以回答用户的问题，请考虑缺少哪些信息。
+    重新规划并思考如何将缺失的信息分解为子任务。
+    对于每个子任务，确定应使用哪些工具和参数进行查询。
+    请先提供推理过程，然后给出工具调用名称和参数。
+  - 如果当前上下文足以回答用户的问题，请使用 **terminate** 工具。
+
+final_prompt_zh: |
+  请整合上下文，为用户的问题提供一个完整的答案。
+
+  # 用户的问题
+  {query}
flowllm-0.1.3/flowllm/op/agent/react_v2_op.py
@@ -0,0 +1,86 @@
+import datetime
+import json
+import time
+from typing import List, Dict
+
+from loguru import logger
+
+from flowllm.context.flow_context import FlowContext
+from flowllm.context.service_context import C
+from flowllm.op.base_llm_op import BaseLLMOp
+from flowllm.schema.flow_response import FlowResponse
+from flowllm.schema.message import Message, Role
+
+
+@C.register_op()
+class ReactV2Op(BaseLLMOp):
+    file_path: str = __file__
+
+    def execute(self):
+        query: str = self.context.query
+
+        max_steps: int = int(self.op_params.get("max_steps", 10))
+        from flowllm.flow.base_tool_flow import BaseToolFlow
+        from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow
+
+        tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow()]
+
+        """
+        NOTE: x.tool_call.name != x.name
+        `x.tool_call.name` is the tool's name; `x.name` is the flow's name (a unique service name).
+        """
+        tool_dict: Dict[str, BaseToolFlow] = {x.tool_call.name: x for x in tools}
+        for name, tool_call in tool_dict.items():
+            logger.info(f"name={name} "
+                        f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")
+
+        now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+        has_terminate_tool = False
+
+        user_prompt = self.prompt_format(prompt_name="role_prompt",
+                                         time=now_time,
+                                         tools=",".join(list(tool_dict.keys())),
+                                         query=query)
+        messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
+        logger.info(f"step.0 user_prompt={user_prompt}")
+
+        for i in range(max_steps):
+            assistant_message: Message = self.llm.chat(messages, tools=[x.tool_call for x in tools])
+            messages.append(assistant_message)
+            logger.info(f"assistant.round{i}.reasoning_content={assistant_message.reasoning_content}\n"
+                        f"content={assistant_message.content}\n"
+                        f"tool.size={len(assistant_message.tool_calls)}")
+
+            if not assistant_message.tool_calls:
+                break
+
+            for j, tool_call in enumerate(assistant_message.tool_calls):
+                logger.info(f"submit step={i} tool_calls.name={tool_call.name} argument_dict={tool_call.argument_dict}")
+
+                if tool_call.name not in tool_dict:
+                    logger.warning(f"step={i} no tool_call.name={tool_call.name}")
+                    continue
+
+                self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
+                time.sleep(1)
+
+            for i, (tool_result, tool_call) in enumerate(zip(self.join_task(), assistant_message.tool_calls)):
+                logger.info(f"submit step={i} tool_calls.name={tool_call.name} tool_result={tool_result}")
+                if isinstance(tool_result, FlowResponse):
+                    tool_result = tool_result.answer
+                else:
+                    tool_result = str(tool_result)
+                tool_message = Message(role=Role.TOOL, content=tool_result, tool_call_id=tool_call.id)
+                messages.append(tool_message)
+
+        # Store results in context instead of response
+        self.context.response.messages = messages
+        self.context.response.answer = messages[-1].content
+
+
+if __name__ == "__main__":
+    C.set_default_service_config().init_by_service_config()
+    context = FlowContext(query="茅台和五粮现在股价多少?")
+
+    op = ReactV2Op()
+    op(context=context)
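Compared with ReactV1Op, the main change is how tool output flows back to the model: V1 folds all results into a single USER message wrapped in <tool_response> tags, while V2 appends one proper TOOL message per call, correlated by tool_call_id. Schematically:

    # v1: results folded into one user turn
    Message(role=Role.USER, content="<tool_response>\n...\n</tool_response>\n...")

    # v2: one tool turn per call, linked back to the call by id
    Message(role=Role.TOOL, content=tool_result, tool_call_id=tool_call.id)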
flowllm-0.1.3/flowllm/op/agent/react_v2_prompt.yaml
@@ -0,0 +1,35 @@
+role_prompt: |
+  You are a helpful assistant.
+  The current time is {time}.
+  Please proactively choose the most suitable tool or combination of tools based on the user's question, including {tools} etc.
+  Please first think about how to break down the problem into subtasks, what tools and parameters should be used for each subtask, and finally provide the tool call name and parameters.
+  Try calling the same tool multiple times with different parameters to obtain information from various perspectives.
+  Please determine the response language based on the language of the user's question.
+
+  {query}
+
+next_prompt: |
+  Think based on the current content and the user's question: Is the current context sufficient to answer the user's question?
+  - If the current context is not sufficient to answer the user's question, consider what information is missing.
+    Re-plan and think about how to break down the missing information into subtasks.
+    For each subtask, determine what tools and parameters should be used for the query.
+    Please first provide the reasoning process, then give the tool call name and parameters.
+  - If the current context is sufficient to answer the user's question, please integrate the context and provide a complete answer to the user's question.
+
+role_prompt_zh: |
+  你是一个有用的助手。
+  当前时间是 {time}。
+  请根据用户的问题，主动选择最合适的工具或工具组合，包括 {tools} 等。
+  请先思考如何将问题分解为子任务，每个子任务应使用哪些工具和参数，最后提供工具调用名称和参数。
+  尝试多次使用相同的工具，但使用不同的参数，从多个角度获取信息。
+  请根据用户问题的语言来确定回复的语言。
+
+  {query}
+
+next_prompt_zh: |
+  根据当前内容和用户的问题进行思考：当前上下文是否足以回答用户的问题？
+  - 如果当前上下文不足以回答用户的问题，请考虑缺少哪些信息。
+    重新规划并思考如何将缺失的信息分解为子任务。
+    对于每个子任务，确定应使用哪些工具和参数进行查询。
+    请先提供推理过程，然后给出工具调用名称和参数。
+  - 如果当前上下文足以回答用户的问题，请整合上下文，为用户的问题提供一个完整的答案。
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/base_llm_op.py
@@ -23,8 +23,9 @@ class BaseLLMOp(BaseOp, ABC):
         super().__init__(**kwargs)
 
         self.language: str = language or C.language
-        self.prompt_path: Path = Path(prompt_path) if prompt_path else \
-            Path(self.file_path).parent / self.name.replace("_op", "_prompt.yaml")
+        default_prompt_path = self.file_path.replace("op.py", "prompt.yaml")
+        self.prompt_path: Path = Path(prompt_path) if prompt_path else default_prompt_path
+
         self._llm: BaseLLM | str = llm
         self._embedding_model: BaseEmbeddingModel | str = embedding_model
         self._vector_store: BaseVectorStore | str = vector_store
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/base_op.py
@@ -16,11 +16,13 @@ class BaseOp(ABC):
     def __init__(self,
                  name: str = "",
                  raise_exception: bool = True,
+                 enable_multithread: bool = True,
                  **kwargs):
         super().__init__()
 
         self.name: str = name or camel_to_snake(self.__class__.__name__)
         self.raise_exception: bool = raise_exception
+        self.enable_multithread: bool = enable_multithread
         self.op_params: dict = kwargs
 
         self.task_list: List[Future] = []
@@ -48,19 +50,34 @@ class BaseOp(ABC):
         return self.context.response if self.context else None
 
     def submit_task(self, fn, *args, **kwargs):
-        task = C.thread_pool.submit(fn, *args, **kwargs)
-        self.task_list.append(task)
+        if self.enable_multithread:
+            task = C.thread_pool.submit(fn, *args, **kwargs)
+            self.task_list.append(task)
+
+        else:
+            result = fn(*args, **kwargs)
+            if result:
+                if isinstance(result, list):
+                    self.task_list.extend(result)
+                else:
+                    self.task_list.append(result)
+
         return self
 
     def join_task(self, task_desc: str = None) -> list:
         result = []
-        for task in tqdm(self.task_list, desc=task_desc or self.name):
-            t_result = task.result()
-            if t_result:
-                if isinstance(t_result, list):
-                    result.extend(t_result)
-                else:
-                    result.append(t_result)
+        if self.enable_multithread:
+            for task in tqdm(self.task_list, desc=task_desc or self.name):
+                t_result = task.result()
+                if t_result:
+                    if isinstance(t_result, list):
+                        result.extend(t_result)
+                    else:
+                        result.append(t_result)
+
+        else:
+            result.extend(self.task_list)
+
         self.task_list.clear()
         return result
 
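With enable_multithread=False, submit_task runs the function inline and stashes its return value in task_list, and join_task simply returns those stored values, which makes ops easy to step through without the thread pool. A small usage sketch (FetchOp and fetch are hypothetical):

    urls = ["https://example.com/a", "https://example.com/b"]
    op = FetchOp(enable_multithread=False)  # hypothetical BaseOp subclass
    for url in urls:
        op.submit_task(fetch, url)          # executes fetch(url) immediately, stores the result
    results = op.join_task()                # returns the stored results; no Futures involved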
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/dashscope_deep_research_op.py
@@ -40,8 +40,15 @@ class DashscopeDeepResearchOp(BaseLLMOp):
         self.return_only_content = return_only_content
 
         # Ensure API key is available
-        self.api_key = os.getenv("FLOW_DASHSCOPE_API_KEY")
-        self.cache = DataCache(cache_path) if self.enable_cache else None
+        self.api_key = os.environ["FLOW_DASHSCOPE_API_KEY"]
+        self.cache_path: str = cache_path
+        self._cache: DataCache | None = None
+
+    @property
+    def cache(self):
+        if self.enable_cache and self._cache is None:
+            self._cache = DataCache(self.cache_path)
+        return self._cache
 
     def process_responses(self, responses, step_name):
         """Process streaming responses from the deep research model"""
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/dashscope_search_op.py
@@ -45,8 +45,15 @@ class DashscopeSearchOp(BaseLLMOp):
         self.enable_role_prompt = enable_role_prompt
 
         # Ensure API key is available
-        self.api_key = os.getenv("FLOW_DASHSCOPE_API_KEY")
-        self.cache = DataCache(cache_path) if self.enable_cache else None
+        self.api_key = os.environ["FLOW_DASHSCOPE_API_KEY"]
+        self.cache_path: str = cache_path
+        self._cache: DataCache | None = None
+
+    @property
+    def cache(self):
+        if self.enable_cache and self._cache is None:
+            self._cache = DataCache(self.cache_path)
+        return self._cache
 
     @staticmethod
     def format_search_results(search_results: List[Dict[str, Any]]) -> str:
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/op/search/tavily_search_op.py
@@ -16,8 +16,8 @@ from flowllm.storage.cache.data_cache import DataCache
 class TavilySearchOp(BaseOp):
     def __init__(self,
                  enable_print: bool = True,
-                 enable_cache: bool = False,
-                 cache_path: str = "./web_search_cache",
+                 enable_cache: bool = True,
+                 cache_path: str = "./tavily_search_cache",
                  cache_expire_hours: float = 0.1,
                  topic: Literal["general", "news", "finance"] = "general",
                  max_retries: int = 3,
@@ -33,8 +33,15 @@ class TavilySearchOp(BaseOp):
         self.return_only_content = return_only_content
 
         # Initialize DataCache if caching is enabled
-        self.cache = DataCache(cache_path) if self.enable_cache else None
         self._client = TavilyClient(api_key=os.getenv("FLOW_TAVILY_API_KEY", ""))
+        self.cache_path: str = cache_path
+        self._cache: DataCache | None = None
+
+    @property
+    def cache(self):
+        if self.enable_cache and self._cache is None:
+            self._cache = DataCache(self.cache_path)
+        return self._cache
 
     def post_process(self, response):
         if self.enable_print:
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/message.py
@@ -1,3 +1,4 @@
+import datetime
 from typing import List
 
 from pydantic import BaseModel, Field
@@ -12,6 +13,7 @@ class Message(BaseModel):
     reasoning_content: str = Field(default="")
     tool_calls: List[ToolCall] = Field(default_factory=list)
     tool_call_id: str = Field(default="")
+    time_created: str = Field(default_factory=lambda: datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
     metadata: dict = Field(default_factory=dict)
 
     def simple_dump(self, add_reason_content: bool = True) -> dict:
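Each Message now carries a creation timestamp. Because it is a default_factory, the lambda runs per instance, so every message records its own time:

    from flowllm.schema.message import Message, Role

    m = Message(role=Role.USER, content="hello")
    print(m.time_created)  # e.g. "2025-08-20 09:30:00", captured at construction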
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/schema/tool_call.py
@@ -58,12 +58,16 @@ class ToolCall(BaseModel):
     type: str = Field(default="function")
     name: str = Field(default="")
 
-    arguments: dict = Field(default_factory=dict, description="tool execution arguments")
+    arguments: str = Field(default="", description="tool execution arguments")
 
     description: str = Field(default="")
     input_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
     output_schema: Dict[str, ParamAttrs] = Field(default_factory=dict)
 
+    @property
+    def argument_dict(self) -> dict:
+        return json.loads(self.arguments)
+
     def simple_input_dump(self, version: str = "v1") -> dict:
         if version == "v1":
             required_list = [name for name, tool_param in self.input_schema.items() if tool_param.required]
@@ -91,7 +95,7 @@ class ToolCall(BaseModel):
             "index": self.index,
             "id": self.id,
             self.type: {
-                "arguments": json.dumps(self.arguments, ensure_ascii=False),
+                "arguments": self.arguments,
                 "name": self.name
             },
             "type": self.type,
@@ -111,7 +115,7 @@ class ToolCall(BaseModel):
             if name:
                 self.name = name
             if arguments:
-                self.arguments = json.loads(arguments)
+                self.arguments = arguments
         else:
             raise NotImplementedError(f"version {version} not supported")
 
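arguments is now stored as the raw JSON string the model produced, with argument_dict parsing it on demand; this removes the loads/dumps round-trip when the call is echoed back to the API. A usage sketch (the tool name and payload are illustrative):

    from flowllm.schema.tool_call import ToolCall

    tc = ToolCall(name="code_tool", arguments='{"code": "print(1)"}')
    tc.argument_dict  # {'code': 'print(1)'}, via json.loads on each access
    # note: raises json.JSONDecodeError if the model emitted malformed JSON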
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/service/http_service.py
@@ -65,8 +65,8 @@ class HttpService(BaseService):
 
             return await loop.run_in_executor(executor=C.thread_pool, func=list_tool_flows)  # noqa
 
-        endpoint_path = "/list"
-        self.app.get(endpoint_path, response_model=list)(execute_endpoint)
+        endpoint_path = "list"
+        self.app.get("/" + endpoint_path, response_model=list)(execute_endpoint)
         logger.info(f"integrate endpoint={endpoint_path}")
 
     def __call__(self):
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/base_vector_store.py
@@ -24,6 +24,9 @@ class BaseVectorStore(BaseModel, ABC):
     def _iter_workspace_nodes(self, workspace_id: str, **kwargs) -> Iterable[VectorNode]:
         raise NotImplementedError
 
+    def iter_workspace_nodes(self, workspace_id: str, **kwargs) -> Iterable[VectorNode]:
+        return self._iter_workspace_nodes(workspace_id, **kwargs)
+
     def dump_workspace(self, workspace_id: str, path: str | Path = "", callback_fn=None, **kwargs):
         raise NotImplementedError
 
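The new public wrapper exposes the previously protected iterator to callers; a sketch of the intended call site (store being any concrete BaseVectorStore, workspace id illustrative):

    for node in store.iter_workspace_nodes("my_workspace"):
        print(node.unique_id)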
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm/storage/vector_store/es_vector_store.py
@@ -62,7 +62,7 @@ class EsVectorStore(LocalVectorStore):
         node.workspace_id = workspace_id
         node.unique_id = doc["_id"]
         if "_score" in doc:
-            node.metadata["_score"] = doc["_score"] - 1
+            node.metadata["score"] = doc["_score"] - 1
         return node
 
     def add_term_filter(self, key: str, value):
@@ -111,7 +111,7 @@ class EsVectorStore(LocalVectorStore):
         self.retrieve_filters.clear()
         return nodes
 
-    def insert(self, nodes: VectorNode | List[VectorNode], workspace_id: str, refresh: bool = False, **kwargs):
+    def insert(self, nodes: VectorNode | List[VectorNode], workspace_id: str, refresh: bool = True, **kwargs):
         if not self.exist_workspace(workspace_id=workspace_id):
             self.create_workspace(workspace_id=workspace_id)
 
@@ -140,7 +140,7 @@ class EsVectorStore(LocalVectorStore):
         if refresh:
             self.refresh(workspace_id=workspace_id)
 
-    def delete(self, node_ids: str | List[str], workspace_id: str, refresh: bool = False, **kwargs):
+    def delete(self, node_ids: str | List[str], workspace_id: str, refresh: bool = True, **kwargs):
         if not self.exist_workspace(workspace_id=workspace_id):
             logger.warning(f"workspace_id={workspace_id} is not exists!")
             return
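insert and delete now refresh the Elasticsearch index by default, so writes become visible to search immediately at some indexing cost; callers can still opt out per call (store and nodes here are illustrative):

    store.insert(nodes, workspace_id="ws")                 # refreshed: searchable at once
    store.insert(nodes, workspace_id="ws", refresh=False)  # faster for bulk loads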
flowllm-0.1.3/flowllm/utils/logger_utils.py
@@ -0,0 +1,28 @@
+import os
+import sys
+from datetime import datetime
+
+
+def init_logger():
+    from loguru import logger
+    logger.remove()
+
+    log_dir = "logs"
+    os.makedirs(log_dir, exist_ok=True)
+
+    current_ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+
+    log_filename = f"{current_ts}.log"
+    log_filepath = os.path.join(log_dir, log_filename)
+
+    logger.add(log_filepath,
+               level="DEBUG",
+               rotation="00:00",
+               retention="7 days",
+               compression="zip",
+               encoding="utf-8")
+
+    logger.add(sink=sys.stdout,
+               level="INFO",
+               format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
+               colorize=True)
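Since flowllm/__init__.py now calls init_logger() at import time (first hunk above), merely importing the package configures loguru with a DEBUG file sink under ./logs/ (rotated at midnight, zipped, kept 7 days) plus an INFO console sink:

    import flowllm  # triggers init_logger(): creates logs/<timestamp>.log

    from loguru import logger
    logger.debug("goes to the file only")
    logger.info("goes to the file and stdout")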
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flowllm
-Version: 0.1.2
+Version: 0.1.3
 Summary: A flexible framework for building LLM-powered flows and mcp services
 Author-email: FlowLLM Team <flowllm@example.com>
 Maintainer-email: FlowLLM Team <flowllm@example.com>
{flowllm-0.1.2 → flowllm-0.1.3}/flowllm.egg-info/SOURCES.txt
@@ -56,8 +56,10 @@ flowllm/op/base_ray_op.py
 flowllm/op/parallel_op.py
 flowllm/op/sequential_op.py
 flowllm/op/agent/__init__.py
-flowllm/op/agent/react_op.py
-flowllm/op/agent/react_prompt.yaml
+flowllm/op/agent/react_v1_op.py
+flowllm/op/agent/react_v1_prompt.yaml
+flowllm/op/agent/react_v2_op.py
+flowllm/op/agent/react_v2_prompt.yaml
 flowllm/op/akshare/__init__.py
 flowllm/op/akshare/get_ak_a_code_op.py
 flowllm/op/akshare/get_ak_a_code_prompt.yaml
@@ -97,6 +99,7 @@ flowllm/utils/__init__.py
 flowllm/utils/common_utils.py
 flowllm/utils/fetch_url.py
 flowllm/utils/llm_utils.py
+flowllm/utils/logger_utils.py
 flowllm/utils/ridge_v2.py
 flowllm/utils/singleton.py
 flowllm/utils/timer.py
{flowllm-0.1.2 → flowllm-0.1.3}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "flowllm"
-version = "0.1.2"
+version = "0.1.3"
 description = "A flexible framework for building LLM-powered flows and mcp services"
 authors = [
     {name = "FlowLLM Team", email = "flowllm@example.com"}