flowllm 0.1.3__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry.
Files changed (81)
  1. flowllm/__init__.py +4 -3
  2. flowllm/app.py +2 -1
  3. flowllm/config/base.yaml +75 -0
  4. flowllm/config/fin_supply.yaml +39 -0
  5. flowllm/config/pydantic_config_parser.py +16 -1
  6. flowllm/context/__init__.py +2 -0
  7. flowllm/context/base_context.py +10 -20
  8. flowllm/context/flow_context.py +45 -2
  9. flowllm/context/service_context.py +69 -10
  10. flowllm/embedding_model/openai_compatible_embedding_model.py +1 -2
  11. flowllm/enumeration/chunk_enum.py +1 -0
  12. flowllm/flow/__init__.py +9 -0
  13. flowllm/flow/base_flow.py +44 -13
  14. flowllm/flow/expression/__init__.py +1 -0
  15. flowllm/flow/{parser → expression}/expression_parser.py +5 -2
  16. flowllm/flow/expression/expression_tool_flow.py +25 -0
  17. flowllm/flow/gallery/__init__.py +1 -8
  18. flowllm/flow/gallery/mock_tool_flow.py +46 -28
  19. flowllm/flow/tool_op_flow.py +97 -0
  20. flowllm/llm/base_llm.py +0 -2
  21. flowllm/op/__init__.py +3 -4
  22. flowllm/op/akshare/get_ak_a_code_op.py +1 -1
  23. flowllm/op/akshare/get_ak_a_info_op.py +1 -1
  24. flowllm/op/base_op.py +232 -16
  25. flowllm/op/base_tool_op.py +47 -0
  26. flowllm/op/gallery/__init__.py +0 -1
  27. flowllm/op/gallery/mock_op.py +13 -7
  28. flowllm/op/llm/__init__.py +3 -0
  29. flowllm/op/{agent/react_v2_op.py → llm/react_llm_op.py} +43 -24
  30. flowllm/op/llm/simple_llm_op.py +48 -0
  31. flowllm/op/llm/stream_llm_op.py +61 -0
  32. flowllm/op/mcp/__init__.py +2 -0
  33. flowllm/op/mcp/ant_op.py +42 -0
  34. flowllm/op/mcp/base_sse_mcp_op.py +28 -0
  35. flowllm/op/parallel_op.py +5 -1
  36. flowllm/op/search/__init__.py +1 -2
  37. flowllm/op/search/dashscope_search_op.py +73 -128
  38. flowllm/op/search/tavily_search_op.py +64 -82
  39. flowllm/op/sequential_op.py +4 -0
  40. flowllm/schema/flow_stream_chunk.py +11 -0
  41. flowllm/schema/service_config.py +8 -3
  42. flowllm/schema/tool_call.py +46 -1
  43. flowllm/service/__init__.py +0 -1
  44. flowllm/service/base_service.py +31 -14
  45. flowllm/service/http_service.py +45 -36
  46. flowllm/service/mcp_service.py +17 -23
  47. flowllm/storage/vector_store/__init__.py +1 -0
  48. flowllm/storage/vector_store/base_vector_store.py +99 -15
  49. flowllm/storage/vector_store/chroma_vector_store.py +250 -8
  50. flowllm/storage/vector_store/es_vector_store.py +288 -32
  51. flowllm/storage/vector_store/local_vector_store.py +206 -9
  52. flowllm/storage/vector_store/memory_vector_store.py +509 -0
  53. flowllm/utils/common_utils.py +55 -1
  54. flowllm/utils/miner_u_pdf_processor.py +726 -0
  55. {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/METADATA +7 -6
  56. flowllm-0.1.6.dist-info/RECORD +98 -0
  57. flowllm/config/default.yaml +0 -77
  58. flowllm/config/empty.yaml +0 -37
  59. flowllm/flow/gallery/cmd_flow.py +0 -11
  60. flowllm/flow/gallery/code_tool_flow.py +0 -30
  61. flowllm/flow/gallery/dashscope_search_tool_flow.py +0 -34
  62. flowllm/flow/gallery/deepsearch_tool_flow.py +0 -39
  63. flowllm/flow/gallery/expression_tool_flow.py +0 -18
  64. flowllm/flow/gallery/tavily_search_tool_flow.py +0 -30
  65. flowllm/flow/gallery/terminate_tool_flow.py +0 -30
  66. flowllm/flow/parser/__init__.py +0 -0
  67. flowllm/op/agent/__init__.py +0 -1
  68. flowllm/op/agent/react_v1_op.py +0 -109
  69. flowllm/op/agent/react_v1_prompt.yaml +0 -54
  70. flowllm/op/base_ray_op.py +0 -313
  71. flowllm/op/code/__init__.py +0 -1
  72. flowllm/op/code/execute_code_op.py +0 -42
  73. flowllm/op/gallery/terminate_op.py +0 -29
  74. flowllm/op/search/dashscope_deep_research_op.py +0 -267
  75. flowllm/service/cmd_service.py +0 -15
  76. flowllm-0.1.3.dist-info/RECORD +0 -102
  77. flowllm/op/{agent/react_v2_prompt.yaml → llm/react_llm_prompt.yaml} +0 -0
  78. {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/WHEEL +0 -0
  79. {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/entry_points.txt +0 -0
  80. {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/licenses/LICENSE +0 -0
  81. {flowllm-0.1.3.dist-info → flowllm-0.1.6.dist-info}/top_level.txt +0 -0
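
The headline change across these files is an async-first op API: the `flowllm/flow/gallery` tool flows become `BaseToolOp` subclasses under `flowllm/op`, synchronous `execute` entry points gain `async_execute`/`async_call` counterparts, and streaming ops push chunks through a queue on the `FlowContext`. A minimal before/after sketch of the calling pattern, assuming the import path follows the file layout above:

    import asyncio

    from flowllm.context.flow_context import FlowContext
    from flowllm.context.service_context import C
    from flowllm.op.llm.simple_llm_op import SimpleLLMOp  # module path per the file list

    # 0.1.3 pattern (removed): C.set_default_service_config().init_by_service_config()
    # followed by a blocking op(context=context).

    async def main():
        # 0.1.6 pattern: set_service_config() plus an awaited async_call.
        C.set_service_config().init_by_service_config()
        op = SimpleLLMOp()
        context = FlowContext(query="hello")
        result = await op.async_call(context=context)
        print(result)

    if __name__ == "__main__":
        asyncio.run(main())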
flowllm/op/{agent/react_v2_op.py → llm/react_llm_op.py} RENAMED
@@ -1,3 +1,4 @@
+ import asyncio
  import datetime
  import json
  import time
@@ -7,36 +8,46 @@ from loguru import logger

  from flowllm.context.flow_context import FlowContext
  from flowllm.context.service_context import C
- from flowllm.op.base_llm_op import BaseLLMOp
+ from flowllm.op import BaseToolOp
  from flowllm.schema.flow_response import FlowResponse
  from flowllm.schema.message import Message, Role
+ from flowllm.schema.tool_call import ToolCall


- @C.register_op()
- class ReactV2Op(BaseLLMOp):
+ @C.register_op(name="react_llm_op")
+ class ReactLLMOp(BaseToolOp):
      file_path: str = __file__

-     def execute(self):
+     def __init__(self, llm="qwen3_30b_instruct", **kwargs):
+         super().__init__(llm=llm, **kwargs)
+
+     def build_tool_call(self) -> ToolCall:
+         return ToolCall(**{
+             "name": "query_llm",
+             "description": "use this query to query an LLM",
+             "input_schema": {
+                 "query": {
+                     "type": "str",
+                     "description": "search keyword",
+                     "required": True
+                 }
+             }
+         })
+
+     async def async_execute(self):
          query: str = self.context.query

          max_steps: int = int(self.op_params.get("max_steps", 10))
-         from flowllm.flow.base_tool_flow import BaseToolFlow
-         from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow
+         from flowllm.op import BaseToolOp
+         from flowllm.op.search import DashscopeSearchOp

-         tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow()]
-
-         """
-         NOTE: `x.tool_call.name` != `x.name`
-         `x.tool_call.name` is the tool's name; `x.name` is the flow's name (its unique service name).
-         """
-         tool_dict: Dict[str, BaseToolFlow] = {x.tool_call.name: x for x in tools}
+         tools: List[BaseToolOp] = [DashscopeSearchOp(save_answer=True)]
+         tool_dict: Dict[str, BaseToolOp] = {x.tool_call.name: x for x in tools}

          for name, tool_call in tool_dict.items():
              logger.info(f"name={name} "
                          f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")

          now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
-         has_terminate_tool = False
-
          user_prompt = self.prompt_format(prompt_name="role_prompt",
                                           time=now_time,
                                           tools=",".join(list(tool_dict.keys())),
@@ -45,7 +56,7 @@ class ReactV2Op(BaseLLMOp):
          logger.info(f"step.0 user_prompt={user_prompt}")

          for i in range(max_steps):
-             assistant_message: Message = self.llm.chat(messages, tools=[x.tool_call for x in tools])
+             assistant_message: Message = await self.llm.achat(messages, tools=[x.tool_call for x in tools])
              messages.append(assistant_message)
              logger.info(f"assistant.round{i}.reasoning_content={assistant_message.reasoning_content}\n"
                          f"content={assistant_message.content}\n"
@@ -61,11 +72,15 @@ class ReactV2Op(BaseLLMOp):
                      logger.warning(f"step={i} no tool_call.name={tool_call.name}")
                      continue

-                 self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
+                 self.submit_async_task(tool_dict[tool_call.name].copy().async_call,
+                                        context=self.context.copy(**tool_call.argument_dict))
                  time.sleep(1)

-             for i, (tool_result, tool_call) in enumerate(zip(self.join_task(), assistant_message.tool_calls)):
-                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} tool_result={tool_result}")
+             task_results = await self.join_async_task()
+
+             for j, tool_result in enumerate(task_results):
+                 tool_call = assistant_message.tool_calls[j]
+                 logger.info(f"submit step.index={i}.{j} tool_result={tool_result}")
                  if isinstance(tool_result, FlowResponse):
                      tool_result = tool_result.answer
                  else:
@@ -73,14 +88,18 @@ class ReactV2Op(BaseLLMOp):
                  tool_message = Message(role=Role.TOOL, content=tool_result, tool_call_id=tool_call.id)
                  messages.append(tool_message)

-         # Store results in context instead of response
          self.context.response.messages = messages
          self.context.response.answer = messages[-1].content


- if __name__ == "__main__":
-     C.set_default_service_config().init_by_service_config()
+ async def main():
+     C.set_service_config().init_by_service_config()
      context = FlowContext(query="茅台和五粮现在股价多少?")

-     op = ReactV2Op()
-     op(context=context)
+     op = ReactLLMOp()
+     result = await op.async_call(context=context)
+     print(result)
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
flowllm/op/llm/simple_llm_op.py ADDED
@@ -0,0 +1,48 @@
+ import asyncio
+ from typing import List
+
+ from loguru import logger
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.op import BaseToolOp
+ from flowllm.schema.message import Message, Role
+ from flowllm.schema.tool_call import ToolCall
+
+
+ @C.register_op(name="simple_llm_op")
+ class SimpleLLMOp(BaseToolOp):
+
+     def build_tool_call(self) -> ToolCall:
+         return ToolCall(**{
+             "name": "query_llm",
+             "description": "use this query to query an LLM",
+             "input_schema": {
+                 "query": {
+                     "type": "str",
+                     "description": "search keyword",
+                     "required": True
+                 }
+             }
+         })
+
+     async def async_execute(self):
+         query: str = self.input_dict["query"]
+         logger.info(f"query={query}")
+         messages: List[Message] = [Message(role=Role.USER, content=query)]
+
+         assistant_message: Message = await self.llm.achat(messages)
+         self.context.response.answer = assistant_message.content
+
+
+ async def main():
+     C.set_service_config().init_by_service_config()
+     context = FlowContext(query="hello", stream_queue=asyncio.Queue())
+
+     op = SimpleLLMOp()
+     result = await op.async_call(context=context)
+     print(result)
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
flowllm/op/llm/stream_llm_op.py ADDED
@@ -0,0 +1,61 @@
+ import asyncio
+ from typing import List
+
+ from loguru import logger
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.enumeration.chunk_enum import ChunkEnum
+ from flowllm.op import BaseToolOp
+ from flowllm.schema.message import Message, Role
+ from flowllm.schema.tool_call import ToolCall
+
+
+ @C.register_op(name="stream_llm_op")
+ class StreamLLMOp(BaseToolOp):
+
+     def build_tool_call(self) -> ToolCall:
+         return ToolCall(**{
+             "name": "query_llm",
+             "description": "use this query to query an LLM",
+             "input_schema": {
+                 "query": {
+                     "type": "str",
+                     "description": "search keyword",
+                     "required": True
+                 }
+             }
+         })
+
+     async def async_execute(self):
+         query: str = self.input_dict["query"]
+         logger.info(f"query={query}")
+         messages: List[Message] = [Message(role=Role.USER, content=query)]
+
+         async for chunk, chunk_type in self.llm.astream_chat(messages):  # noqa
+             if chunk_type == ChunkEnum.ANSWER:
+                 await self.context.add_stream_answer(chunk)
+
+         await self.context.add_stream_done()
+
+
+ async def main():
+     C.set_service_config().init_by_service_config()
+     context = FlowContext(query="hello, introduce yourself.", stream_queue=asyncio.Queue())
+
+     op = StreamLLMOp()
+     task = asyncio.create_task(op.async_call(context=context))
+
+     while True:
+         stream_chunk = await context.stream_queue.get()
+         if stream_chunk.done:
+             print("\nend")
+             break
+         else:
+             print(stream_chunk.chunk, end="")
+
+     await task
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
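
The `main()` above doubles as the consumption contract for streaming ops: chunks arrive on `context.stream_queue` carrying `chunk` and `done` fields (presumably the new `flowllm/schema/flow_stream_chunk.py`). A small drain helper, sketched on the assumption that those two fields are the whole interface:

    import asyncio

    from flowllm.context.flow_context import FlowContext

    async def drain_stream(context: FlowContext) -> str:
        # Collect streamed chunks until the done marker arrives; relies only
        # on the FlowStreamChunk fields (.chunk, .done) used in main() above.
        parts = []
        while True:
            stream_chunk = await context.stream_queue.get()
            if stream_chunk.done:
                break
            parts.append(stream_chunk.chunk)
        return "".join(parts)

Run it alongside the op task, mirroring `main()` above: start `asyncio.create_task(op.async_call(context=context))`, then `answer = await drain_stream(context)`, then await the task.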
flowllm/op/mcp/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .ant_op import AntSearchOp, AntInvestmentOp
+ from .base_sse_mcp_op import BaseSSEMcpOp
flowllm/op/mcp/ant_op.py ADDED
@@ -0,0 +1,42 @@
+ import asyncio
+ import json
+ import os
+
+ from flowllm.context import FlowContext, C
+ from flowllm.op.mcp.base_sse_mcp_op import BaseSSEMcpOp
+
+
+ @C.register_op()
+ class AntSearchOp(BaseSSEMcpOp):
+
+     def __init__(self, **kwargs):
+         host = os.getenv("FLOW_MCP_HOSTS", "").split(",")[0]
+         super().__init__(host=host, tool_name="search", **kwargs)
+
+
+ @C.register_op()
+ class AntInvestmentOp(BaseSSEMcpOp):
+
+     def __init__(self, **kwargs):
+         host = os.getenv("FLOW_MCP_HOSTS", "").split(",")[0]
+         super().__init__(host=host, tool_name="investment_analysis", **kwargs)
+
+
+ async def async_main():
+     op = AntSearchOp()
+     context = FlowContext(query="阿里巴巴怎么样?", entity="阿里巴巴")
+     await op.async_call(context=context)
+     print(json.dumps(op.tool_call.simple_input_dump(), ensure_ascii=False))
+     print(context.response.answer)
+
+     op = AntInvestmentOp()
+     context = FlowContext(entity="阿里巴巴", analysis_category="股票")
+     await op.async_call(context=context)
+     print(json.dumps(op.tool_call.simple_input_dump(), ensure_ascii=False))
+     print(context.response.answer)
+
+
+ if __name__ == "__main__":
+     C.prepare_sse_mcp().set_service_config().init_by_service_config()
+
+     asyncio.run(async_main())
flowllm/op/mcp/base_sse_mcp_op.py ADDED
@@ -0,0 +1,28 @@
+ from fastmcp import Client
+ from mcp.types import CallToolResult
+
+ from flowllm.context import C
+ from flowllm.op import BaseToolOp
+ from flowllm.schema.tool_call import ToolCall
+
+
+ class BaseSSEMcpOp(BaseToolOp):
+
+     def __init__(self, host: str = "", tool_name: str = "", **kwargs):
+         self.host: str = host
+         self.tool_name: str = tool_name
+         super().__init__(**kwargs)
+
+     def build_tool_call(self) -> ToolCall:
+         key = f"{self.host}/{self.tool_name}"
+         assert key in C.sse_mcp_dict, \
+             f"host={self.host} tool_name={self.tool_name} not found in mcp_tool_call_dict"
+         return C.sse_mcp_dict[key]
+
+     def default_execute(self):
+         self.context.response.answer = self.output_dict[f"{self.name}_result"] = f"{self.name} execute failed!"
+
+     async def async_execute(self):
+         async with Client(f"{self.host}/sse/") as client:
+             result: CallToolResult = await client.call_tool(self.tool_name, arguments=self.input_dict)
+             self.context.response.answer = self.output_dict[f"{self.name}_result"] = result.content[0].text
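
`AntSearchOp` and `AntInvestmentOp` in `ant_op.py` show the intended usage: a subclass pins `host` and `tool_name`, and `build_tool_call` resolves the tool schema from `C.sse_mcp_dict`, which `C.prepare_sse_mcp()` must have populated first. A sketch for wrapping some other SSE MCP tool, with `weather` as a hypothetical tool name:

    import os

    from flowllm.context import C
    from flowllm.op.mcp.base_sse_mcp_op import BaseSSEMcpOp

    @C.register_op()
    class WeatherMcpOp(BaseSSEMcpOp):
        # Hypothetical example: wraps an MCP tool named "weather" exposed by
        # the first host in FLOW_MCP_HOSTS, mirroring AntSearchOp above.
        def __init__(self, **kwargs):
            host = os.getenv("FLOW_MCP_HOSTS", "").split(",")[0]
            super().__init__(host=host, tool_name="weather", **kwargs)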
flowllm/op/parallel_op.py CHANGED
@@ -12,9 +12,13 @@ class ParallelOp(BaseOp):
      def execute(self):
          for op in self.ops:
              self.submit_task(op.__call__, context=self.context)
-
          self.join_task(task_desc="parallel execution")

+     async def async_execute(self):
+         for op in self.ops:
+             self.submit_async_task(op.async_call, context=self.context)
+         return await self.join_async_task()
+
      def __or__(self, op: BaseOp):
          if isinstance(op, ParallelOp):
              self.ops.extend(op.ops)
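
With `async_execute` added, `ParallelOp` now fans its children out with `submit_async_task` and awaits them together, while `__or__` keeps flattening nested parallel groups. A sketch of the composition this enables, assuming `|` on ops builds a `ParallelOp` the way `ParallelOp.__or__` suggests:

    async def run_parallel(op_a, op_b, context):
        # Assumption: composing two ops with "|" yields a ParallelOp, so both
        # children run concurrently over the shared context via
        # submit_async_task/join_async_task.
        parallel = op_a | op_b
        return await parallel.async_call(context=context)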
flowllm/op/search/__init__.py CHANGED
@@ -1,3 +1,2 @@
- from .dashscope_deep_research_op import DashscopeDeepResearchOp
  from .dashscope_search_op import DashscopeSearchOp
- from .tavily_search_op import TavilySearchOp
+ from .tavily_search_op import TavilySearchOp
flowllm/op/search/dashscope_search_op.py CHANGED
@@ -1,18 +1,17 @@
+ import asyncio
  import os
- import time
  from typing import Dict, Any, List

  import dashscope
  from loguru import logger

- from flowllm.context.flow_context import FlowContext
- from flowllm.context.service_context import C
- from flowllm.op.base_llm_op import BaseLLMOp
- from flowllm.storage.cache.data_cache import DataCache
+ from flowllm.context import FlowContext, C
+ from flowllm.op.base_tool_op import BaseToolOp
+ from flowllm.schema.tool_call import ToolCall


  @C.register_op()
- class DashscopeSearchOp(BaseLLMOp):
+ class DashscopeSearchOp(BaseToolOp):
      file_path: str = __file__

      """
@@ -24,36 +23,31 @@ class DashscopeSearchOp(BaseLLMOp):

      def __init__(self,
                   model: str = "qwen-plus",
-                  enable_print: bool = True,
-                  enable_cache: bool = False,
-                  cache_path: str = "./dashscope_search_cache",
-                  cache_expire_hours: float = 0.1,
-                  max_retries: int = 3,
                   search_strategy: str = "max",
-                  return_only_content: bool = True,
                   enable_role_prompt: bool = True,
+                  save_answer: bool = False,
                   **kwargs):
          super().__init__(**kwargs)

-         self.model = model
-         self.enable_print = enable_print
-         self.enable_cache = enable_cache
-         self.cache_expire_hours = cache_expire_hours
-         self.max_retries = max_retries
-         self.search_strategy = search_strategy
-         self.return_only_content = return_only_content
-         self.enable_role_prompt = enable_role_prompt
-
-         # Ensure API key is available
-         self.api_key = os.environ["FLOW_DASHSCOPE_API_KEY"]
-         self.cache_path: str = cache_path
-         self._cache: DataCache | None = None
-
-     @property
-     def cache(self):
-         if self.enable_cache and self._cache is None:
-             self._cache = DataCache(self.cache_path)
-         return self._cache
+         self.model: str = model
+         self.search_strategy: str = search_strategy
+         self.enable_role_prompt: bool = enable_role_prompt
+         self.save_answer: bool = save_answer
+
+         self.api_key = os.getenv("FLOW_DASHSCOPE_API_KEY", "")
+
+     def build_tool_call(self) -> ToolCall:
+         return ToolCall(**{
+             "name": "web_search",
+             "description": "Use search keywords to retrieve relevant information from the internet. If there are multiple search keywords, please use each keyword separately to call this tool.",
+             "input_schema": {
+                 "query": {
+                     "type": "str",
+                     "description": "search keyword",
+                     "required": True
+                 }
+             }
+         })

      @staticmethod
      def format_search_results(search_results: List[Dict[str, Any]]) -> str:
@@ -65,122 +59,73 @@ class DashscopeSearchOp(BaseLLMOp):

          return "\n".join(formatted_results)

-     def post_process(self, response_data: dict) -> dict:
-         """Post-process the response and optionally print results"""
-         if self.enable_print:
-             # Print search information
-             if "search_results" in response_data:
-                 search_info = self.format_search_results(response_data["search_results"])
-                 logger.info(f"Search Information:\n{search_info}")
-
-             # Print response content
-             if "response_content" in response_data:
-                 logger.info("=" * 20 + " Response Content " + "=" * 20)
-                 logger.info(response_data["response_content"])
+     def default_execute(self):
+         self.output_dict["dashscope_search_result"] = "dashscope search failed!"

-         return response_data
+     async def async_execute(self):
+         query: str = self.input_dict["query"]

-     def execute(self):
-         """Execute the Dashscope search operation"""
-         # Get query from context - support multiple parameter names
-         query = self.context.query
-
-         # Check cache first
          if self.enable_cache and self.cache:
              cached_result = self.cache.load(query)
              if cached_result:
-                 result = self.post_process(cached_result)
-                 if self.return_only_content:
-                     self.context.dashscope_search_result = result["response_content"]
-                 else:
-                     self.context.dashscope_search_result = result
-
+                 self.output_dict["dashscope_search_result"] = cached_result["response_content"]
                  return

          if self.enable_role_prompt:
              user_query = self.prompt_format(prompt_name="role_prompt", query=query)
          else:
              user_query = query
+         logger.info(f"user_query={user_query}")
          messages: list = [{"role": "user", "content": user_query}]

-         # Retry logic for API calls
-         for attempt in range(self.max_retries):
-             try:
-                 # Call Dashscope Generation API with search enabled
-                 response = dashscope.Generation.call(
-                     api_key=self.api_key,
-                     model=self.model,
-                     messages=messages,
-                     enable_search=True,  # Enable web search
-                     search_options={
-                         "forced_search": True,  # Force web search
-                         "enable_source": True,  # Include search source information
-                         "enable_citation": False,  # Enable citation markers
-                         "search_strategy": self.search_strategy,  # Search strategy
-                     },
-                     result_format="message",
-                 )
-
-                 # Extract search results and response content
-                 search_results = []
-                 response_content = ""
-
-                 if hasattr(response, 'output') and response.output:
-                     # Extract search information
-                     if hasattr(response.output, 'search_info') and response.output.search_info:
-                         search_results = response.output.search_info.get("search_results", [])
-
-                     # Extract response content
-                     if (hasattr(response.output, 'choices') and
-                             response.output.choices and
-                             len(response.output.choices) > 0):
-                         response_content = response.output.choices[0].message.content
-
-                 # Prepare final result
-                 final_result = {
-                     "query": query,
-                     "search_results": search_results,
-                     "response_content": response_content,
-                     "model": self.model,
-                     "search_strategy": self.search_strategy
-                 }
-
-                 # Cache the result if enabled
-                 if self.enable_cache and self.cache:
-                     self.cache.save(query, final_result, expire_hours=self.cache_expire_hours)
-
-                 # Post-process and set context
-                 result = self.post_process(final_result)
-                 if self.return_only_content:
-                     self.context.dashscope_search_result = result["response_content"]
-                 else:
-                     self.context.dashscope_search_result = result
+         response = await dashscope.AioGeneration.call(
+             api_key=self.api_key,
+             model=self.model,
+             messages=messages,
+             enable_search=True,  # Enable web search
+             search_options={
+                 "forced_search": True,  # Force web search
+                 "enable_source": True,  # Include search source information
+                 "enable_citation": False,  # Enable citation markers
+                 "search_strategy": self.search_strategy,  # Search strategy
+             },
+             result_format="message",
+         )
+
+         search_results = []
+         response_content = ""
+
+         if hasattr(response, "output") and response.output:
+             if hasattr(response.output, "search_info") and response.output.search_info:
+                 search_results = response.output.search_info.get("search_results", [])
+
+             if hasattr(response.output, "choices") and response.output.choices and len(response.output.choices) > 0:
+                 response_content = response.output.choices[0].message.content
+
+         final_result = {
+             "query": query,
+             "search_results": search_results,
+             "response_content": response_content,
+             "model": self.model,
+             "search_strategy": self.search_strategy
+         }

-                 return
-
-             except Exception as e:
-                 logger.warning(f"Dashscope search attempt {attempt + 1} failed for query='{query}': {e}")
-                 if attempt < self.max_retries - 1:
-                     time.sleep(attempt + 1)  # Exponential backoff
-                 else:
-                     logger.error(f"All {self.max_retries} attempts failed for Dashscope search")
-
-         self.context.dashscope_search_result = "dashscope_search failed"
-
-
- def main():
-     from flowllm.utils.common_utils import load_env
+         if self.enable_cache and self.cache:
+             self.cache.save(query, final_result, expire_hours=self.cache_expire_hours)

-     load_env()
+         self.output_dict["dashscope_search_result"] = final_result["response_content"]
+         if self.save_answer:
+             self.context.response.answer = final_result["response_content"]

-     C.set_default_service_config().init_by_service_config()

-     op = DashscopeSearchOp(enable_print=True, enable_cache=False)
+ async def async_main():
+     C.set_service_config().init_by_service_config()

-     context = FlowContext(query="杭州明天天气")
-     op(context=context)
+     op = DashscopeSearchOp()
+     context = FlowContext(query="what is AI?")
+     await op.async_call(context=context)
      print(context.dashscope_search_result)


  if __name__ == "__main__":
-     main()
+     asyncio.run(async_main())
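
Note that 0.1.6 drops the 0.1.3 retry loop along with its `time.sleep(attempt + 1)` backoff (which was linear, despite the old comment calling it exponential). Callers that relied on it can restore retries outside the op; a minimal sketch with a hypothetical helper, not part of flowllm:

    import asyncio

    async def call_with_retries(op, context, max_retries: int = 3):
        # Caller-side replacement for the retry behaviour that 0.1.3 built
        # into DashscopeSearchOp.execute.
        for attempt in range(max_retries):
            try:
                return await op.async_call(context=context)
            except Exception:  # broad catch, mirroring the old loop
                if attempt == max_retries - 1:
                    raise
                await asyncio.sleep(attempt + 1)  # linear backoff, as in 0.1.3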