flowllm 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. flowllm/__init__.py +19 -6
  2. flowllm/app.py +4 -14
  3. flowllm/client/__init__.py +25 -0
  4. flowllm/client/async_http_client.py +81 -0
  5. flowllm/client/http_client.py +81 -0
  6. flowllm/client/mcp_client.py +133 -0
  7. flowllm/client/sync_mcp_client.py +116 -0
  8. flowllm/config/__init__.py +1 -0
  9. flowllm/config/{default_config.yaml → default.yaml} +3 -8
  10. flowllm/config/empty.yaml +37 -0
  11. flowllm/config/pydantic_config_parser.py +17 -17
  12. flowllm/context/base_context.py +27 -7
  13. flowllm/context/flow_context.py +6 -18
  14. flowllm/context/registry.py +5 -1
  15. flowllm/context/service_context.py +83 -37
  16. flowllm/embedding_model/__init__.py +1 -1
  17. flowllm/embedding_model/base_embedding_model.py +91 -0
  18. flowllm/embedding_model/openai_compatible_embedding_model.py +63 -5
  19. flowllm/flow/__init__.py +1 -0
  20. flowllm/flow/base_flow.py +74 -0
  21. flowllm/flow/base_tool_flow.py +15 -0
  22. flowllm/flow/gallery/__init__.py +8 -0
  23. flowllm/flow/gallery/cmd_flow.py +11 -0
  24. flowllm/flow/gallery/code_tool_flow.py +30 -0
  25. flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  26. flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  27. flowllm/flow/gallery/expression_tool_flow.py +18 -0
  28. flowllm/flow/gallery/mock_tool_flow.py +62 -0
  29. flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  30. flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  31. flowllm/flow/parser/__init__.py +0 -0
  32. flowllm/{flow_engine/simple_flow_engine.py → flow/parser/expression_parser.py} +25 -67
  33. flowllm/llm/__init__.py +2 -1
  34. flowllm/llm/base_llm.py +94 -4
  35. flowllm/llm/litellm_llm.py +456 -0
  36. flowllm/llm/openai_compatible_llm.py +205 -5
  37. flowllm/op/__init__.py +12 -3
  38. flowllm/op/agent/__init__.py +1 -0
  39. flowllm/op/agent/react_v1_op.py +109 -0
  40. flowllm/op/agent/react_v1_prompt.yaml +54 -0
  41. flowllm/op/agent/react_v2_op.py +86 -0
  42. flowllm/op/agent/react_v2_prompt.yaml +35 -0
  43. flowllm/op/akshare/__init__.py +3 -0
  44. flowllm/op/akshare/get_ak_a_code_op.py +14 -22
  45. flowllm/op/akshare/get_ak_a_info_op.py +17 -20
  46. flowllm/op/{llm_base_op.py → base_llm_op.py} +7 -5
  47. flowllm/op/base_op.py +40 -44
  48. flowllm/op/base_ray_op.py +313 -0
  49. flowllm/op/code/__init__.py +1 -0
  50. flowllm/op/code/execute_code_op.py +42 -0
  51. flowllm/op/gallery/__init__.py +2 -0
  52. flowllm/op/{mock_op.py → gallery/mock_op.py} +4 -4
  53. flowllm/op/gallery/terminate_op.py +29 -0
  54. flowllm/op/parallel_op.py +2 -9
  55. flowllm/op/search/__init__.py +3 -0
  56. flowllm/op/search/dashscope_deep_research_op.py +267 -0
  57. flowllm/op/search/dashscope_search_op.py +186 -0
  58. flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  59. flowllm/op/search/tavily_search_op.py +109 -0
  60. flowllm/op/sequential_op.py +1 -9
  61. flowllm/schema/flow_request.py +12 -0
  62. flowllm/schema/message.py +2 -0
  63. flowllm/schema/service_config.py +12 -16
  64. flowllm/schema/tool_call.py +20 -8
  65. flowllm/schema/vector_node.py +1 -0
  66. flowllm/service/__init__.py +3 -2
  67. flowllm/service/base_service.py +50 -41
  68. flowllm/service/cmd_service.py +15 -0
  69. flowllm/service/http_service.py +34 -42
  70. flowllm/service/mcp_service.py +13 -11
  71. flowllm/storage/cache/__init__.py +1 -0
  72. flowllm/storage/cache/cache_data_handler.py +104 -0
  73. flowllm/{utils/dataframe_cache.py → storage/cache/data_cache.py} +136 -92
  74. flowllm/storage/vector_store/__init__.py +3 -3
  75. flowllm/storage/vector_store/base_vector_store.py +3 -0
  76. flowllm/storage/vector_store/es_vector_store.py +4 -5
  77. flowllm/storage/vector_store/local_vector_store.py +0 -1
  78. flowllm/utils/common_utils.py +9 -21
  79. flowllm/utils/fetch_url.py +16 -12
  80. flowllm/utils/llm_utils.py +28 -0
  81. flowllm/utils/logger_utils.py +28 -0
  82. flowllm/utils/ridge_v2.py +54 -0
  83. {flowllm-0.1.1.dist-info → flowllm-0.1.3.dist-info}/METADATA +43 -390
  84. flowllm-0.1.3.dist-info/RECORD +102 -0
  85. flowllm-0.1.3.dist-info/entry_points.txt +2 -0
  86. flowllm/flow_engine/__init__.py +0 -1
  87. flowllm/flow_engine/base_flow_engine.py +0 -34
  88. flowllm-0.1.1.dist-info/RECORD +0 -62
  89. flowllm-0.1.1.dist-info/entry_points.txt +0 -4
  90. {flowllm-0.1.1.dist-info → flowllm-0.1.3.dist-info}/WHEEL +0 -0
  91. {flowllm-0.1.1.dist-info → flowllm-0.1.3.dist-info}/licenses/LICENSE +0 -0
  92. {flowllm-0.1.1.dist-info → flowllm-0.1.3.dist-info}/top_level.txt +0 -0
flowllm/llm/openai_compatible_llm.py CHANGED
@@ -2,7 +2,7 @@ import os
  from typing import List, Dict
 
  from loguru import logger
- from openai import OpenAI
+ from openai import OpenAI, AsyncOpenAI
  from openai.types import CompletionUsage
  from pydantic import Field, PrivateAttr, model_validator
 
@@ -33,19 +33,21 @@ class OpenAICompatibleBaseLLM(BaseLLM):
      base_url: str = Field(default_factory=lambda: os.getenv("FLOW_LLM_BASE_URL"),
                            description="Base URL for the API endpoint")
      _client: OpenAI = PrivateAttr()
+     _aclient: AsyncOpenAI = PrivateAttr()
 
      @model_validator(mode="after")
      def init_client(self):
          """
-         Initialize the OpenAI client after model validation.
+         Initialize the OpenAI clients after model validation.
 
          This validator runs after all field validation is complete,
-         ensuring we have valid API credentials before creating the client.
+         ensuring we have valid API credentials before creating the clients.
 
          Returns:
              Self for method chaining
          """
          self._client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+         self._aclient = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
          return self
 
      def stream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
@@ -151,6 +153,109 @@ class OpenAICompatibleBaseLLM(BaseLLM):
                  else:
                      yield e.args, ChunkEnum.ERROR
 
+     async def astream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
+         """
+         Async stream chat completions from an OpenAI-compatible API.
+
+         This method handles async streaming responses and categorizes chunks into different types:
+         - THINK: Reasoning/thinking content from the model
+         - ANSWER: Regular response content
+         - TOOL: Tool calls that need to be executed
+         - USAGE: Token usage statistics
+         - ERROR: Error information
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             **kwargs: Additional parameters
+
+         Yields:
+             Tuple of (chunk_content, ChunkEnum) for each streaming piece
+         """
+         for i in range(self.max_retries):
+             try:
+                 extra_body = {}
+                 if self.enable_thinking:
+                     extra_body["enable_thinking"] = True  # qwen3 params
+
+                 completion = await self._aclient.chat.completions.create(
+                     model=self.model_name,
+                     messages=[x.simple_dump() for x in messages],
+                     seed=self.seed,
+                     top_p=self.top_p,
+                     stream=True,
+                     stream_options=self.stream_options,
+                     temperature=self.temperature,
+                     extra_body=extra_body,
+                     tools=[x.simple_input_dump() for x in tools] if tools else None,
+                     parallel_tool_calls=self.parallel_tool_calls)
+
+                 # Initialize tool call tracking
+                 ret_tools: List[ToolCall] = []  # Accumulate tool calls across chunks
+                 is_answering: bool = False  # Track when model starts answering
+
+                 # Process each chunk in the streaming response
+                 async for chunk in completion:
+                     # Handle chunks without choices (usually usage info)
+                     if not chunk.choices:
+                         yield chunk.usage, ChunkEnum.USAGE
+
+                     else:
+                         delta = chunk.choices[0].delta
+
+                         # Handle reasoning/thinking content (model's internal thoughts)
+                         if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
+                             yield delta.reasoning_content, ChunkEnum.THINK
+
+                         else:
+                             # Mark transition from thinking to answering
+                             if not is_answering:
+                                 is_answering = True
+
+                             # Handle regular response content
+                             if delta.content is not None:
+                                 yield delta.content, ChunkEnum.ANSWER
+
+                             # Handle tool calls (function calling)
+                             if delta.tool_calls is not None:
+                                 for tool_call in delta.tool_calls:
+                                     index = tool_call.index
+
+                                     # Ensure we have enough tool call slots
+                                     while len(ret_tools) <= index:
+                                         ret_tools.append(ToolCall(index=index))
+
+                                     # Accumulate tool call information across chunks
+                                     if tool_call.id:
+                                         ret_tools[index].id += tool_call.id
+
+                                     if tool_call.function and tool_call.function.name:
+                                         ret_tools[index].name += tool_call.function.name
+
+                                     if tool_call.function and tool_call.function.arguments:
+                                         ret_tools[index].arguments += tool_call.function.arguments
+
+                 # Yield completed tool calls after streaming finishes
+                 if ret_tools:
+                     tool_dict: Dict[str, ToolCall] = {x.name: x for x in tools} if tools else {}
+                     for tool in ret_tools:
+                         # Only yield tool calls that correspond to available tools
+                         if tool.name not in tool_dict:
+                             continue
+
+                         yield tool, ChunkEnum.TOOL
+
+                 return
+
+             except Exception as e:
+                 logger.exception(f"async stream chat with model={self.model_name} encounter error with e={e.args}")
+
+                 # Handle retry logic
+                 if i == self.max_retries - 1 and self.raise_exception:
+                     raise e
+                 else:
+                     yield e.args, ChunkEnum.ERROR
+
      def _chat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
                **kwargs) -> Message:
          """
@@ -224,6 +329,95 @@ class OpenAICompatibleBaseLLM(BaseLLM):
                         content=answer_content,
                         tool_calls=tool_calls)
 
+     async def _achat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+                      **kwargs) -> Message:
+         """
+         Perform an async complete chat completion by aggregating streaming chunks.
+
+         This method consumes the entire async streaming response and combines all
+         chunks into a single Message object. It separates reasoning content,
+         regular answer content, and tool calls.
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             enable_stream_print: Whether to print streaming response to console
+             **kwargs: Additional parameters
+
+         Returns:
+             Complete Message with all content aggregated
+         """
+
+         enter_think = False  # Whether we've started printing thinking content
+         enter_answer = False  # Whether we've started printing answer content
+         reasoning_content = ""  # Model's internal reasoning
+         answer_content = ""  # Final response content
+         tool_calls = []  # List of tool calls to execute
+
+         # Consume async streaming response and aggregate chunks by type
+         async for chunk, chunk_enum in self.astream_chat(messages, tools, **kwargs):
+             if chunk_enum is ChunkEnum.USAGE:
+                 # Display token usage statistics
+                 if enable_stream_print:
+                     if isinstance(chunk, CompletionUsage):
+                         print(f"\n<usage>{chunk.model_dump_json(indent=2)}</usage>")
+                     else:
+                         print(f"\n<usage>{chunk}</usage>")
+
+             elif chunk_enum is ChunkEnum.THINK:
+                 if enable_stream_print:
+                     # Format thinking/reasoning content
+                     if not enter_think:
+                         enter_think = True
+                         print("<think>\n", end="")
+                     print(chunk, end="")
+
+                 reasoning_content += chunk
+
+             elif chunk_enum is ChunkEnum.ANSWER:
+                 if enable_stream_print:
+                     if not enter_answer:
+                         enter_answer = True
+                         # Close thinking section if we were in it
+                         if enter_think:
+                             print("\n</think>")
+                     print(chunk, end="")
+
+                 answer_content += chunk
+
+             elif chunk_enum is ChunkEnum.TOOL:
+                 if enable_stream_print:
+                     print(f"\n<tool>{chunk.model_dump_json()}</tool>", end="")
+
+                 tool_calls.append(chunk)
+
+             elif chunk_enum is ChunkEnum.ERROR:
+                 if enable_stream_print:
+                     # Display error information
+                     print(f"\n<error>{chunk}</error>", end="")
+
+         # Construct complete response message
+         return Message(role=Role.ASSISTANT,
+                        reasoning_content=reasoning_content,
+                        content=answer_content,
+                        tool_calls=tool_calls)
+
+
+ async def async_main():
+     from flowllm.utils.common_utils import load_env
+
+     load_env()
+
+     # model_name = "qwen-max-2025-01-25"
+     model_name = "qwen3-30b-a3b-thinking-2507"
+     llm = OpenAICompatibleBaseLLM(model_name=model_name)
+
+     # Test async chat
+     message: Message = await llm.achat([Message(role=Role.USER, content="hello")], [],
+                                        enable_stream_print=True)
+     print("Async result:", message)
+
+
  def main():
      from flowllm.utils.common_utils import load_env
 
@@ -231,9 +425,15 @@ def main():
      model_name = "qwen-max-2025-01-25"
      llm = OpenAICompatibleBaseLLM(model_name=model_name)
+
+     # Test sync chat
      message: Message = llm.chat([Message(role=Role.USER, content="hello")], [],
                                  enable_stream_print=False)
-     print(message)
+     print("Sync result:", message)
+
 
  if __name__ == "__main__":
-     main()
+     # main()
+
+     import asyncio
+     asyncio.run(async_main())
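
For orientation, the new async surface added above (`astream_chat`, plus the `achat` path that wraps `_achat`) can be consumed as in the following minimal sketch. It is based only on the signatures and chunk types shown in this diff; the `ChunkEnum` import path is an assumption and may differ in the installed package.

import asyncio

from flowllm.enumeration.chunk_enum import ChunkEnum  # assumed import path, not shown in this diff
from flowllm.llm.openai_compatible_llm import OpenAICompatibleBaseLLM
from flowllm.schema.message import Message, Role
from flowllm.utils.common_utils import load_env


async def demo():
    load_env()
    llm = OpenAICompatibleBaseLLM(model_name="qwen3-30b-a3b-thinking-2507")

    # astream_chat yields (chunk, ChunkEnum) pairs; route each chunk by its type.
    async for chunk, kind in llm.astream_chat([Message(role=Role.USER, content="hello")]):
        if kind is ChunkEnum.THINK:
            print(f"[think] {chunk}")
        elif kind is ChunkEnum.ANSWER:
            print(f"[answer] {chunk}")
        elif kind is ChunkEnum.TOOL:
            print(f"[tool] {chunk.model_dump_json()}")
        elif kind is ChunkEnum.USAGE:
            print(f"[usage] {chunk}")
        elif kind is ChunkEnum.ERROR:
            print(f"[error] {chunk}")


if __name__ == "__main__":
    asyncio.run(demo())
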
flowllm/op/__init__.py CHANGED
@@ -1,3 +1,12 @@
- from flowllm.op.akshare.get_ak_a_code_op import GetAkACodeOp
- from flowllm.op.akshare.get_ak_a_info_op import GetAkAInfoOp, GetAkASpotOp, GetAkAMoneyFlowOp, GetAkAFinancialInfoOp, GetAkANewsOp, MergeAkAInfoOp
- from flowllm.op.mock_op import Mock1Op, Mock2Op, Mock3Op, Mock4Op, Mock5Op, Mock6Op
+ from .base_llm_op import BaseLLMOp
+ from .base_op import BaseOp
+ from .base_ray_op import BaseRayOp
+
+ """
+ op folder
+ """
+ from . import akshare
+ from . import code
+ from . import gallery
+ from . import search
+ from . import agent
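
As a quick orientation on the restructured package, a sketch of the resulting import surface (symbol locations follow the renames in the file list above and are otherwise assumed unchanged):

# Base classes are re-exported by flowllm/op/__init__.py shown above.
from flowllm.op import BaseLLMOp, BaseOp, BaseRayOp

# Concrete ops moved into subpackages, e.g. mock_op.py -> gallery/mock_op.py
# per the file list (Mock1Op assumed to keep its name after the move).
from flowllm.op.gallery.mock_op import Mock1Op
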
flowllm/op/agent/__init__.py ADDED
@@ -0,0 +1 @@
+ from .react_v1_op import ReactV1Op
flowllm/op/agent/react_v1_op.py ADDED
@@ -0,0 +1,109 @@
+ import datetime
+ import json
+ import time
+ from typing import List, Dict
+
+ from loguru import logger
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.op.base_llm_op import BaseLLMOp
+ from flowllm.schema.flow_response import FlowResponse
+ from flowllm.schema.message import Message, Role
+
+
+ @C.register_op()
+ class ReactV1Op(BaseLLMOp):
+     file_path: str = __file__
+
+     def execute(self):
+         query: str = self.context.query
+
+         max_steps: int = int(self.op_params.get("max_steps", 10))
+         from flowllm.flow.base_tool_flow import BaseToolFlow
+         from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow, TerminateToolFlow
+
+         tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow(), TerminateToolFlow()]
+
+         """
+         NOTE: x.tool_call.name != x.name
+         `x.tool_call.name` is the tool's name; `x.name` is the flow's name (unique service name).
+         """
+         tool_dict: Dict[str, BaseToolFlow] = {x.tool_call.name: x for x in tools}
+         for name, tool_call in tool_dict.items():
+             logger.info(f"name={name} "
+                         f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")
+
+         now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+         has_terminate_tool = False
+
+         user_prompt = self.prompt_format(prompt_name="role_prompt",
+                                          time=now_time,
+                                          tools=",".join(list(tool_dict.keys())),
+                                          query=query)
+         messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
+         logger.info(f"step.0 user_prompt={user_prompt}")
+
+         for i in range(max_steps):
+             if has_terminate_tool:
+                 assistant_message: Message = self.llm.chat(messages)
+             else:
+                 assistant_message: Message = self.llm.chat(messages, tools=[x.tool_call for x in tools])
+
+             messages.append(assistant_message)
+             logger.info(f"assistant.{i}.reasoning_content={assistant_message.reasoning_content}\n"
+                         f"content={assistant_message.content}\n"
+                         f"tool.size={len(assistant_message.tool_calls)}")
+
+             if has_terminate_tool:
+                 break
+
+             for tool in assistant_message.tool_calls:
+                 if tool.name == "terminate":
+                     has_terminate_tool = True
+                     logger.info(f"step={i} find terminate tool, break.")
+                     break
+
+             if not has_terminate_tool and not assistant_message.tool_calls:
+                 logger.warning(f"【bugfix】step={i} no tools, break.")
+                 has_terminate_tool = True
+
+             for j, tool_call in enumerate(assistant_message.tool_calls):
+                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} argument_dict={tool_call.argument_dict}")
+
+                 if tool_call.name not in tool_dict:
+                     logger.warning(f"step={i} no tool_call.name={tool_call.name}")
+                     continue
+
+                 self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
+                 time.sleep(1)
+
+             if not has_terminate_tool:
+                 user_content_list = []
+                 for tool_result, tool_call in zip(self.join_task(), assistant_message.tool_calls):
+                     logger.info(f"submit step={i} tool_calls.name={tool_call.name} tool_result={tool_result}")
+                     if isinstance(tool_result, FlowResponse):
+                         tool_result = tool_result.answer
+                     else:
+                         tool_result = str(tool_result)
+                     user_content_list.append(f"<tool_response>\n{tool_result}\n</tool_response>")
+                 user_content_list.append(self.prompt_format(prompt_name="next_prompt"))
+                 assistant_message.tool_calls.clear()
+                 messages.append(Message(role=Role.USER, content="\n".join(user_content_list)))
+
+             else:
+                 assistant_message.tool_calls.clear()
+                 query = self.prompt_format(prompt_name="final_prompt", query=query)
+                 messages.append(Message(role=Role.USER, content=query))
+
+         # Store results in context instead of response
+         self.context.response.messages = messages
+         self.context.response.answer = messages[-1].content
+
+
+ if __name__ == "__main__":
+     C.set_default_service_config().init_by_service_config()
+     context = FlowContext(query="茅台和五粮现在股价多少?")
+
+     op = ReactV1Op()
+     op(context=context)
flowllm/op/agent/react_v1_prompt.yaml ADDED
@@ -0,0 +1,54 @@
+ role_prompt: |
+   You are a helpful assistant.
+   The current time is {time}.
+   Please proactively choose the most suitable tool or combination of tools based on the user's question, including {tools} etc.
+   Please first think about how to break down the problem into subtasks, what tools and parameters should be used for each subtask, and finally provide the tool call name and parameters.
+   Try calling the same tool multiple times with different parameters to obtain information from various perspectives.
+   Please determine the response language based on the language of the user's question.
+
+   {query}
+
+   # write a complete and rigorous report to answer user's questions based on the context.
+ next_prompt: |
+   Think based on the current content and the user's question: Is the current context sufficient to answer the user's question?
+
+   - If the current context is not sufficient to answer the user's question, consider what information is missing.
+   Re-plan and think about how to break down the missing information into subtasks.
+   For each subtask, determine what tools and parameters should be used for the query.
+   Please first provide the reasoning process, then give the tool call name and parameters.
+
+   - If the current context is sufficient to answer the user's question, use the **terminate** tool.
+
+   # Please determine the response language based on the language of the user's question.
+ final_prompt: |
+   Please integrate the context and provide a complete answer to the user's question.
+
+   # User's Question
+   {query}
+
+
+
+
+ role_prompt_zh: |
+   你是一个有用的助手。
+   当前时间是 {time}。
+   请根据用户的问题,主动选择最合适的工具或工具组合,包括 {tools} 等。
+   请先思考如何将问题分解为子任务,每个子任务应使用哪些工具和参数,最后提供工具调用名称和参数。
+   尝试多次使用相同的工具,但使用不同的参数,从多个角度获取信息。
+   请根据用户问题的语言来确定回复的语言。
+
+   {query}
+
+ next_prompt_zh: |
+   根据当前内容和用户的问题进行思考:当前上下文是否足以回答用户的问题?
+   - 如果当前上下文不足以回答用户的问题,请考虑缺少哪些信息。
+   重新规划并思考如何将缺失的信息分解为子任务。
+   对于每个子任务,确定应使用哪些工具和参数进行查询。
+   请先提供推理过程,然后给出工具调用名称和参数。
+   - 如果当前上下文足以回答用户的问题,请使用 **terminate** 工具。
+
+ final_prompt_zh: |
+   请整合上下文,为用户的问题提供一个完整的答案。
+
+   # 用户的问题
+   {query}
flowllm/op/agent/react_v2_op.py ADDED
@@ -0,0 +1,86 @@
+ import datetime
+ import json
+ import time
+ from typing import List, Dict
+
+ from loguru import logger
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.op.base_llm_op import BaseLLMOp
+ from flowllm.schema.flow_response import FlowResponse
+ from flowllm.schema.message import Message, Role
+
+
+ @C.register_op()
+ class ReactV2Op(BaseLLMOp):
+     file_path: str = __file__
+
+     def execute(self):
+         query: str = self.context.query
+
+         max_steps: int = int(self.op_params.get("max_steps", 10))
+         from flowllm.flow.base_tool_flow import BaseToolFlow
+         from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow
+
+         tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow()]
+
+         """
+         NOTE: x.tool_call.name != x.name
+         `x.tool_call.name` is the tool's name; `x.name` is the flow's name (unique service name).
+         """
+         tool_dict: Dict[str, BaseToolFlow] = {x.tool_call.name: x for x in tools}
+         for name, tool_call in tool_dict.items():
+             logger.info(f"name={name} "
+                         f"tool_call={json.dumps(tool_call.tool_call.simple_input_dump(), ensure_ascii=False)}")
+
+         now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+         has_terminate_tool = False
+
+         user_prompt = self.prompt_format(prompt_name="role_prompt",
+                                          time=now_time,
+                                          tools=",".join(list(tool_dict.keys())),
+                                          query=query)
+         messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
+         logger.info(f"step.0 user_prompt={user_prompt}")
+
+         for i in range(max_steps):
+             assistant_message: Message = self.llm.chat(messages, tools=[x.tool_call for x in tools])
+             messages.append(assistant_message)
+             logger.info(f"assistant.round{i}.reasoning_content={assistant_message.reasoning_content}\n"
+                         f"content={assistant_message.content}\n"
+                         f"tool.size={len(assistant_message.tool_calls)}")
+
+             if not assistant_message.tool_calls:
+                 break
+
+             for j, tool_call in enumerate(assistant_message.tool_calls):
+                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} argument_dict={tool_call.argument_dict}")
+
+                 if tool_call.name not in tool_dict:
+                     logger.warning(f"step={i} no tool_call.name={tool_call.name}")
+                     continue
+
+                 self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
+                 time.sleep(1)
+
+             for i, (tool_result, tool_call) in enumerate(zip(self.join_task(), assistant_message.tool_calls)):
+                 logger.info(f"submit step={i} tool_calls.name={tool_call.name} tool_result={tool_result}")
+                 if isinstance(tool_result, FlowResponse):
+                     tool_result = tool_result.answer
+                 else:
+                     tool_result = str(tool_result)
+                 tool_message = Message(role=Role.TOOL, content=tool_result, tool_call_id=tool_call.id)
+                 messages.append(tool_message)
+
+         # Store results in context instead of response
+         self.context.response.messages = messages
+         self.context.response.answer = messages[-1].content
+
+
+ if __name__ == "__main__":
+     C.set_default_service_config().init_by_service_config()
+     context = FlowContext(query="茅台和五粮现在股价多少?")
+
+     op = ReactV2Op()
+     op(context=context)
flowllm/op/agent/react_v2_prompt.yaml ADDED
@@ -0,0 +1,35 @@
+ role_prompt: |
+   You are a helpful assistant.
+   The current time is {time}.
+   Please proactively choose the most suitable tool or combination of tools based on the user's question, including {tools} etc.
+   Please first think about how to break down the problem into subtasks, what tools and parameters should be used for each subtask, and finally provide the tool call name and parameters.
+   Try calling the same tool multiple times with different parameters to obtain information from various perspectives.
+   Please determine the response language based on the language of the user's question.
+
+   {query}
+
+ next_prompt: |
+   Think based on the current content and the user's question: Is the current context sufficient to answer the user's question?
+   - If the current context is not sufficient to answer the user's question, consider what information is missing.
+   Re-plan and think about how to break down the missing information into subtasks.
+   For each subtask, determine what tools and parameters should be used for the query.
+   Please first provide the reasoning process, then give the tool call name and parameters.
+   - If the current context is sufficient to answer the user's question, please integrate the context and provide a complete answer to the user's question.
+
+ role_prompt_zh: |
+   你是一个有用的助手。
+   当前时间是 {time}。
+   请根据用户的问题,主动选择最合适的工具或工具组合,包括 {tools} 等。
+   请先思考如何将问题分解为子任务,每个子任务应使用哪些工具和参数,最后提供工具调用名称和参数。
+   尝试多次使用相同的工具,但使用不同的参数,从多个角度获取信息。
+   请根据用户问题的语言来确定回复的语言。
+
+   {query}
+
+ next_prompt_zh: |
+   根据当前内容和用户的问题进行思考:当前上下文是否足以回答用户的问题?
+   - 如果当前上下文不足以回答用户的问题,请考虑缺少哪些信息。
+   重新规划并思考如何将缺失的信息分解为子任务。
+   对于每个子任务,确定应使用哪些工具和参数进行查询。
+   请先提供推理过程,然后给出工具调用名称和参数。
+   - 如果当前上下文足以回答用户的问题,请整合上下文,为用户的问题提供一个完整的答案。
flowllm/op/akshare/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .get_ak_a_code_op import GetAkACodeOp
+ from .get_ak_a_info_op import GetAkAInfoOp, GetAkASpotOp, GetAkAMoneyFlowOp, GetAkAFinancialInfoOp, \
+     GetAkANewsOp, MergeAkAInfoOp
flowllm/op/akshare/get_ak_a_code_op.py CHANGED
@@ -6,13 +6,12 @@ import akshare as ak
  import pandas as pd
  from loguru import logger
 
- from flowllm.config.pydantic_config_parser import get_default_config
  from flowllm.context.flow_context import FlowContext
  from flowllm.context.service_context import C
  from flowllm.enumeration.role import Role
- from flowllm.op.llm_base_op import BaseLLMOp
+ from flowllm.op.base_llm_op import BaseLLMOp
  from flowllm.schema.message import Message
- from flowllm.utils.dataframe_cache import DataFrameCache
+ from flowllm.storage.cache.data_cache import DataCache
  from flowllm.utils.timer import timer
 
 
@@ -25,9 +24,9 @@ class GetAkACodeOp(BaseLLMOp):
 
      @staticmethod
      def download_a_stock_df():
-         df_cache = DataFrameCache()
+         cache = DataCache()
          save_df_key: str = "all_a_stock_name_code"
-         if not df_cache.exists(save_df_key):
+         if not cache.exists(save_df_key):
              stock_sh_a_spot_em_df = ak.stock_sh_a_spot_em()
              stock_sz_a_spot_em_df = ak.stock_sz_a_spot_em()
              stock_bj_a_spot_em_df = ak.stock_bj_a_spot_em()
@@ -36,9 +35,9 @@
              df = df.drop(columns=["序号"])
              df = df.reset_index(drop=True)
              df = df.sort_values(by="代码")
-             df_cache.save(save_df_key, df, expire_hours=0.25)
+             cache.save(save_df_key, df, expire_hours=0.25)
 
-         df = df_cache.load(save_df_key, dtype={"代码": str})
+         df = cache.load(save_df_key, dtype={"代码": str})
          return df
 
      def get_name_code_dict(self) -> dict:
@@ -73,7 +72,7 @@
          stock_names = "\n".join([x.strip() for x in stock_names if x])
          prompt = self.prompt_format(prompt_name="find_stock_name",
                                      stock_names=stock_names,
-                                     query=self.flow_context.query)
+                                     query=self.context.query)
          logger.info(f"prompt={prompt}")
 
          def callback_fn(msg: Message):
@@ -97,20 +96,13 @@
              time.sleep(1)
 
          stock_names = sorted(set(self.join_task()))
-         self.flow_context.code_infos = {name_code_dict[n]: {} for n in stock_names}
-         logger.info(f"code_infos={self.flow_context.code_infos}")
+         self.context.code_infos = {name_code_dict[n]: {"股票名称": n} for n in stock_names}
+         logger.info(f"code_infos={self.context.code_infos}")
 
 
  if __name__ == "__main__":
-     from concurrent.futures import ThreadPoolExecutor
-
-     C.thread_pool = ThreadPoolExecutor(max_workers=10)
-     flow_context = FlowContext()
-     service_config = get_default_config()
-     flow_context.query = "茅台和五粮现在价格多少?"
-     flow_context.service_config = service_config
-
-     op = GetAkACodeOp(flow_context=flow_context)
-     # for x in op.split_list(list(range(10)), 3):
-     #     print(x)
-     op.execute()
+     C.set_default_service_config().init_by_service_config()
+     context = FlowContext(query="茅台和五粮现在价格多少?")
+
+     op = GetAkACodeOp()
+     op(context=context)
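
The cache migration above (DataFrameCache renamed to DataCache and moved to flowllm/storage/cache/data_cache.py) keeps an exists/save/load pattern. A minimal sketch of that usage, with a hypothetical cache key and assuming only the keyword arguments visible in the diff:

import akshare as ak
import pandas as pd

from flowllm.storage.cache.data_cache import DataCache


def load_sh_spot_df() -> pd.DataFrame:
    cache = DataCache()
    key = "sh_a_stock_spot"  # hypothetical key for this example

    # Re-download only when the key is missing or expired.
    if not cache.exists(key):
        df = ak.stock_sh_a_spot_em()
        cache.save(key, df, expire_hours=0.25)  # ~15 minutes, as used in the diff above

    # load() forwards pandas read kwargs such as dtype, as shown above.
    return cache.load(key, dtype={"代码": str})
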