flowllm 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. flowllm/__init__.py +21 -0
  2. flowllm/app.py +15 -0
  3. flowllm/client/__init__.py +25 -0
  4. flowllm/client/async_http_client.py +81 -0
  5. flowllm/client/http_client.py +81 -0
  6. flowllm/client/mcp_client.py +133 -0
  7. flowllm/client/sync_mcp_client.py +116 -0
  8. flowllm/config/__init__.py +1 -0
  9. flowllm/config/default.yaml +77 -0
  10. flowllm/config/empty.yaml +37 -0
  11. flowllm/config/pydantic_config_parser.py +242 -0
  12. flowllm/context/base_context.py +79 -0
  13. flowllm/context/flow_context.py +16 -0
  14. llmflow/op/prompt_mixin.py → flowllm/context/prompt_handler.py +25 -14
  15. flowllm/context/registry.py +30 -0
  16. flowllm/context/service_context.py +147 -0
  17. flowllm/embedding_model/__init__.py +1 -0
  18. {llmflow → flowllm}/embedding_model/base_embedding_model.py +93 -2
  19. {llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py +71 -13
  20. flowllm/flow/__init__.py +1 -0
  21. flowllm/flow/base_flow.py +72 -0
  22. flowllm/flow/base_tool_flow.py +15 -0
  23. flowllm/flow/gallery/__init__.py +8 -0
  24. flowllm/flow/gallery/cmd_flow.py +11 -0
  25. flowllm/flow/gallery/code_tool_flow.py +30 -0
  26. flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  27. flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  28. flowllm/flow/gallery/expression_tool_flow.py +18 -0
  29. flowllm/flow/gallery/mock_tool_flow.py +67 -0
  30. flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  31. flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  32. flowllm/flow/parser/expression_parser.py +171 -0
  33. flowllm/llm/__init__.py +2 -0
  34. {llmflow → flowllm}/llm/base_llm.py +100 -18
  35. flowllm/llm/litellm_llm.py +455 -0
  36. flowllm/llm/openai_compatible_llm.py +439 -0
  37. flowllm/op/__init__.py +11 -0
  38. llmflow/op/react/react_v1_op.py → flowllm/op/agent/react_op.py +17 -22
  39. flowllm/op/akshare/__init__.py +3 -0
  40. flowllm/op/akshare/get_ak_a_code_op.py +108 -0
  41. flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
  42. flowllm/op/akshare/get_ak_a_info_op.py +140 -0
  43. flowllm/op/base_llm_op.py +64 -0
  44. flowllm/op/base_op.py +148 -0
  45. flowllm/op/base_ray_op.py +313 -0
  46. flowllm/op/code/__init__.py +1 -0
  47. flowllm/op/code/execute_code_op.py +42 -0
  48. flowllm/op/gallery/__init__.py +2 -0
  49. flowllm/op/gallery/mock_op.py +42 -0
  50. flowllm/op/gallery/terminate_op.py +29 -0
  51. flowllm/op/parallel_op.py +23 -0
  52. flowllm/op/search/__init__.py +3 -0
  53. flowllm/op/search/dashscope_deep_research_op.py +260 -0
  54. flowllm/op/search/dashscope_search_op.py +179 -0
  55. flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  56. flowllm/op/search/tavily_search_op.py +102 -0
  57. flowllm/op/sequential_op.py +21 -0
  58. flowllm/schema/flow_request.py +12 -0
  59. flowllm/schema/flow_response.py +12 -0
  60. flowllm/schema/message.py +35 -0
  61. flowllm/schema/service_config.py +72 -0
  62. flowllm/schema/tool_call.py +118 -0
  63. {llmflow → flowllm}/schema/vector_node.py +1 -0
  64. flowllm/service/__init__.py +3 -0
  65. flowllm/service/base_service.py +68 -0
  66. flowllm/service/cmd_service.py +15 -0
  67. flowllm/service/http_service.py +79 -0
  68. flowllm/service/mcp_service.py +47 -0
  69. flowllm/storage/__init__.py +1 -0
  70. flowllm/storage/cache/__init__.py +1 -0
  71. flowllm/storage/cache/cache_data_handler.py +104 -0
  72. flowllm/storage/cache/data_cache.py +375 -0
  73. flowllm/storage/vector_store/__init__.py +3 -0
  74. flowllm/storage/vector_store/base_vector_store.py +44 -0
  75. {llmflow → flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
  76. {llmflow → flowllm/storage}/vector_store/es_vector_store.py +11 -11
  77. llmflow/vector_store/file_vector_store.py → flowllm/storage/vector_store/local_vector_store.py +110 -11
  78. flowllm/utils/common_utils.py +52 -0
  79. flowllm/utils/fetch_url.py +117 -0
  80. flowllm/utils/llm_utils.py +28 -0
  81. flowllm/utils/ridge_v2.py +54 -0
  82. {llmflow → flowllm}/utils/timer.py +5 -4
  83. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/METADATA +45 -388
  84. flowllm-0.1.2.dist-info/RECORD +99 -0
  85. flowllm-0.1.2.dist-info/entry_points.txt +2 -0
  86. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/licenses/LICENSE +1 -1
  87. flowllm-0.1.2.dist-info/top_level.txt +1 -0
  88. flowllm-0.1.0.dist-info/RECORD +0 -66
  89. flowllm-0.1.0.dist-info/entry_points.txt +0 -3
  90. flowllm-0.1.0.dist-info/top_level.txt +0 -1
  91. llmflow/app.py +0 -53
  92. llmflow/config/config_parser.py +0 -80
  93. llmflow/config/mock_config.yaml +0 -58
  94. llmflow/embedding_model/__init__.py +0 -5
  95. llmflow/enumeration/agent_state.py +0 -8
  96. llmflow/llm/__init__.py +0 -5
  97. llmflow/llm/openai_compatible_llm.py +0 -283
  98. llmflow/mcp_server.py +0 -110
  99. llmflow/op/__init__.py +0 -10
  100. llmflow/op/base_op.py +0 -125
  101. llmflow/op/mock_op.py +0 -40
  102. llmflow/op/vector_store/__init__.py +0 -13
  103. llmflow/op/vector_store/recall_vector_store_op.py +0 -48
  104. llmflow/op/vector_store/update_vector_store_op.py +0 -28
  105. llmflow/op/vector_store/vector_store_action_op.py +0 -46
  106. llmflow/pipeline/pipeline.py +0 -94
  107. llmflow/pipeline/pipeline_context.py +0 -37
  108. llmflow/schema/app_config.py +0 -69
  109. llmflow/schema/experience.py +0 -144
  110. llmflow/schema/message.py +0 -68
  111. llmflow/schema/request.py +0 -32
  112. llmflow/schema/response.py +0 -29
  113. llmflow/service/__init__.py +0 -0
  114. llmflow/service/llmflow_service.py +0 -96
  115. llmflow/tool/__init__.py +0 -9
  116. llmflow/tool/base_tool.py +0 -80
  117. llmflow/tool/code_tool.py +0 -43
  118. llmflow/tool/dashscope_search_tool.py +0 -162
  119. llmflow/tool/mcp_tool.py +0 -77
  120. llmflow/tool/tavily_search_tool.py +0 -109
  121. llmflow/tool/terminate_tool.py +0 -23
  122. llmflow/utils/__init__.py +0 -0
  123. llmflow/utils/common_utils.py +0 -17
  124. llmflow/utils/file_handler.py +0 -25
  125. llmflow/utils/http_client.py +0 -156
  126. llmflow/utils/op_utils.py +0 -102
  127. llmflow/utils/registry.py +0 -33
  128. llmflow/vector_store/__init__.py +0 -7
  129. llmflow/vector_store/base_vector_store.py +0 -136
  130. {llmflow → flowllm/context}/__init__.py +0 -0
  131. {llmflow/config → flowllm/enumeration}/__init__.py +0 -0
  132. {llmflow → flowllm}/enumeration/chunk_enum.py +0 -0
  133. {llmflow → flowllm}/enumeration/http_enum.py +0 -0
  134. {llmflow → flowllm}/enumeration/role.py +0 -0
  135. {llmflow/enumeration → flowllm/flow/parser}/__init__.py +0 -0
  136. {llmflow/op/react → flowllm/op/agent}/__init__.py +0 -0
  137. /llmflow/op/react/react_v1_prompt.yaml → /flowllm/op/agent/react_prompt.yaml +0 -0
  138. {llmflow/pipeline → flowllm/schema}/__init__.py +0 -0
  139. {llmflow/schema → flowllm/utils}/__init__.py +0 -0
  140. {llmflow → flowllm}/utils/singleton.py +0 -0
  141. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/WHEEL +0 -0
flowllm/llm/openai_compatible_llm.py ADDED
@@ -0,0 +1,439 @@
+ import os
+ from typing import List, Dict
+
+ from loguru import logger
+ from openai import OpenAI, AsyncOpenAI
+ from openai.types import CompletionUsage
+ from pydantic import Field, PrivateAttr, model_validator
+
+ from flowllm.context.service_context import C
+ from flowllm.enumeration.chunk_enum import ChunkEnum
+ from flowllm.enumeration.role import Role
+ from flowllm.llm.base_llm import BaseLLM
+ from flowllm.schema.message import Message
+ from flowllm.schema.tool_call import ToolCall
+
+
+ @C.register_llm("openai_compatible")
+ class OpenAICompatibleBaseLLM(BaseLLM):
+     """
+     OpenAI-compatible LLM implementation supporting streaming and tool calls.
+
+     This class implements the BaseLLM interface for OpenAI-compatible APIs,
+     including support for:
+     - Streaming responses with different chunk types (thinking, answer, tools)
+     - Tool calling with parallel execution
+     - Reasoning/thinking content from supported models
+     - Robust error handling and retries
+     """
+
+     # API configuration
+     api_key: str = Field(default_factory=lambda: os.getenv("FLOW_LLM_API_KEY"),
+                          description="API key for authentication")
+     base_url: str = Field(default_factory=lambda: os.getenv("FLOW_LLM_BASE_URL"),
+                           description="Base URL for the API endpoint")
+     _client: OpenAI = PrivateAttr()
+     _aclient: AsyncOpenAI = PrivateAttr()
+
+     @model_validator(mode="after")
+     def init_client(self):
+         """
+         Initialize the OpenAI clients after model validation.
+
+         This validator runs after all field validation is complete,
+         ensuring we have valid API credentials before creating the clients.
+
+         Returns:
+             Self for method chaining
+         """
+         self._client = OpenAI(api_key=self.api_key, base_url=self.base_url)
+         self._aclient = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
+         return self
+
+     def stream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
+         """
+         Stream chat completions from OpenAI-compatible API.
+
+         This method handles streaming responses and categorizes chunks into different types:
+         - THINK: Reasoning/thinking content from the model
+         - ANSWER: Regular response content
+         - TOOL: Tool calls that need to be executed
+         - USAGE: Token usage statistics
+         - ERROR: Error information
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             **kwargs: Additional parameters
+
+         Yields:
+             Tuple of (chunk_content, ChunkEnum) for each streaming piece
+         """
+         for i in range(self.max_retries):
+             try:
+                 extra_body = {}
+                 if self.enable_thinking:
+                     extra_body["enable_thinking"] = True  # qwen3 params
+
+                 completion = self._client.chat.completions.create(
+                     model=self.model_name,
+                     messages=[x.simple_dump() for x in messages],
+                     seed=self.seed,
+                     top_p=self.top_p,
+                     stream=True,
+                     stream_options=self.stream_options,
+                     temperature=self.temperature,
+                     extra_body=extra_body,
+                     tools=[x.simple_input_dump() for x in tools] if tools else None,
+                     parallel_tool_calls=self.parallel_tool_calls)
+
+                 # Initialize tool call tracking
+                 ret_tools: List[ToolCall] = []  # Accumulate tool calls across chunks
+                 is_answering: bool = False  # Track when model starts answering
+
+                 # Process each chunk in the streaming response
+                 for chunk in completion:
+                     # Handle chunks without choices (usually usage info)
+                     if not chunk.choices:
+                         yield chunk.usage, ChunkEnum.USAGE
+
+                     else:
+                         delta = chunk.choices[0].delta
+
+                         # Handle reasoning/thinking content (model's internal thoughts)
+                         if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
+                             yield delta.reasoning_content, ChunkEnum.THINK
+
+                         else:
+                             # Mark transition from thinking to answering
+                             if not is_answering:
+                                 is_answering = True
+
+                             # Handle regular response content
+                             if delta.content is not None:
+                                 yield delta.content, ChunkEnum.ANSWER
+
+                             # Handle tool calls (function calling)
+                             if delta.tool_calls is not None:
+                                 for tool_call in delta.tool_calls:
+                                     index = tool_call.index
+
+                                     # Ensure we have enough tool call slots
+                                     while len(ret_tools) <= index:
+                                         ret_tools.append(ToolCall(index=index))
+
+                                     # Accumulate tool call information across chunks
+                                     if tool_call.id:
+                                         ret_tools[index].id += tool_call.id
+
+                                     if tool_call.function and tool_call.function.name:
+                                         ret_tools[index].name += tool_call.function.name
+
+                                     if tool_call.function and tool_call.function.arguments:
+                                         ret_tools[index].arguments += tool_call.function.arguments
+
+                 # Yield completed tool calls after streaming finishes
+                 if ret_tools:
+                     tool_dict: Dict[str, ToolCall] = {x.name: x for x in tools} if tools else {}
+                     for tool in ret_tools:
+                         # Only yield tool calls that correspond to available tools
+                         if tool.name not in tool_dict:
+                             continue
+
+                         yield tool, ChunkEnum.TOOL
+
+                 return
+
+             except Exception as e:
+                 logger.exception(f"stream chat with model={self.model_name} encounter error with e={e.args}")
+
+                 # Handle retry logic
+                 if i == self.max_retries - 1 and self.raise_exception:
+                     raise e
+                 else:
+                     yield e.args, ChunkEnum.ERROR
+
+     async def astream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
+         """
+         Async stream chat completions from OpenAI-compatible API.
+
+         This method handles async streaming responses and categorizes chunks into different types:
+         - THINK: Reasoning/thinking content from the model
+         - ANSWER: Regular response content
+         - TOOL: Tool calls that need to be executed
+         - USAGE: Token usage statistics
+         - ERROR: Error information
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             **kwargs: Additional parameters
+
+         Yields:
+             Tuple of (chunk_content, ChunkEnum) for each streaming piece
+         """
+         for i in range(self.max_retries):
+             try:
+                 extra_body = {}
+                 if self.enable_thinking:
+                     extra_body["enable_thinking"] = True  # qwen3 params
+
+                 completion = await self._aclient.chat.completions.create(
+                     model=self.model_name,
+                     messages=[x.simple_dump() for x in messages],
+                     seed=self.seed,
+                     top_p=self.top_p,
+                     stream=True,
+                     stream_options=self.stream_options,
+                     temperature=self.temperature,
+                     extra_body=extra_body,
+                     tools=[x.simple_input_dump() for x in tools] if tools else None,
+                     parallel_tool_calls=self.parallel_tool_calls)
+
+                 # Initialize tool call tracking
+                 ret_tools: List[ToolCall] = []  # Accumulate tool calls across chunks
+                 is_answering: bool = False  # Track when model starts answering
+
+                 # Process each chunk in the streaming response
+                 async for chunk in completion:
+                     # Handle chunks without choices (usually usage info)
+                     if not chunk.choices:
+                         yield chunk.usage, ChunkEnum.USAGE
+
+                     else:
+                         delta = chunk.choices[0].delta
+
+                         # Handle reasoning/thinking content (model's internal thoughts)
+                         if hasattr(delta, 'reasoning_content') and delta.reasoning_content is not None:
+                             yield delta.reasoning_content, ChunkEnum.THINK
+
+                         else:
+                             # Mark transition from thinking to answering
+                             if not is_answering:
+                                 is_answering = True
+
+                             # Handle regular response content
+                             if delta.content is not None:
+                                 yield delta.content, ChunkEnum.ANSWER
+
+                             # Handle tool calls (function calling)
+                             if delta.tool_calls is not None:
+                                 for tool_call in delta.tool_calls:
+                                     index = tool_call.index
+
+                                     # Ensure we have enough tool call slots
+                                     while len(ret_tools) <= index:
+                                         ret_tools.append(ToolCall(index=index))
+
+                                     # Accumulate tool call information across chunks
+                                     if tool_call.id:
+                                         ret_tools[index].id += tool_call.id
+
+                                     if tool_call.function and tool_call.function.name:
+                                         ret_tools[index].name += tool_call.function.name
+
+                                     if tool_call.function and tool_call.function.arguments:
+                                         ret_tools[index].arguments += tool_call.function.arguments
+
+                 # Yield completed tool calls after streaming finishes
+                 if ret_tools:
+                     tool_dict: Dict[str, ToolCall] = {x.name: x for x in tools} if tools else {}
+                     for tool in ret_tools:
+                         # Only yield tool calls that correspond to available tools
+                         if tool.name not in tool_dict:
+                             continue
+
+                         yield tool, ChunkEnum.TOOL
+
+                 return
+
+             except Exception as e:
+                 logger.exception(f"async stream chat with model={self.model_name} encounter error with e={e.args}")
+
+                 # Handle retry logic
+                 if i == self.max_retries - 1 and self.raise_exception:
+                     raise e
+                 else:
+                     yield e.args, ChunkEnum.ERROR
+
+     def _chat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+               **kwargs) -> Message:
+         """
+         Perform a complete chat completion by aggregating streaming chunks.
+
+         This method consumes the entire streaming response and combines all
+         chunks into a single Message object. It separates reasoning content,
+         regular answer content, and tool calls.
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             enable_stream_print: Whether to print streaming response to console
+             **kwargs: Additional parameters
+
+         Returns:
+             Complete Message with all content aggregated
+         """
+
+         enter_think = False  # Whether we've started printing thinking content
+         enter_answer = False  # Whether we've started printing answer content
+         reasoning_content = ""  # Model's internal reasoning
+         answer_content = ""  # Final response content
+         tool_calls = []  # List of tool calls to execute
+
+         # Consume streaming response and aggregate chunks by type
+         for chunk, chunk_enum in self.stream_chat(messages, tools, **kwargs):
+             if chunk_enum is ChunkEnum.USAGE:
+                 # Display token usage statistics
+                 if enable_stream_print:
+                     if isinstance(chunk, CompletionUsage):
+                         print(f"\n<usage>{chunk.model_dump_json(indent=2)}</usage>")
+                     else:
+                         print(f"\n<usage>{chunk}</usage>")
+
+             elif chunk_enum is ChunkEnum.THINK:
+                 if enable_stream_print:
+                     # Format thinking/reasoning content
+                     if not enter_think:
+                         enter_think = True
+                         print("<think>\n", end="")
+                     print(chunk, end="")
+
+                 reasoning_content += chunk
+
+             elif chunk_enum is ChunkEnum.ANSWER:
+                 if enable_stream_print:
+                     if not enter_answer:
+                         enter_answer = True
+                         # Close thinking section if we were in it
+                         if enter_think:
+                             print("\n</think>")
+                     print(chunk, end="")
+
+                 answer_content += chunk
+
+             elif chunk_enum is ChunkEnum.TOOL:
+                 if enable_stream_print:
+                     print(f"\n<tool>{chunk.model_dump_json()}</tool>", end="")
+
+                 tool_calls.append(chunk)
+
+             elif chunk_enum is ChunkEnum.ERROR:
+                 if enable_stream_print:
+                     # Display error information
+                     print(f"\n<error>{chunk}</error>", end="")
+
+         # Construct complete response message
+         return Message(role=Role.ASSISTANT,
+                        reasoning_content=reasoning_content,
+                        content=answer_content,
+                        tool_calls=tool_calls)
+
+     async def _achat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+                      **kwargs) -> Message:
+         """
+         Perform an async complete chat completion by aggregating streaming chunks.
+
+         This method consumes the entire async streaming response and combines all
+         chunks into a single Message object. It separates reasoning content,
+         regular answer content, and tool calls.
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools available to the model
+             enable_stream_print: Whether to print streaming response to console
+             **kwargs: Additional parameters
+
+         Returns:
+             Complete Message with all content aggregated
+         """
+
+         enter_think = False  # Whether we've started printing thinking content
+         enter_answer = False  # Whether we've started printing answer content
+         reasoning_content = ""  # Model's internal reasoning
+         answer_content = ""  # Final response content
+         tool_calls = []  # List of tool calls to execute
+
+         # Consume async streaming response and aggregate chunks by type
+         async for chunk, chunk_enum in self.astream_chat(messages, tools, **kwargs):
+             if chunk_enum is ChunkEnum.USAGE:
+                 # Display token usage statistics
+                 if enable_stream_print:
+                     if isinstance(chunk, CompletionUsage):
+                         print(f"\n<usage>{chunk.model_dump_json(indent=2)}</usage>")
+                     else:
+                         print(f"\n<usage>{chunk}</usage>")
+
+             elif chunk_enum is ChunkEnum.THINK:
+                 if enable_stream_print:
+                     # Format thinking/reasoning content
+                     if not enter_think:
+                         enter_think = True
+                         print("<think>\n", end="")
+                     print(chunk, end="")
+
+                 reasoning_content += chunk
+
+             elif chunk_enum is ChunkEnum.ANSWER:
+                 if enable_stream_print:
+                     if not enter_answer:
+                         enter_answer = True
+                         # Close thinking section if we were in it
+                         if enter_think:
+                             print("\n</think>")
+                     print(chunk, end="")
+
+                 answer_content += chunk
+
+             elif chunk_enum is ChunkEnum.TOOL:
+                 if enable_stream_print:
+                     print(f"\n<tool>{chunk.model_dump_json()}</tool>", end="")
+
+                 tool_calls.append(chunk)
+
+             elif chunk_enum is ChunkEnum.ERROR:
+                 if enable_stream_print:
+                     # Display error information
+                     print(f"\n<error>{chunk}</error>", end="")
+
+         # Construct complete response message
+         return Message(role=Role.ASSISTANT,
+                        reasoning_content=reasoning_content,
+                        content=answer_content,
+                        tool_calls=tool_calls)
+
+
+ async def async_main():
+     from flowllm.utils.common_utils import load_env
+
+     load_env()
+
+     # model_name = "qwen-max-2025-01-25"
+     model_name = "qwen3-30b-a3b-thinking-2507"
+     llm = OpenAICompatibleBaseLLM(model_name=model_name)
+
+     # Test async chat
+     message: Message = await llm.achat([Message(role=Role.USER, content="hello")], [],
+                                        enable_stream_print=True)
+     print("Async result:", message)
+
+
+ def main():
+     from flowllm.utils.common_utils import load_env
+
+     load_env()
+
+     model_name = "qwen-max-2025-01-25"
+     llm = OpenAICompatibleBaseLLM(model_name=model_name)
+
+     # Test sync chat
+     message: Message = llm.chat([Message(role=Role.USER, content="hello")], [],
+                                 enable_stream_print=False)
+     print("Sync result:", message)
+
+
+ if __name__ == "__main__":
+     # main()
+
+     import asyncio
+     asyncio.run(async_main())
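Since this module is new in 0.1.2, a minimal consumption sketch may help when reading the diff. It is illustrative only and not part of the package; it assumes FLOW_LLM_API_KEY and FLOW_LLM_BASE_URL point at a reachable OpenAI-compatible endpoint that serves the named model.

```python
# Illustrative sketch: iterating the (chunk, ChunkEnum) pairs yielded by stream_chat.
from flowllm.enumeration.chunk_enum import ChunkEnum
from flowllm.enumeration.role import Role
from flowllm.llm.openai_compatible_llm import OpenAICompatibleBaseLLM
from flowllm.schema.message import Message
from flowllm.utils.common_utils import load_env

load_env()  # loads FLOW_LLM_API_KEY / FLOW_LLM_BASE_URL from the environment
llm = OpenAICompatibleBaseLLM(model_name="qwen3-30b-a3b-thinking-2507")  # assumed model name

for chunk, chunk_enum in llm.stream_chat([Message(role=Role.USER, content="hello")]):
    if chunk_enum is ChunkEnum.THINK:
        print(chunk, end="")              # reasoning tokens
    elif chunk_enum is ChunkEnum.ANSWER:
        print(chunk, end="")              # answer tokens
    elif chunk_enum is ChunkEnum.TOOL:
        print(chunk.model_dump_json())    # a completed ToolCall
    elif chunk_enum is ChunkEnum.ERROR:
        print(f"error: {chunk}")
```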
flowllm/op/__init__.py ADDED
@@ -0,0 +1,11 @@
+ from .base_llm_op import BaseLLMOp
+ from .base_op import BaseOp
+ from .base_ray_op import BaseRayOp
+
+ """
+ op folder
+ """
+ from . import akshare
+ from . import code
+ from . import gallery
+ from . import search
llmflow/op/react/react_v1_op.py → flowllm/op/agent/react_op.py
@@ -4,36 +4,30 @@ from typing import List, Dict
 
  from loguru import logger
 
- from llmflow.enumeration.role import Role
- from llmflow.op import OP_REGISTRY
- from llmflow.op.base_op import BaseOp
- from llmflow.schema.message import Message
- from llmflow.schema.request import AgentRequest
- from llmflow.schema.response import AgentResponse
- from llmflow.tool import TOOL_REGISTRY
- from llmflow.tool.base_tool import BaseTool
+ from flowllm import C, BaseLLMOp
+ from flowllm.flow.base_tool_flow import BaseToolFlow
+ from flowllm.flow.gallery import DashscopeSearchToolFlow, CodeToolFlow, TerminateToolFlow
+ from flowllm.schema.message import Message, Role
 
 
- @OP_REGISTRY.register()
- class ReactV1Op(BaseOp):
- current_path: str = __file__
+ @C.register_op()
+ class ReactOp(BaseLLMOp):
+ # TODO: test react op
+ file_path: str = __file__
 
  def execute(self):
- request: AgentRequest = self.context.request
- response: AgentResponse = self.context.response
+ query: str = self.context.query
 
  max_steps: int = int(self.op_params.get("max_steps", 10))
- # dashscope_search_tool tavily_search_tool
- tool_names = self.op_params.get("tool_names", "code_tool,tavily_search_tool,terminate_tool")
- tools: List[BaseTool] = [TOOL_REGISTRY[x.strip()]() for x in tool_names.split(",") if x]
- tool_dict: Dict[str, BaseTool] = {x.name: x for x in tools}
+ tools: List[BaseToolFlow] = [DashscopeSearchToolFlow(), CodeToolFlow(), TerminateToolFlow()]
+ tool_dict: Dict[str, BaseToolFlow] = {x.name: x for x in tools}
  now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  has_terminate_tool = False
 
  user_prompt = self.prompt_format(prompt_name="role_prompt",
  time=now_time,
  tools=",".join([x.name for x in tools]),
- query=request.query)
+ query=query)
  messages: List[Message] = [Message(role=Role.USER, content=user_prompt)]
  logger.info(f"step.0 user_prompt={user_prompt}")
 
@@ -41,7 +35,7 @@ class ReactV1Op(BaseOp):
  if has_terminate_tool:
  assistant_message: Message = self.llm.chat(messages)
  else:
- assistant_message: Message = self.llm.chat(messages, tools=tools)
+ assistant_message: Message = self.llm.chat(messages, tools=[x.tool_call for x in tools])
 
  messages.append(assistant_message)
  logger.info(f"assistant.{i}.reasoning_content={assistant_message.reasoning_content}\n"
@@ -67,7 +61,7 @@ class ReactV1Op(BaseOp):
  if tool_call.name not in tool_dict:
  continue
 
- self.submit_task(tool_dict[tool_call.name].execute, **tool_call.argument_dict)
+ self.submit_task(tool_dict[tool_call.name].__call__, **tool_call.argument_dict)
  time.sleep(1)
 
  if not has_terminate_tool:
@@ -84,5 +78,6 @@ class ReactV1Op(BaseOp):
  assistant_message.tool_calls.clear()
  messages.append(Message(role=Role.USER, content=self.prompt_format(prompt_name="final_prompt")))
 
- response.messages = messages
- response.answer = response.messages[-1].content
+ # Store results in context instead of response
+ self.context.messages = messages
+ self.context.answer = messages[-1].content
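To make the refactor above easier to follow, here is a simplified outline of the ReAct loop that ReactOp implements. This is not code from the package: the real op dispatches tool calls through submit_task, builds prompts from templates, and handles a terminate tool; Role.TOOL and the direct result message below are assumptions made purely for the sketch.

```python
# Simplified ReAct loop (illustrative only): ask the model with tool schemas,
# run any returned tool calls, feed the observations back, repeat.
def react_loop(llm, tools, user_prompt: str, max_steps: int = 10):
    tool_dict = {t.name: t for t in tools}
    messages = [Message(role=Role.USER, content=user_prompt)]

    for _ in range(max_steps):
        assistant = llm.chat(messages, tools=[t.tool_call for t in tools])
        messages.append(assistant)

        if not assistant.tool_calls:
            break  # the model answered directly; stop iterating

        for tool_call in assistant.tool_calls:
            if tool_call.name not in tool_dict:
                continue
            # Execute the tool flow and return its observation to the model
            result = tool_dict[tool_call.name](**tool_call.argument_dict)
            messages.append(Message(role=Role.TOOL, content=str(result)))  # Role.TOOL is assumed

    return messages
```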
flowllm/op/akshare/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .get_ak_a_code_op import GetAkACodeOp
+ from .get_ak_a_info_op import GetAkAInfoOp, GetAkASpotOp, GetAkAMoneyFlowOp, GetAkAFinancialInfoOp, \
+     GetAkANewsOp, MergeAkAInfoOp
flowllm/op/akshare/get_ak_a_code_op.py ADDED
@@ -0,0 +1,108 @@
+ import json
+ import time
+ from typing import List
+
+ import akshare as ak
+ import pandas as pd
+ from loguru import logger
+
+ from flowllm.context.flow_context import FlowContext
+ from flowllm.context.service_context import C
+ from flowllm.enumeration.role import Role
+ from flowllm.op.base_llm_op import BaseLLMOp
+ from flowllm.schema.message import Message
+ from flowllm.storage.cache.data_cache import DataCache
+ from flowllm.utils.timer import timer
+
+
+ @C.register_op()
+ class GetAkACodeOp(BaseLLMOp):
+     file_path: str = __file__
+
+     def __init__(self, language: str = "zh", llm="qwen3_30b_instruct", **kwargs):
+         super().__init__(language=language, llm=llm, **kwargs)
+
+     @staticmethod
+     def download_a_stock_df():
+         cache = DataCache()
+         save_df_key: str = "all_a_stock_name_code"
+         if not cache.exists(save_df_key):
+             stock_sh_a_spot_em_df = ak.stock_sh_a_spot_em()
+             stock_sz_a_spot_em_df = ak.stock_sz_a_spot_em()
+             stock_bj_a_spot_em_df = ak.stock_bj_a_spot_em()
+
+             df: pd.DataFrame = pd.concat([stock_sh_a_spot_em_df, stock_sz_a_spot_em_df, stock_bj_a_spot_em_df], axis=0)
+             df = df.drop(columns=["序号"])
+             df = df.reset_index(drop=True)
+             df = df.sort_values(by="代码")
+             cache.save(save_df_key, df, expire_hours=0.25)
+
+         df = cache.load(save_df_key, dtype={"代码": str})
+         return df
+
+     def get_name_code_dict(self) -> dict:
+         df = self.download_a_stock_df()
+
+         name_code_dict = {}
+         for line in df.to_dict(orient="records"):
+             name = line["名称"].replace(" ", "")
+             code = line["代码"]
+             name_code_dict[name] = code
+         logger.info(f"name_code_dict.size={len(name_code_dict)} content={str(name_code_dict)[:50]}...")
+         return name_code_dict
+
+     @staticmethod
+     def split_list(array_list: list, n: int):
+         if n <= 0:
+             raise ValueError
+
+         length = len(array_list)
+         base_size = length // n
+         remainder = length % n
+
+         start = 0
+         for i in range(n):
+             size = base_size + (1 if i < remainder else 0)
+             end = start + size
+             yield array_list[start:end]
+             start = end
+
+     @timer()
+     def find_stock_codes(self, stock_names: List[str]):
+         stock_names = "\n".join([x.strip() for x in stock_names if x])
+         prompt = self.prompt_format(prompt_name="find_stock_name",
+                                     stock_names=stock_names,
+                                     query=self.context.query)
+         logger.info(f"prompt={prompt}")
+
+         def callback_fn(msg: Message):
+             content = msg.content
+             if "```" in content:
+                 content = content.split("```")[1]
+                 content = content.strip("json")
+             content = json.loads(content.strip())
+             return content
+
+         codes: List[str] = self.llm.chat(messages=[Message(role=Role.USER, content=prompt)],
+                                          enable_stream_print=False,
+                                          callback_fn=callback_fn)
+         return codes
+
+     def execute(self):
+         name_code_dict = self.get_name_code_dict()
+         stock_names = list(name_code_dict.keys())
+         for p_stock_names in self.split_list(stock_names, n=2):
+             self.submit_task(self.find_stock_codes, stock_names=p_stock_names)
+             time.sleep(1)
+
+         stock_names = sorted(set(self.join_task()))
+         self.context.code_infos = {name_code_dict[n]: {"股票名称": n} for n in stock_names}
+         logger.info(f"code_infos={self.context.code_infos}")
+
+
+ if __name__ == "__main__":
+     C.set_default_service_config().init_by_service_config()
+     context = FlowContext(query="茅台和五粮现在价格多少?")
+
+     op = GetAkACodeOp()
+     op(context=context)
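The split_list helper above partitions a list into n nearly equal slices, giving any remainder to the leading slices, which is how execute spreads the stock-name lookup across parallel LLM calls. A standalone illustration (not package code) of that partitioning behavior:

```python
# Standalone illustration of the split_list partitioning used by GetAkACodeOp.execute:
# 7 items split into 3 slices yield sizes 3, 2, 2 (remainder goes to the leading slices).
def split_list(array_list: list, n: int):
    if n <= 0:
        raise ValueError
    length = len(array_list)
    base_size = length // n
    remainder = length % n
    start = 0
    for i in range(n):
        end = start + base_size + (1 if i < remainder else 0)
        yield array_list[start:end]
        start = end

print(list(split_list(list("abcdefg"), 3)))  # [['a', 'b', 'c'], ['d', 'e'], ['f', 'g']]
```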
flowllm/op/akshare/get_ak_a_code_prompt.yaml ADDED
@@ -0,0 +1,21 @@
+ find_stock_name_zh: |
+   # 股票标准名称
+   {stock_names}
+
+   # 用户问题
+   {query}
+
+   # 任务
+   请你提取出用户问题中提到的股票名称,并通过**股票标准名称**找到对应的标准名称,并以json格式返回。
+   如果用户问题中没有提及股票名称或者**股票标准名称**中没有找到,请返回空"[]"。
+   如果用户只是提到了某个行业,并且没有明确提及股票名称,请返回空"[]"。
+   请思考后输出你的答案。
+
+   # 答案格式
+   ```json
+   [
+     "股票标准名称1",
+     "股票标准名称2",
+     ...
+   ]
+   ```