flowllm 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (141)
  1. flowllm/__init__.py +21 -0
  2. flowllm/app.py +15 -0
  3. flowllm/client/__init__.py +25 -0
  4. flowllm/client/async_http_client.py +81 -0
  5. flowllm/client/http_client.py +81 -0
  6. flowllm/client/mcp_client.py +133 -0
  7. flowllm/client/sync_mcp_client.py +116 -0
  8. flowllm/config/__init__.py +1 -0
  9. flowllm/config/default.yaml +77 -0
  10. flowllm/config/empty.yaml +37 -0
  11. flowllm/config/pydantic_config_parser.py +242 -0
  12. flowllm/context/base_context.py +79 -0
  13. flowllm/context/flow_context.py +16 -0
  14. llmflow/op/prompt_mixin.py → flowllm/context/prompt_handler.py +25 -14
  15. flowllm/context/registry.py +30 -0
  16. flowllm/context/service_context.py +147 -0
  17. flowllm/embedding_model/__init__.py +1 -0
  18. {llmflow → flowllm}/embedding_model/base_embedding_model.py +93 -2
  19. {llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py +71 -13
  20. flowllm/flow/__init__.py +1 -0
  21. flowllm/flow/base_flow.py +72 -0
  22. flowllm/flow/base_tool_flow.py +15 -0
  23. flowllm/flow/gallery/__init__.py +8 -0
  24. flowllm/flow/gallery/cmd_flow.py +11 -0
  25. flowllm/flow/gallery/code_tool_flow.py +30 -0
  26. flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  27. flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  28. flowllm/flow/gallery/expression_tool_flow.py +18 -0
  29. flowllm/flow/gallery/mock_tool_flow.py +67 -0
  30. flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  31. flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  32. flowllm/flow/parser/expression_parser.py +171 -0
  33. flowllm/llm/__init__.py +2 -0
  34. {llmflow → flowllm}/llm/base_llm.py +100 -18
  35. flowllm/llm/litellm_llm.py +455 -0
  36. flowllm/llm/openai_compatible_llm.py +439 -0
  37. flowllm/op/__init__.py +11 -0
  38. llmflow/op/react/react_v1_op.py → flowllm/op/agent/react_op.py +17 -22
  39. flowllm/op/akshare/__init__.py +3 -0
  40. flowllm/op/akshare/get_ak_a_code_op.py +108 -0
  41. flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
  42. flowllm/op/akshare/get_ak_a_info_op.py +140 -0
  43. flowllm/op/base_llm_op.py +64 -0
  44. flowllm/op/base_op.py +148 -0
  45. flowllm/op/base_ray_op.py +313 -0
  46. flowllm/op/code/__init__.py +1 -0
  47. flowllm/op/code/execute_code_op.py +42 -0
  48. flowllm/op/gallery/__init__.py +2 -0
  49. flowllm/op/gallery/mock_op.py +42 -0
  50. flowllm/op/gallery/terminate_op.py +29 -0
  51. flowllm/op/parallel_op.py +23 -0
  52. flowllm/op/search/__init__.py +3 -0
  53. flowllm/op/search/dashscope_deep_research_op.py +260 -0
  54. flowllm/op/search/dashscope_search_op.py +179 -0
  55. flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  56. flowllm/op/search/tavily_search_op.py +102 -0
  57. flowllm/op/sequential_op.py +21 -0
  58. flowllm/schema/flow_request.py +12 -0
  59. flowllm/schema/flow_response.py +12 -0
  60. flowllm/schema/message.py +35 -0
  61. flowllm/schema/service_config.py +72 -0
  62. flowllm/schema/tool_call.py +118 -0
  63. {llmflow → flowllm}/schema/vector_node.py +1 -0
  64. flowllm/service/__init__.py +3 -0
  65. flowllm/service/base_service.py +68 -0
  66. flowllm/service/cmd_service.py +15 -0
  67. flowllm/service/http_service.py +79 -0
  68. flowllm/service/mcp_service.py +47 -0
  69. flowllm/storage/__init__.py +1 -0
  70. flowllm/storage/cache/__init__.py +1 -0
  71. flowllm/storage/cache/cache_data_handler.py +104 -0
  72. flowllm/storage/cache/data_cache.py +375 -0
  73. flowllm/storage/vector_store/__init__.py +3 -0
  74. flowllm/storage/vector_store/base_vector_store.py +44 -0
  75. {llmflow → flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
  76. {llmflow → flowllm/storage}/vector_store/es_vector_store.py +11 -11
  77. llmflow/vector_store/file_vector_store.py → flowllm/storage/vector_store/local_vector_store.py +110 -11
  78. flowllm/utils/common_utils.py +52 -0
  79. flowllm/utils/fetch_url.py +117 -0
  80. flowllm/utils/llm_utils.py +28 -0
  81. flowllm/utils/ridge_v2.py +54 -0
  82. {llmflow → flowllm}/utils/timer.py +5 -4
  83. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/METADATA +45 -388
  84. flowllm-0.1.2.dist-info/RECORD +99 -0
  85. flowllm-0.1.2.dist-info/entry_points.txt +2 -0
  86. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/licenses/LICENSE +1 -1
  87. flowllm-0.1.2.dist-info/top_level.txt +1 -0
  88. flowllm-0.1.0.dist-info/RECORD +0 -66
  89. flowllm-0.1.0.dist-info/entry_points.txt +0 -3
  90. flowllm-0.1.0.dist-info/top_level.txt +0 -1
  91. llmflow/app.py +0 -53
  92. llmflow/config/config_parser.py +0 -80
  93. llmflow/config/mock_config.yaml +0 -58
  94. llmflow/embedding_model/__init__.py +0 -5
  95. llmflow/enumeration/agent_state.py +0 -8
  96. llmflow/llm/__init__.py +0 -5
  97. llmflow/llm/openai_compatible_llm.py +0 -283
  98. llmflow/mcp_server.py +0 -110
  99. llmflow/op/__init__.py +0 -10
  100. llmflow/op/base_op.py +0 -125
  101. llmflow/op/mock_op.py +0 -40
  102. llmflow/op/vector_store/__init__.py +0 -13
  103. llmflow/op/vector_store/recall_vector_store_op.py +0 -48
  104. llmflow/op/vector_store/update_vector_store_op.py +0 -28
  105. llmflow/op/vector_store/vector_store_action_op.py +0 -46
  106. llmflow/pipeline/pipeline.py +0 -94
  107. llmflow/pipeline/pipeline_context.py +0 -37
  108. llmflow/schema/app_config.py +0 -69
  109. llmflow/schema/experience.py +0 -144
  110. llmflow/schema/message.py +0 -68
  111. llmflow/schema/request.py +0 -32
  112. llmflow/schema/response.py +0 -29
  113. llmflow/service/__init__.py +0 -0
  114. llmflow/service/llmflow_service.py +0 -96
  115. llmflow/tool/__init__.py +0 -9
  116. llmflow/tool/base_tool.py +0 -80
  117. llmflow/tool/code_tool.py +0 -43
  118. llmflow/tool/dashscope_search_tool.py +0 -162
  119. llmflow/tool/mcp_tool.py +0 -77
  120. llmflow/tool/tavily_search_tool.py +0 -109
  121. llmflow/tool/terminate_tool.py +0 -23
  122. llmflow/utils/__init__.py +0 -0
  123. llmflow/utils/common_utils.py +0 -17
  124. llmflow/utils/file_handler.py +0 -25
  125. llmflow/utils/http_client.py +0 -156
  126. llmflow/utils/op_utils.py +0 -102
  127. llmflow/utils/registry.py +0 -33
  128. llmflow/vector_store/__init__.py +0 -7
  129. llmflow/vector_store/base_vector_store.py +0 -136
  130. {llmflow → flowllm/context}/__init__.py +0 -0
  131. {llmflow/config → flowllm/enumeration}/__init__.py +0 -0
  132. {llmflow → flowllm}/enumeration/chunk_enum.py +0 -0
  133. {llmflow → flowllm}/enumeration/http_enum.py +0 -0
  134. {llmflow → flowllm}/enumeration/role.py +0 -0
  135. {llmflow/enumeration → flowllm/flow/parser}/__init__.py +0 -0
  136. {llmflow/op/react → flowllm/op/agent}/__init__.py +0 -0
  137. /llmflow/op/react/react_v1_prompt.yaml → /flowllm/op/agent/react_prompt.yaml +0 -0
  138. {llmflow/pipeline → flowllm/schema}/__init__.py +0 -0
  139. {llmflow/schema → flowllm/utils}/__init__.py +0 -0
  140. {llmflow → flowllm}/utils/singleton.py +0 -0
  141. {flowllm-0.1.0.dist-info → flowllm-0.1.2.dist-info}/WHEEL +0 -0
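Taken together, the listing above is mostly a package rename with a restructure: the 0.1.0 wheel shipped a top-level llmflow package, while 0.1.2 ships flowllm (see the two top_level.txt changes), the old tool/ layer is replaced by tool flows under flow/gallery/ and ops under op/, and the vector stores move under storage/. A minimal sketch of the import changes a downstream user would make, based only on the module paths listed above (BaseOp is confirmed by the diff below; other class names are not shown here, so modules are imported directly):

    # flowllm 0.1.0 shipped the old llmflow layout:
    # from llmflow.op.base_op import BaseOp
    # from llmflow.vector_store import chroma_vector_store

    # flowllm 0.1.2:
    from flowllm.op.base_op import BaseOp                          # op layer under the new top-level package
    from flowllm.storage.vector_store import chroma_vector_store   # vector stores now live under storage/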
flowllm/flow/parser/expression_parser.py
@@ -0,0 +1,171 @@
+ import re
+
+ from flowllm.context.service_context import C
+ from flowllm.op.base_op import BaseOp
+ from flowllm.op.parallel_op import ParallelOp
+ from flowllm.op.sequential_op import SequentialOp
+ from flowllm.schema.service_config import OpConfig
+
+
+ class ExpressionParser:
+     SEQ_SYMBOL = ">>"
+     PARALLEL_SYMBOL = "|"
+
+     """
+     Simple flow implementation that supports parsing operation expressions.
+
+     Supports flow expressions like:
+     - "op1 >> op2" (sequential expressions)
+     - "op1 | op2" (parallel expressions)
+     - "op1 >> (op2 | op3) >> op4" (mixed expressions)
+     - "op1 >> (op1 | (op2 >> op3)) >> op4" (complex nested expressions)
+     """
+
+     def __init__(self, flow_content: str = ""):
+         self.flow_content: str = flow_content
+         self._parsed_ops_cache = {}
+
+     def parse_flow(self):
+         self._parsed_ops_cache.clear()
+         expression = re.sub(r'\s+', ' ', self.flow_content.strip())
+         return self._parse_expression(expression)
+
+     def _parse_expression(self, expression: str) -> BaseOp:
+         """
+         Parse the flow content string into executable operations.
+
+         Supports expressions with operators:
+         - ">>" for sequential execution
+         - "|" for parallel execution
+         - Parentheses for grouping operations
+
+         Args:
+             expression: The expression string to parse. If None, uses self.flow_content
+
+         Returns:
+             BaseOp: The parsed flow as an executable operation tree
+         """
+         # handle parentheses by finding and replacing innermost groups
+         while '(' in expression:
+             # Find the innermost parentheses
+             start = -1
+             for i, char in enumerate(expression):
+                 if char == '(':
+                     start = i
+                 elif char == ')':
+                     if start == -1:
+                         raise ValueError(f"mismatched parentheses in expression: {expression}")
+
+                     # extract and parse the inner expression
+                     inner_expr = expression[start + 1:i]
+                     inner_result = self._parse_expression(inner_expr)
+
+                     # create a placeholder for the parsed inner expression
+                     placeholder = f"__PARSED_OP_{len(self._parsed_ops_cache)}__"
+
+                     # store the parsed operation for later retrieval
+                     self._parsed_ops_cache[placeholder] = inner_result
+
+                     # Replace the parentheses group with placeholder
+                     expression = expression[:start] + placeholder + expression[i + 1:]
+                     break
+             else:
+                 if start != -1:
+                     raise ValueError(f"mismatched parentheses in expression: {expression}")
+
+         # Parse the expression without parentheses
+         return self._parse_flat_expression(expression)
+
+     def _parse_flat_expression(self, expression: str) -> BaseOp:
+         """
+         Parse a flat expression (no parentheses) into operation objects.
+
+         Args:
+             expression: The flat expression string
+
+         Returns:
+             BaseOp: The parsed operation tree
+         """
+         # split by '>>' first (sequential has higher precedence)
+         sequential_parts = [part.strip() for part in expression.split(self.SEQ_SYMBOL)]
+
+         if len(sequential_parts) > 1:
+             # parse each part and create sequential operation
+             ops = []
+             for part in sequential_parts:
+                 part = part.strip()
+                 if part in self._parsed_ops_cache:
+                     ops.append(self._parsed_ops_cache[part])
+                 else:
+                     ops.append(self._parse_parallel_expression(part))
+
+             return SequentialOp(ops=ops)
+
+         else:
+             # no sequential operators, parse for parallel
+             return self._parse_parallel_expression(expression)
+
+     def _parse_parallel_expression(self, expression: str) -> BaseOp:
+         """
+         Parse a parallel expression (operations separated by |).
+
+         Args:
+             expression: The expression string
+
+         Returns:
+             BaseOp: The parsed operation (single op or parallel op)
+         """
+         parallel_parts = [part.strip() for part in expression.split(self.PARALLEL_SYMBOL)]
+
+         if len(parallel_parts) > 1:
+             # create parallel operation
+             ops = []
+             for part in parallel_parts:
+                 part = part.strip()
+                 if part in self._parsed_ops_cache:
+                     ops.append(self._parsed_ops_cache[part])
+                 else:
+                     ops.append(self._create_op(part))
+
+             return ParallelOp(ops=ops)
+
+         else:
+             # single operation
+             part = expression.strip()
+             if part in self._parsed_ops_cache:
+                 return self._parsed_ops_cache[part]
+             else:
+                 return self._create_op(part)
+
+     @staticmethod
+     def _create_op(op_name: str) -> BaseOp:
+         if op_name in C.service_config.op:
+             op_config: OpConfig = C.service_config.op[op_name]
+             op_cls = C.resolve_op(op_config.backend)
+
+
+         elif op_name in C.registry_dict["op"]:
+             op_config: OpConfig = OpConfig()
+             op_cls = C.resolve_op(op_name)
+
+         else:
+             raise ValueError(f"op='{op_name}' is not registered!")
+
+         kwargs = {
+             "name": op_name,
+             "raise_exception": op_config.raise_exception,
+             **op_config.params
+         }
+
+         if op_config.language:
+             kwargs["language"] = op_config.language
+         if op_config.prompt_path:
+             kwargs["prompt_path"] = op_config.prompt_path
+         if op_config.llm:
+             kwargs["llm"] = op_config.llm
+         if op_config.embedding_model:
+             kwargs["embedding_model"] = op_config.embedding_model
+         if op_config.vector_store:
+             kwargs["vector_store"] = op_config.vector_store
+
+         return op_cls(**kwargs)
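As a usage note (not part of the diff): parse_flow() normalizes whitespace and recursively reduces parenthesized groups to cached placeholders before splitting, so ">>" separates top-level stages and "|" fans out within a stage. A minimal sketch, assuming a running service context in which op1..op4 are registered op names (registration via C.service_config / the op registry is not shown here):

    from flowllm.flow.parser.expression_parser import ExpressionParser

    # Builds SequentialOp(ops=[op1, ParallelOp(ops=[op2, op3]), op4]):
    # the inner "(op2 | op3)" is parsed first and cached under a
    # "__PARSED_OP_0__" placeholder, then substituted into the outer chain.
    parser = ExpressionParser("op1 >> (op2 | op3) >> op4")
    flow_op = parser.parse_flow()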
flowllm/llm/__init__.py
@@ -0,0 +1,2 @@
+ from .litellm_llm import LiteLLMBaseLLM
+ from .openai_compatible_llm import OpenAICompatibleBaseLLM
{llmflow → flowllm}/llm/base_llm.py
@@ -1,12 +1,13 @@
+ import asyncio
  import time
  from abc import ABC
- from typing import List, Literal, Callable
+ from typing import List, Callable

  from loguru import logger
  from pydantic import Field, BaseModel

- from llmflow.schema.message import Message
- from llmflow.tool.base_tool import BaseTool
+ from flowllm.schema.message import Message
+ from flowllm.schema.tool_call import ToolCall


  class BaseLLM(BaseModel, ABC):
@@ -28,19 +29,19 @@ class BaseLLM(BaseModel, ABC):
      stream_options: dict = Field(default={"include_usage": True}, description="Options for streaming responses")
      temperature: float = Field(default=0.0000001, description="Sampling temperature (low for deterministic outputs)")
      presence_penalty: float | None = Field(default=None, description="Presence penalty to reduce repetition")
-
+
      # Model-specific features
-     enable_thinking: bool = Field(default=True, description="Enable reasoning/thinking mode for supported models")
-
+     enable_thinking: bool = Field(default=False, description="Enable reasoning/thinking mode for supported models")
+
      # Tool usage configuration
-     tool_choice: Literal["none", "auto", "required"] = Field(default="auto", description="Strategy for tool selection")
+     tool_choice: str = Field(default=None, description="Strategy for tool selection")
      parallel_tool_calls: bool = Field(default=True, description="Allow multiple tool calls in parallel")

      # Error handling and reliability
      max_retries: int = Field(default=5, description="Maximum number of retry attempts on failure")
      raise_exception: bool = Field(default=False, description="Whether to raise exceptions or return default values")

-     def stream_chat(self, messages: List[Message], tools: List[BaseTool] = None, **kwargs):
+     def stream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
          """
          Stream chat completions from the LLM.

@@ -57,21 +58,25 @@
          """
          raise NotImplementedError

-     def stream_print(self, messages: List[Message], tools: List[BaseTool] = None, **kwargs):
+     async def astream_chat(self, messages: List[Message], tools: List[ToolCall] = None, **kwargs):
          """
-         Stream chat completions and print them to console in real-time.
+         Async stream chat completions from the LLM.

-         This is a convenience method for debugging and interactive use,
-         combining streaming with formatted console output.
+         This method should yield chunks of the response as they become available,
+         allowing for real-time display of the model's output in async contexts.

          Args:
              messages: List of conversation messages
              tools: Optional list of tools the model can use
              **kwargs: Additional model-specific parameters
+
+         Yields:
+             Chunks of the streaming response with their types
          """
          raise NotImplementedError

-     def _chat(self, messages: List[Message], tools: List[BaseTool] = None, **kwargs) -> Message:
+     def _chat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+               **kwargs) -> Message:
          """
          Internal method to perform a single chat completion.

@@ -82,6 +87,7 @@
          Args:
              messages: List of conversation messages
              tools: Optional list of tools the model can use
+             enable_stream_print: Whether to print streaming response to console
              **kwargs: Additional model-specific parameters

          Returns:
@@ -89,8 +95,28 @@
          """
          raise NotImplementedError

-     def chat(self, messages: List[Message], tools: List[BaseTool] = None, callback_fn: Callable = None,
-              default_value=None, **kwargs):
+     async def _achat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+                      **kwargs) -> Message:
+         """
+         Internal async method to perform a single chat completion.
+
+         This method should be implemented by subclasses to handle the actual
+         async communication with the LLM provider. It's called by the public achat()
+         method which adds retry logic and error handling.
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools the model can use
+             enable_stream_print: Whether to print streaming response to console
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             The complete response message from the LLM
+         """
+         raise NotImplementedError
+
+     def chat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+              callback_fn: Callable = None, default_value=None, **kwargs):
          """
          Perform a chat completion with retry logic and error handling.

@@ -103,6 +129,7 @@
              tools: Optional list of tools the model can use
              callback_fn: Optional callback to process the response message
              default_value: Value to return if all retries fail (when raise_exception=False)
+             enable_stream_print: Whether to print streaming response to console
              **kwargs: Additional model-specific parameters

          Returns:
@@ -114,8 +141,11 @@
          for i in range(self.max_retries):
              try:
                  # Attempt to get response from the model
-                 message: Message = self._chat(messages, tools, **kwargs)
-
+                 message: Message = self._chat(messages=messages,
+                                               tools=tools,
+                                               enable_stream_print=enable_stream_print,
+                                               **kwargs)
+
                  # Apply callback function if provided
                  if callback_fn:
                      return callback_fn(message)
@@ -124,7 +154,7 @@

              except Exception as e:
                  logger.exception(f"chat with model={self.model_name} encounter error with e={e.args}")
-
+
                  # Exponential backoff: wait longer after each failure
                  time.sleep(1 + i)

@@ -136,3 +166,55 @@
                  return default_value

          return None
+
+     async def achat(self, messages: List[Message], tools: List[ToolCall] = None, enable_stream_print: bool = False,
+                     callback_fn: Callable = None, default_value=None, **kwargs):
+         """
+         Perform an async chat completion with retry logic and error handling.
+
+         This is the main public interface for async chat completions. It wraps the
+         internal _achat() method with robust error handling, exponential backoff,
+         and optional callback processing.
+
+         Args:
+             messages: List of conversation messages
+             tools: Optional list of tools the model can use
+             callback_fn: Optional callback to process the response message
+             default_value: Value to return if all retries fail (when raise_exception=False)
+             enable_stream_print: Whether to print streaming response to console
+             **kwargs: Additional model-specific parameters
+
+         Returns:
+             The response message (possibly processed by callback_fn) or default_value
+
+         Raises:
+             Exception: If raise_exception=True and all retries fail
+         """
+         for i in range(self.max_retries):
+             try:
+                 # Attempt to get response from the model
+                 message: Message = await self._achat(messages=messages,
+                                                      tools=tools,
+                                                      enable_stream_print=enable_stream_print,
+                                                      **kwargs)
+
+                 # Apply callback function if provided
+                 if callback_fn:
+                     return callback_fn(message)
+                 else:
+                     return message
+
+             except Exception as e:
+                 logger.exception(f"async chat with model={self.model_name} encounter error with e={e.args}")
+
+                 # Exponential backoff: wait longer after each failure
+                 await asyncio.sleep(1 + i)
+
+                 # Handle final retry failure
+                 if i == self.max_retries - 1:
+                     if self.raise_exception:
+                         raise e
+                     else:
+                         return default_value
+
+         return None
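The public chat()/achat() pair wraps the provider-specific _chat()/_achat() with up to max_retries attempts, sleeping 1 + i seconds after the i-th failure, and finally returning default_value or re-raising depending on raise_exception. A minimal sketch of driving the new async path, assuming OpenAICompatibleBaseLLM (exported from flowllm.llm above) implements _achat(); the Message constructor arguments and any credentials wiring are assumptions, since neither appears in this diff:

    import asyncio

    from flowllm.llm import OpenAICompatibleBaseLLM
    from flowllm.schema.message import Message

    async def main():
        # model_name is a real field (it appears in the retry log message above);
        # extra constructor arguments (API key, base URL, ...) are provider-specific.
        llm = OpenAICompatibleBaseLLM(model_name="qwen-plus")

        # achat() retries _achat() up to max_retries times, then either raises
        # (raise_exception=True) or falls back to default_value.
        text = await llm.achat(
            messages=[Message(role="user", content="ping")],  # role/content assumed from schema/message.py
            callback_fn=lambda message: message.content,      # post-process the returned Message
            default_value="",
        )
        print(text)

    asyncio.run(main())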