flowllm 0.1.0__tar.gz → 0.1.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (153)
  1. {flowllm-0.1.0 → flowllm-0.1.2}/LICENSE +1 -1
  2. {flowllm-0.1.0 → flowllm-0.1.2}/PKG-INFO +45 -388
  3. flowllm-0.1.2/flowllm/__init__.py +21 -0
  4. flowllm-0.1.2/flowllm/app.py +15 -0
  5. flowllm-0.1.2/flowllm/client/__init__.py +25 -0
  6. flowllm-0.1.2/flowllm/client/async_http_client.py +81 -0
  7. flowllm-0.1.2/flowllm/client/http_client.py +81 -0
  8. flowllm-0.1.2/flowllm/client/mcp_client.py +133 -0
  9. flowllm-0.1.2/flowllm/client/sync_mcp_client.py +116 -0
  10. flowllm-0.1.2/flowllm/config/__init__.py +1 -0
  11. flowllm-0.1.2/flowllm/config/default.yaml +77 -0
  12. flowllm-0.1.2/flowllm/config/empty.yaml +37 -0
  13. flowllm-0.1.2/flowllm/config/pydantic_config_parser.py +242 -0
  14. flowllm-0.1.2/flowllm/context/base_context.py +79 -0
  15. flowllm-0.1.2/flowllm/context/flow_context.py +16 -0
  16. flowllm-0.1.0/llmflow/op/prompt_mixin.py → flowllm-0.1.2/flowllm/context/prompt_handler.py +25 -14
  17. flowllm-0.1.2/flowllm/context/registry.py +30 -0
  18. flowllm-0.1.2/flowllm/context/service_context.py +147 -0
  19. flowllm-0.1.2/flowllm/embedding_model/__init__.py +1 -0
  20. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/embedding_model/base_embedding_model.py +93 -2
  21. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/embedding_model/openai_compatible_embedding_model.py +71 -13
  22. flowllm-0.1.2/flowllm/flow/__init__.py +1 -0
  23. flowllm-0.1.2/flowllm/flow/base_flow.py +72 -0
  24. flowllm-0.1.2/flowllm/flow/base_tool_flow.py +15 -0
  25. flowllm-0.1.2/flowllm/flow/gallery/__init__.py +8 -0
  26. flowllm-0.1.2/flowllm/flow/gallery/cmd_flow.py +11 -0
  27. flowllm-0.1.2/flowllm/flow/gallery/code_tool_flow.py +30 -0
  28. flowllm-0.1.2/flowllm/flow/gallery/dashscope_search_tool_flow.py +34 -0
  29. flowllm-0.1.2/flowllm/flow/gallery/deepsearch_tool_flow.py +39 -0
  30. flowllm-0.1.2/flowllm/flow/gallery/expression_tool_flow.py +18 -0
  31. flowllm-0.1.2/flowllm/flow/gallery/mock_tool_flow.py +67 -0
  32. flowllm-0.1.2/flowllm/flow/gallery/tavily_search_tool_flow.py +30 -0
  33. flowllm-0.1.2/flowllm/flow/gallery/terminate_tool_flow.py +30 -0
  34. flowllm-0.1.2/flowllm/flow/parser/expression_parser.py +171 -0
  35. flowllm-0.1.2/flowllm/llm/__init__.py +2 -0
  36. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/llm/base_llm.py +100 -18
  37. flowllm-0.1.2/flowllm/llm/litellm_llm.py +455 -0
  38. flowllm-0.1.2/flowllm/llm/openai_compatible_llm.py +439 -0
  39. flowllm-0.1.2/flowllm/op/__init__.py +11 -0
  40. flowllm-0.1.0/llmflow/op/react/react_v1_op.py → flowllm-0.1.2/flowllm/op/agent/react_op.py +17 -22
  41. flowllm-0.1.2/flowllm/op/akshare/__init__.py +3 -0
  42. flowllm-0.1.2/flowllm/op/akshare/get_ak_a_code_op.py +108 -0
  43. flowllm-0.1.2/flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
  44. flowllm-0.1.2/flowllm/op/akshare/get_ak_a_info_op.py +140 -0
  45. flowllm-0.1.2/flowllm/op/base_llm_op.py +64 -0
  46. flowllm-0.1.2/flowllm/op/base_op.py +148 -0
  47. flowllm-0.1.2/flowllm/op/base_ray_op.py +313 -0
  48. flowllm-0.1.2/flowllm/op/code/__init__.py +1 -0
  49. flowllm-0.1.2/flowllm/op/code/execute_code_op.py +42 -0
  50. flowllm-0.1.2/flowllm/op/gallery/__init__.py +2 -0
  51. flowllm-0.1.2/flowllm/op/gallery/mock_op.py +42 -0
  52. flowllm-0.1.2/flowllm/op/gallery/terminate_op.py +29 -0
  53. flowllm-0.1.2/flowllm/op/parallel_op.py +23 -0
  54. flowllm-0.1.2/flowllm/op/search/__init__.py +3 -0
  55. flowllm-0.1.2/flowllm/op/search/dashscope_deep_research_op.py +260 -0
  56. flowllm-0.1.2/flowllm/op/search/dashscope_search_op.py +179 -0
  57. flowllm-0.1.2/flowllm/op/search/dashscope_search_prompt.yaml +13 -0
  58. flowllm-0.1.2/flowllm/op/search/tavily_search_op.py +102 -0
  59. flowllm-0.1.2/flowllm/op/sequential_op.py +21 -0
  60. flowllm-0.1.2/flowllm/schema/flow_request.py +12 -0
  61. flowllm-0.1.2/flowllm/schema/flow_response.py +12 -0
  62. flowllm-0.1.2/flowllm/schema/message.py +35 -0
  63. flowllm-0.1.2/flowllm/schema/service_config.py +72 -0
  64. flowllm-0.1.2/flowllm/schema/tool_call.py +118 -0
  65. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/schema/vector_node.py +1 -0
  66. flowllm-0.1.2/flowllm/service/__init__.py +3 -0
  67. flowllm-0.1.2/flowllm/service/base_service.py +68 -0
  68. flowllm-0.1.2/flowllm/service/cmd_service.py +15 -0
  69. flowllm-0.1.2/flowllm/service/http_service.py +79 -0
  70. flowllm-0.1.2/flowllm/service/mcp_service.py +47 -0
  71. flowllm-0.1.2/flowllm/storage/__init__.py +1 -0
  72. flowllm-0.1.2/flowllm/storage/cache/__init__.py +1 -0
  73. flowllm-0.1.2/flowllm/storage/cache/cache_data_handler.py +104 -0
  74. flowllm-0.1.2/flowllm/storage/cache/data_cache.py +375 -0
  75. flowllm-0.1.2/flowllm/storage/vector_store/__init__.py +3 -0
  76. flowllm-0.1.2/flowllm/storage/vector_store/base_vector_store.py +44 -0
  77. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
  78. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm/storage}/vector_store/es_vector_store.py +11 -11
  79. flowllm-0.1.0/llmflow/vector_store/file_vector_store.py → flowllm-0.1.2/flowllm/storage/vector_store/local_vector_store.py +110 -11
  80. flowllm-0.1.2/flowllm/utils/common_utils.py +52 -0
  81. flowllm-0.1.2/flowllm/utils/fetch_url.py +117 -0
  82. flowllm-0.1.2/flowllm/utils/llm_utils.py +28 -0
  83. flowllm-0.1.2/flowllm/utils/ridge_v2.py +54 -0
  84. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/utils/timer.py +5 -4
  85. {flowllm-0.1.0 → flowllm-0.1.2}/flowllm.egg-info/PKG-INFO +45 -388
  86. flowllm-0.1.2/flowllm.egg-info/SOURCES.txt +107 -0
  87. flowllm-0.1.2/flowllm.egg-info/entry_points.txt +2 -0
  88. flowllm-0.1.2/flowllm.egg-info/requires.txt +31 -0
  89. flowllm-0.1.2/flowllm.egg-info/top_level.txt +1 -0
  90. flowllm-0.1.2/pyproject.toml +81 -0
  91. flowllm-0.1.2/test/test_cache.py +456 -0
  92. flowllm-0.1.2/test/test_config.py +66 -0
  93. flowllm-0.1.2/test/test_dashscope_llm.py +190 -0
  94. flowllm-0.1.2/test/test_dataframe_cache.py +205 -0
  95. flowllm-0.1.2/test/test_simple_flow.py +148 -0
  96. flowllm-0.1.0/README.md +0 -370
  97. flowllm-0.1.0/flowllm.egg-info/SOURCES.txt +0 -69
  98. flowllm-0.1.0/flowllm.egg-info/entry_points.txt +0 -3
  99. flowllm-0.1.0/flowllm.egg-info/requires.txt +0 -13
  100. flowllm-0.1.0/flowllm.egg-info/top_level.txt +0 -1
  101. flowllm-0.1.0/llmflow/app.py +0 -53
  102. flowllm-0.1.0/llmflow/config/config_parser.py +0 -80
  103. flowllm-0.1.0/llmflow/config/mock_config.yaml +0 -58
  104. flowllm-0.1.0/llmflow/embedding_model/__init__.py +0 -5
  105. flowllm-0.1.0/llmflow/enumeration/agent_state.py +0 -8
  106. flowllm-0.1.0/llmflow/llm/__init__.py +0 -5
  107. flowllm-0.1.0/llmflow/llm/openai_compatible_llm.py +0 -283
  108. flowllm-0.1.0/llmflow/mcp_server.py +0 -110
  109. flowllm-0.1.0/llmflow/op/__init__.py +0 -10
  110. flowllm-0.1.0/llmflow/op/base_op.py +0 -125
  111. flowllm-0.1.0/llmflow/op/mock_op.py +0 -40
  112. flowllm-0.1.0/llmflow/op/vector_store/__init__.py +0 -13
  113. flowllm-0.1.0/llmflow/op/vector_store/recall_vector_store_op.py +0 -48
  114. flowllm-0.1.0/llmflow/op/vector_store/update_vector_store_op.py +0 -28
  115. flowllm-0.1.0/llmflow/op/vector_store/vector_store_action_op.py +0 -46
  116. flowllm-0.1.0/llmflow/pipeline/pipeline.py +0 -94
  117. flowllm-0.1.0/llmflow/pipeline/pipeline_context.py +0 -37
  118. flowllm-0.1.0/llmflow/schema/app_config.py +0 -69
  119. flowllm-0.1.0/llmflow/schema/experience.py +0 -144
  120. flowllm-0.1.0/llmflow/schema/message.py +0 -68
  121. flowllm-0.1.0/llmflow/schema/request.py +0 -32
  122. flowllm-0.1.0/llmflow/schema/response.py +0 -29
  123. flowllm-0.1.0/llmflow/service/llmflow_service.py +0 -96
  124. flowllm-0.1.0/llmflow/tool/__init__.py +0 -9
  125. flowllm-0.1.0/llmflow/tool/base_tool.py +0 -80
  126. flowllm-0.1.0/llmflow/tool/code_tool.py +0 -43
  127. flowllm-0.1.0/llmflow/tool/dashscope_search_tool.py +0 -162
  128. flowllm-0.1.0/llmflow/tool/mcp_tool.py +0 -77
  129. flowllm-0.1.0/llmflow/tool/tavily_search_tool.py +0 -109
  130. flowllm-0.1.0/llmflow/tool/terminate_tool.py +0 -23
  131. flowllm-0.1.0/llmflow/utils/__init__.py +0 -0
  132. flowllm-0.1.0/llmflow/utils/common_utils.py +0 -17
  133. flowllm-0.1.0/llmflow/utils/file_handler.py +0 -25
  134. flowllm-0.1.0/llmflow/utils/http_client.py +0 -156
  135. flowllm-0.1.0/llmflow/utils/op_utils.py +0 -102
  136. flowllm-0.1.0/llmflow/utils/registry.py +0 -33
  137. flowllm-0.1.0/llmflow/vector_store/__init__.py +0 -7
  138. flowllm-0.1.0/llmflow/vector_store/base_vector_store.py +0 -136
  139. flowllm-0.1.0/pyproject.toml +0 -49
  140. /flowllm-0.1.0/llmflow/__init__.py → /flowllm-0.1.2/README.md +0 -0
  141. {flowllm-0.1.0/llmflow/config → flowllm-0.1.2/flowllm/context}/__init__.py +0 -0
  142. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/enumeration/__init__.py +0 -0
  143. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/enumeration/chunk_enum.py +0 -0
  144. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/enumeration/http_enum.py +0 -0
  145. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/enumeration/role.py +0 -0
  146. {flowllm-0.1.0/llmflow/op/react → flowllm-0.1.2/flowllm/flow/parser}/__init__.py +0 -0
  147. {flowllm-0.1.0/llmflow/pipeline → flowllm-0.1.2/flowllm/op/agent}/__init__.py +0 -0
  148. /flowllm-0.1.0/llmflow/op/react/react_v1_prompt.yaml → /flowllm-0.1.2/flowllm/op/agent/react_prompt.yaml +0 -0
  149. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/schema/__init__.py +0 -0
  150. {flowllm-0.1.0/llmflow/service → flowllm-0.1.2/flowllm/utils}/__init__.py +0 -0
  151. {flowllm-0.1.0/llmflow → flowllm-0.1.2/flowllm}/utils/singleton.py +0 -0
  152. {flowllm-0.1.0 → flowllm-0.1.2}/flowllm.egg-info/dependency_links.txt +0 -0
  153. {flowllm-0.1.0 → flowllm-0.1.2}/setup.cfg +0 -0
@@ -186,7 +186,7 @@
186
186
  same "printed page" as the copyright notice for easier
187
187
  identification within third-party archives.
188
188
 
189
- Copyright 2024 Alibaba Group
189
+ Copyright 2024 FlowLLM
190
190
 
191
191
  Licensed under the Apache License, Version 2.0 (the "License");
192
192
  you may not use this file except in compliance with the License.
@@ -1,7 +1,9 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: flowllm
3
- Version: 0.1.0
4
- Summary: build llm flow
3
+ Version: 0.1.2
4
+ Summary: A flexible framework for building LLM-powered flows and mcp services
5
+ Author-email: FlowLLM Team <flowllm@example.com>
6
+ Maintainer-email: FlowLLM Team <flowllm@example.com>
5
7
  License: Apache License
6
8
  Version 2.0, January 2004
7
9
  http://www.apache.org/licenses/
@@ -190,7 +192,7 @@ License: Apache License
190
192
  same "printed page" as the copyright notice for easier
191
193
  identification within third-party archives.
192
194
 
193
- Copyright 2024 Alibaba Group
195
+ Copyright 2024 FlowLLM
194
196
 
195
197
  Licensed under the Apache License, Version 2.0 (the "License");
196
198
  you may not use this file except in compliance with the License.
@@ -204,394 +206,49 @@ License: Apache License
204
206
  See the License for the specific language governing permissions and
205
207
  limitations under the License.
206
208
 
207
- Classifier: Programming Language :: Python :: 3
209
+ Keywords: llm,ai,flow,framework,openai,chatgpt,language-model,mcp,http
210
+ Classifier: Development Status :: 3 - Alpha
211
+ Classifier: Intended Audience :: Developers
212
+ Classifier: Intended Audience :: Science/Research
208
213
  Classifier: License :: OSI Approved :: Apache Software License
209
214
  Classifier: Operating System :: OS Independent
215
+ Classifier: Programming Language :: Python :: 3
216
+ Classifier: Programming Language :: Python :: 3.12
217
+ Classifier: Programming Language :: Python :: 3.13
218
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
219
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
220
+ Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
221
+ Classifier: Typing :: Typed
210
222
  Requires-Python: >=3.12
211
223
  Description-Content-Type: text/markdown
212
224
  License-File: LICENSE
213
- Requires-Dist: dashscope>=1.19.1
214
- Requires-Dist: elasticsearch>=8.14.0
215
- Requires-Dist: fastapi>=0.115.13
216
- Requires-Dist: fastmcp>=2.10.6
217
- Requires-Dist: loguru>=0.7.3
218
- Requires-Dist: mcp>=1.9.4
219
- Requires-Dist: numpy>=2.3.0
220
- Requires-Dist: openai>=1.88.0
221
- Requires-Dist: pydantic>=2.11.7
222
- Requires-Dist: PyYAML>=6.0.2
223
- Requires-Dist: Requests>=2.32.4
224
- Requires-Dist: uvicorn>=0.34.3
225
- Requires-Dist: setuptools>=75.0
225
+ Requires-Dist: akshare
226
+ Requires-Dist: beautifulsoup4
227
+ Requires-Dist: dashscope
228
+ Requires-Dist: elasticsearch
229
+ Requires-Dist: fastapi
230
+ Requires-Dist: fastmcp
231
+ Requires-Dist: httpx
232
+ Requires-Dist: litellm
233
+ Requires-Dist: loguru
234
+ Requires-Dist: mcp
235
+ Requires-Dist: numpy
236
+ Requires-Dist: openai
237
+ Requires-Dist: pandas
238
+ Requires-Dist: pydantic
239
+ Requires-Dist: PyYAML
240
+ Requires-Dist: ray
241
+ Requires-Dist: requests
242
+ Requires-Dist: scikit-learn
243
+ Requires-Dist: tavily-python
244
+ Requires-Dist: tqdm
245
+ Requires-Dist: urllib3
246
+ Requires-Dist: uvicorn[standard]
247
+ Requires-Dist: chromadb
248
+ Requires-Dist: elasticsearch
249
+ Requires-Dist: ray
250
+ Provides-Extra: distributed
251
+ Requires-Dist: ray; extra == "distributed"
252
+ Provides-Extra: all
253
+ Requires-Dist: flowllm[distributed]; extra == "all"
226
254
  Dynamic: license-file
227
-
228
- # LLMFlow
229
-
230
- [![Python](https://img.shields.io/badge/python-3.12+-blue.svg)](https://www.python.org/downloads/)
231
- [![License](https://img.shields.io/badge/License-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0)
232
-
233
- LLMFlow is a flexible large language model workflow framework that provides a modular pipeline architecture for building complex AI applications. The framework supports multiple LLM providers, vector storage backends, and tool integrations, enabling you to easily build Retrieval-Augmented Generation (RAG), intelligent agents, and other AI-powered applications.
234
-
235
- ## 🚀 Key Features
236
-
237
- ### 🔧 Modular Architecture
238
- - **Pipeline System**: Flexible pipeline configuration supporting both serial and parallel operations
239
- - **Operation Registry**: Extensible operation registry with support for custom operations
240
- - **Configuration-Driven**: Manage entire applications through YAML configuration files
241
-
242
- ### 🤖 LLM Support
243
- - **Multi-Provider Compatible**: Support for OpenAI-compatible APIs
244
- - **Streaming Responses**: Real-time streaming output support
245
- - **Tool Calling**: Built-in tool calling and parallel execution support
246
- - **Reasoning Mode**: Chain-of-thought reasoning support
247
-
248
- ### 📚 Vector Storage
249
- - **Multi-Backend Support**:
250
- - Elasticsearch
251
- - ChromaDB
252
- - Local file storage
253
- - **Embedding Models**: Support for multiple embedding models
254
- - **Workspace Management**: Multi-tenant vector storage management
255
-
256
- ### 🛠️ Rich Tool Ecosystem
257
- - **Code Execution**: Python code execution tool
258
- - **Web Search**: Integrated Tavily and DashScope search
259
- - **MCP Protocol**: Model Context Protocol support
260
- - **Termination Control**: Intelligent conversation termination management
261
-
262
- ### 🌐 API Services
263
- - **RESTful API**: FastAPI-powered HTTP services
264
- - **MCP Server**: Model Context Protocol server support
265
- - **Multiple Endpoints**: Retriever, summarizer, vector store, agent APIs
266
-
267
- ## 📦 Installation
268
-
269
- ### Prerequisites
270
- - Python 3.12+
271
- - pip or poetry
272
-
273
- ### Installation Steps
274
-
275
- ```bash
276
- # Clone the repository
277
- git clone https://github.com/your-username/llmflow.git
278
- cd llmflow
279
-
280
- # Install dependencies
281
- pip install -e .
282
-
283
- # Or using poetry
284
- poetry install
285
- ```
286
-
287
- ### Environment Configuration
288
-
289
- Copy the environment template:
290
- ```bash
291
- cp example.env .env
292
- ```
293
-
294
- Edit the `.env` file to configure necessary API keys:
295
-
296
- ```bash
297
- # LLM Configuration
298
- LLM_API_KEY=sk-your-llm-api-key
299
- LLM_BASE_URL=https://your-llm-endpoint/v1
300
-
301
- # Embedding Model Configuration
302
- EMBEDDING_API_KEY=sk-your-embedding-api-key
303
- EMBEDDING_BASE_URL=https://your-embedding-endpoint/v1
304
-
305
- # Elasticsearch (Optional)
306
- ES_HOSTS=http://localhost:9200
307
-
308
- # DashScope Search (Optional)
309
- DASHSCOPE_API_KEY=sk-your-dashscope-key
310
- ```
311
-
312
- ## 🏃 Quick Start
313
-
314
- ### 1. Start HTTP Service
315
-
316
- ```bash
317
- llmflow \
318
- http_service.port=8001 \
319
- llm.default.model_name=qwen3-32b \
320
- embedding_model.default.model_name=text-embedding-v4 \
321
- vector_store.default.backend=local_file
322
- ```
323
-
324
- ### 2. Start MCP Server
325
-
326
- ```bash
327
- llmflow_mcp \
328
- mcp_transport=stdio \
329
- http_service.port=8001 \
330
- llm.default.model_name=qwen3-32b \
331
- embedding_model.default.model_name=text-embedding-v4 \
332
- vector_store.default.backend=local_file
333
- ```
334
-
335
- ### 3. API Usage Examples
336
-
337
- #### Retriever API
338
- ```python
339
- import requests
340
-
341
- response = requests.post('http://localhost:8001/retriever', json={
342
- "query": "What is artificial intelligence?",
343
- "top_k": 5,
344
- "workspace_id": "default",
345
- "config": {}
346
- })
347
- print(response.json())
348
- ```
349
-
350
- #### Agent API
351
- ```python
352
- response = requests.post('http://localhost:8001/agent', json={
353
- "query": "Help me search for the latest AI technology trends",
354
- "workspace_id": "default",
355
- "config": {}
356
- })
357
- print(response.json())
358
- ```
359
-
360
- ## ⚙️ Configuration Guide
361
-
362
- ### Pipeline Configuration Syntax
363
-
364
- LLMFlow uses an intuitive string syntax to define operation pipelines:
365
-
366
- ```yaml
367
- api:
368
- # Serial execution: op1 -> op2 -> op3
369
- retriever: recall_vector_store_op->summarizer_op
370
-
371
- # Parallel execution: [op1 | op2] runs in parallel
372
- summarizer: mock1_op->[mock4_op->mock2_op|mock5_op]->mock3_op
373
-
374
- # Mixed mode: combination of serial and parallel
375
- agent: react_v1_op
376
- ```
377
-
378
- ### Complete Configuration Example
379
-
380
- ```yaml
381
- # HTTP Service Configuration
382
- http_service:
383
- host: "0.0.0.0"
384
- port: 8001
385
- timeout_keep_alive: 600
386
- limit_concurrency: 64
387
-
388
- # Thread Pool Configuration
389
- thread_pool:
390
- max_workers: 10
391
-
392
- # API Pipeline Definitions
393
- api:
394
- retriever: recall_vector_store_op
395
- summarizer: update_vector_store_op
396
- vector_store: vector_store_action_op
397
- agent: react_v1_op
398
-
399
- # Operation Configuration
400
- op:
401
- react_v1_op:
402
- backend: react_v1_op
403
- llm: default
404
- params:
405
- max_steps: 10
406
- tool_names: "code_tool,tavily_search_tool,terminate_tool"
407
-
408
- # LLM Configuration
409
- llm:
410
- default:
411
- backend: openai_compatible
412
- model_name: qwen3-32b
413
- params:
414
- temperature: 0.6
415
- max_retries: 5
416
-
417
- # Embedding Model Configuration
418
- embedding_model:
419
- default:
420
- backend: openai_compatible
421
- model_name: text-embedding-v4
422
- params:
423
- dimensions: 1024
424
-
425
- # Vector Store Configuration
426
- vector_store:
427
- default:
428
- backend: local_file # or elasticsearch, chroma
429
- embedding_model: default
430
- params:
431
- store_dir: "./vector_store_data"
432
- ```
433
-
434
- ## 🧩 Architecture Design
435
-
436
- ### Core Components
437
-
438
- ```
439
- ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
440
- │ FastAPI App │ │ MCP Server │ │ Configuration │
441
- │ │ │ │ │ Parser │
442
- └─────────────────┘ └─────────────────┘ └─────────────────┘
443
- │ │ │
444
- └───────────────────────┼───────────────────────┘
445
-
446
- ┌─────────────────┐
447
- │ LLMFlow Service │
448
- └─────────────────┘
449
-
450
- ┌─────────────────┐
451
- │ Pipeline │
452
- │ Context │
453
- └─────────────────┘
454
-
455
- ┌───────────────────┼───────────────────┐
456
- │ │ │
457
- ┌─────────────┐ ┌─────────────┐ ┌─────────────┐
458
- │ Operations │ │ Tools │ │Vector Stores│
459
- │ │ │ │ │ │
460
- │ • ReAct │ │ • Code │ │ • File │
461
- │ • Recall │ │ • Search │ │ • ES │
462
- │ • Update │ │ • MCP │ │ • Chroma │
463
- │ • Mock │ │ • Terminate │ │ │
464
- └─────────────┘ └─────────────┘ └─────────────┘
465
- ```
466
-
467
- ### Data Flow
468
-
469
- ```
470
- Request → Configuration → Pipeline → Operations → Tools/VectorStore → Response
471
- ```
472
-
473
- ## 🔧 Development Guide
474
-
475
- ### Custom Operations
476
-
477
- ```python
478
- from llmflow.op import OP_REGISTRY
479
- from llmflow.op.base_op import BaseOp
480
-
481
- @OP_REGISTRY.register()
482
- class CustomOp(BaseOp):
483
- def execute(self):
484
- # Implement your custom logic
485
- request = self.context.request
486
- response = self.context.response
487
-
488
- # Process request
489
- result = self.process_data(request.query)
490
-
491
- # Update response
492
- response.metadata["custom_result"] = result
493
- ```
494
-
495
- ### Custom Tools
496
-
497
- ```python
498
- from llmflow.tool import TOOL_REGISTRY
499
- from llmflow.tool.base_tool import BaseTool
500
-
501
- @TOOL_REGISTRY.register()
502
- class CustomTool(BaseTool):
503
- name: str = "custom_tool"
504
- description: str = "Custom tool description"
505
- parameters: dict = {
506
- "type": "object",
507
- "properties": {
508
- "input": {"type": "string", "description": "Input parameter"}
509
- },
510
- "required": ["input"]
511
- }
512
-
513
- def _execute(self, input: str, **kwargs):
514
- # Implement tool logic
515
- return f"Processing result: {input}"
516
- ```
517
-
518
- ### Custom Vector Stores
519
-
520
- ```python
521
- from llmflow.vector_store import VECTOR_STORE_REGISTRY
522
- from llmflow.vector_store.base_vector_store import BaseVectorStore
523
-
524
- @VECTOR_STORE_REGISTRY.register("custom_store")
525
- class CustomVectorStore(BaseVectorStore):
526
- def search(self, query: str, top_k: int = 10, **kwargs):
527
- # Implement search logic
528
- pass
529
-
530
- def insert(self, nodes: List[VectorNode], **kwargs):
531
- # Implement insertion logic
532
- pass
533
- ```
534
-
535
- ## 🧪 Testing
536
-
537
- ```bash
538
- # Run tests
539
- pytest
540
-
541
- # Run specific tests
542
- pytest tests/test_pipeline.py
543
-
544
- # Generate coverage report
545
- pytest --cov=llmflow tests/
546
- ```
547
-
548
- ## 🤝 Contributing
549
-
550
- We welcome community contributions! Please follow these steps:
551
-
552
- 1. Fork the repository
553
- 2. Create a feature branch (`git checkout -b feature/AmazingFeature`)
554
- 3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
555
- 4. Push to the branch (`git push origin feature/AmazingFeature`)
556
- 5. Open a Pull Request
557
-
558
- ### Development Environment Setup
559
-
560
- ```bash
561
- # Install development dependencies
562
- pip install -e ".[dev]"
563
-
564
- # Install pre-commit hooks
565
- pre-commit install
566
-
567
- # Run code formatting
568
- black llmflow/
569
- isort llmflow/
570
-
571
- # Run type checking
572
- mypy llmflow/
573
- ```
574
-
575
- ## 📚 Documentation
576
-
577
- - [API Documentation](docs/api.md)
578
- - [Configuration Guide](docs/configuration.md)
579
- - [Operations Development](docs/operations.md)
580
- - [Tools Development](docs/tools.md)
581
- - [Deployment Guide](docs/deployment.md)
582
-
583
- ## 🐛 Bug Reports
584
-
585
- If you find bugs or have feature requests, please create an issue on [GitHub Issues](https://github.com/your-username/llmflow/issues).
586
-
587
- ## 📄 License
588
-
589
- This project is licensed under the Apache License 2.0. See the [LICENSE](LICENSE) file for details.
590
-
591
- ## 🙏 Acknowledgments
592
-
593
- Thanks to all developers and community members who have contributed to the LLMFlow project.
594
-
595
- ---
596
-
597
- **LLMFlow** - Making AI workflow development simple and powerful 🚀
@@ -0,0 +1,21 @@
import os

from flowllm.utils.common_utils import load_env

# Load environment variables before importing submodules, since some of them
# presumably read configuration (API keys, endpoints) at import time —
# NOTE(review): confirm against flowllm.utils.common_utils.load_env.
load_env()

from flowllm import embedding_model
from flowllm import llm
from flowllm import storage

# Importing these packages triggers registration of the built-in flows/ops.
# When FLOW_USE_FRAMEWORK=true they are skipped — presumably so an embedding
# application can supply its own implementations; TODO confirm intent.
# (Fixed idiom: `x != y` instead of `not x == y`.)
if os.environ.get("FLOW_USE_FRAMEWORK", "").lower() != "true":
    from flowllm import flow
    from flowllm import op

from flowllm import service

from flowllm.context.service_context import C
from flowllm.op import BaseOp, BaseRayOp, BaseLLMOp

__version__ = "0.1.2"
@@ -0,0 +1,15 @@
import sys

from flowllm.service.base_service import BaseService


def main():
    """Command-line entry point.

    Builds the service selected by the CLI arguments (everything after the
    program name) and runs it inside a context manager so resources are
    released on exit.
    """
    cli_args = sys.argv[1:]
    with BaseService.get_service(*cli_args) as svc:
        svc()


if __name__ == "__main__":
    main()

# python -m build
# twine upload dist/*
@@ -0,0 +1,25 @@
"""Client implementations for FlowLLM services.

Exposed clients:

- ``HttpClient`` / ``AsyncHttpClient``: sync and async HTTP clients for the
  FlowLLM HTTP service.
- ``MCPClient``: async client for the FlowLLM MCP (Model Context Protocol)
  service.
- ``SyncMCPClient``: synchronous convenience wrapper around ``MCPClient``.

All clients can execute tool flows, list the flows a service offers, and
run health checks against it.
"""

from .async_http_client import AsyncHttpClient
from .http_client import HttpClient
from .mcp_client import MCPClient
from .sync_mcp_client import SyncMCPClient

__all__ = [
    "HttpClient",
    "AsyncHttpClient",
    "MCPClient",
    "SyncMCPClient"
]
@@ -0,0 +1,81 @@
from typing import Dict

import httpx

from flowllm.schema.flow_response import FlowResponse


class AsyncHttpClient:
    """Asynchronous client for the FlowLLM HTTP service."""

    def __init__(self, base_url: str = "http://localhost:8001", timeout: float = 3600.0):
        """Create the client.

        Args:
            base_url: Base URL of the FlowLLM HTTP service.
            timeout: Request timeout in seconds.
        """
        # Normalize away a trailing slash so endpoint URLs join cleanly.
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.client = httpx.AsyncClient(timeout=timeout)

    async def __aenter__(self):
        """Enter the ``async with`` block, yielding this client."""
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Leave the ``async with`` block, closing the underlying client."""
        await self.client.aclose()

    async def close(self):
        """Close the underlying HTTP connection explicitly."""
        await self.client.aclose()

    async def health_check(self) -> Dict[str, str]:
        """Query the service's ``/health`` endpoint.

        Returns:
            The health-status payload decoded from JSON.

        Raises:
            httpx.HTTPStatusError: If the service responds with an error status.
        """
        resp = await self.client.get(f"{self.base_url}/health")
        resp.raise_for_status()
        return resp.json()

    async def execute_tool_flow(self, flow_name: str, **kwargs) -> FlowResponse:
        """Run a named tool flow on the service.

        Args:
            flow_name: Name of the tool flow to execute.
            **kwargs: Parameters forwarded to the flow as the JSON body.

        Returns:
            The parsed :class:`FlowResponse`.

        Raises:
            httpx.HTTPStatusError: If the request or flow execution fails.
        """
        endpoint = f"{self.base_url}/{flow_name}"
        resp = await self.client.post(endpoint, json=kwargs)
        resp.raise_for_status()
        return FlowResponse(**resp.json())

    async def list_tool_flows(self) -> list:
        """Fetch the service's ``/list`` endpoint.

        Returns:
            The available tool flows as decoded from the JSON response.

        Raises:
            httpx.HTTPStatusError: If the service is unreachable or errors.
        """
        resp = await self.client.get(f"{self.base_url}/list")
        resp.raise_for_status()
        return resp.json()
@@ -0,0 +1,81 @@
from typing import Dict

import httpx

from flowllm.schema.flow_response import FlowResponse


class HttpClient:
    """Synchronous client for the FlowLLM HTTP service."""

    def __init__(self, base_url: str = "http://localhost:8001", timeout: float = 3600.0):
        """Create the client.

        Args:
            base_url: Base URL of the FlowLLM HTTP service.
            timeout: Request timeout in seconds.
        """
        # Normalize away a trailing slash so endpoint URLs join cleanly.
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.client = httpx.Client(timeout=timeout)

    def __enter__(self):
        """Enter the ``with`` block, yielding this client."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Leave the ``with`` block, closing the underlying client."""
        self.client.close()

    def close(self):
        """Close the underlying HTTP connection explicitly."""
        self.client.close()

    def health_check(self) -> Dict[str, str]:
        """Query the service's ``/health`` endpoint.

        Returns:
            The health-status payload decoded from JSON.

        Raises:
            httpx.HTTPStatusError: If the service responds with an error status.
        """
        resp = self.client.get(f"{self.base_url}/health")
        resp.raise_for_status()
        return resp.json()

    def execute_tool_flow(self, flow_name: str, **kwargs) -> FlowResponse:
        """Run a named tool flow on the service.

        Args:
            flow_name: Name of the tool flow to execute.
            **kwargs: Parameters forwarded to the flow as the JSON body.

        Returns:
            The parsed :class:`FlowResponse`.

        Raises:
            httpx.HTTPStatusError: If the request or flow execution fails.
        """
        endpoint = f"{self.base_url}/{flow_name}"
        resp = self.client.post(endpoint, json=kwargs)
        resp.raise_for_status()
        return FlowResponse(**resp.json())

    def list_tool_flows(self) -> list:
        """Fetch the service's ``/list`` endpoint.

        Returns:
            The available tool flows as decoded from the JSON response.

        Raises:
            httpx.HTTPStatusError: If the service is unreachable or errors.
        """
        resp = self.client.get(f"{self.base_url}/list")
        resp.raise_for_status()
        return resp.json()