flowllm 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. flowllm/__init__.py +12 -0
  2. flowllm/app.py +25 -0
  3. flowllm/config/default_config.yaml +82 -0
  4. flowllm/config/pydantic_config_parser.py +242 -0
  5. flowllm/context/base_context.py +59 -0
  6. flowllm/context/flow_context.py +28 -0
  7. llmflow/op/prompt_mixin.py → flowllm/context/prompt_handler.py +25 -14
  8. flowllm/context/registry.py +26 -0
  9. flowllm/context/service_context.py +103 -0
  10. flowllm/embedding_model/__init__.py +1 -0
  11. {llmflow → flowllm}/embedding_model/base_embedding_model.py +2 -2
  12. {llmflow → flowllm}/embedding_model/openai_compatible_embedding_model.py +8 -8
  13. flowllm/flow_engine/__init__.py +1 -0
  14. flowllm/flow_engine/base_flow_engine.py +34 -0
  15. flowllm/flow_engine/simple_flow_engine.py +213 -0
  16. flowllm/llm/__init__.py +1 -0
  17. {llmflow → flowllm}/llm/base_llm.py +16 -24
  18. {llmflow → flowllm}/llm/openai_compatible_llm.py +64 -108
  19. flowllm/op/__init__.py +3 -0
  20. flowllm/op/akshare/get_ak_a_code_op.py +116 -0
  21. flowllm/op/akshare/get_ak_a_code_prompt.yaml +21 -0
  22. flowllm/op/akshare/get_ak_a_info_op.py +143 -0
  23. flowllm/op/base_op.py +169 -0
  24. flowllm/op/llm_base_op.py +63 -0
  25. flowllm/op/mock_op.py +42 -0
  26. flowllm/op/parallel_op.py +30 -0
  27. flowllm/op/sequential_op.py +29 -0
  28. flowllm/schema/flow_response.py +12 -0
  29. flowllm/schema/message.py +35 -0
  30. flowllm/schema/service_config.py +76 -0
  31. flowllm/schema/tool_call.py +110 -0
  32. flowllm/service/__init__.py +2 -0
  33. flowllm/service/base_service.py +59 -0
  34. flowllm/service/http_service.py +87 -0
  35. flowllm/service/mcp_service.py +45 -0
  36. flowllm/storage/__init__.py +1 -0
  37. flowllm/storage/vector_store/__init__.py +3 -0
  38. flowllm/storage/vector_store/base_vector_store.py +44 -0
  39. {llmflow → flowllm/storage}/vector_store/chroma_vector_store.py +11 -10
  40. {llmflow → flowllm/storage}/vector_store/es_vector_store.py +10 -9
  41. llmflow/vector_store/file_vector_store.py → flowllm/storage/vector_store/local_vector_store.py +110 -10
  42. flowllm/utils/common_utils.py +64 -0
  43. flowllm/utils/dataframe_cache.py +331 -0
  44. flowllm/utils/fetch_url.py +113 -0
  45. {llmflow → flowllm}/utils/timer.py +5 -4
  46. {flowllm-0.1.0.dist-info → flowllm-0.1.1.dist-info}/METADATA +31 -27
  47. flowllm-0.1.1.dist-info/RECORD +62 -0
  48. flowllm-0.1.1.dist-info/entry_points.txt +4 -0
  49. {flowllm-0.1.0.dist-info → flowllm-0.1.1.dist-info}/licenses/LICENSE +1 -1
  50. flowllm-0.1.1.dist-info/top_level.txt +1 -0
  51. flowllm-0.1.0.dist-info/RECORD +0 -66
  52. flowllm-0.1.0.dist-info/entry_points.txt +0 -3
  53. flowllm-0.1.0.dist-info/top_level.txt +0 -1
  54. llmflow/app.py +0 -53
  55. llmflow/config/config_parser.py +0 -80
  56. llmflow/config/mock_config.yaml +0 -58
  57. llmflow/embedding_model/__init__.py +0 -5
  58. llmflow/enumeration/agent_state.py +0 -8
  59. llmflow/llm/__init__.py +0 -5
  60. llmflow/mcp_server.py +0 -110
  61. llmflow/op/__init__.py +0 -10
  62. llmflow/op/base_op.py +0 -125
  63. llmflow/op/mock_op.py +0 -40
  64. llmflow/op/react/react_v1_op.py +0 -88
  65. llmflow/op/react/react_v1_prompt.yaml +0 -28
  66. llmflow/op/vector_store/__init__.py +0 -13
  67. llmflow/op/vector_store/recall_vector_store_op.py +0 -48
  68. llmflow/op/vector_store/update_vector_store_op.py +0 -28
  69. llmflow/op/vector_store/vector_store_action_op.py +0 -46
  70. llmflow/pipeline/pipeline.py +0 -94
  71. llmflow/pipeline/pipeline_context.py +0 -37
  72. llmflow/schema/app_config.py +0 -69
  73. llmflow/schema/experience.py +0 -144
  74. llmflow/schema/message.py +0 -68
  75. llmflow/schema/request.py +0 -32
  76. llmflow/schema/response.py +0 -29
  77. llmflow/service/__init__.py +0 -0
  78. llmflow/service/llmflow_service.py +0 -96
  79. llmflow/tool/__init__.py +0 -9
  80. llmflow/tool/base_tool.py +0 -80
  81. llmflow/tool/code_tool.py +0 -43
  82. llmflow/tool/dashscope_search_tool.py +0 -162
  83. llmflow/tool/mcp_tool.py +0 -77
  84. llmflow/tool/tavily_search_tool.py +0 -109
  85. llmflow/tool/terminate_tool.py +0 -23
  86. llmflow/utils/__init__.py +0 -0
  87. llmflow/utils/common_utils.py +0 -17
  88. llmflow/utils/file_handler.py +0 -25
  89. llmflow/utils/http_client.py +0 -156
  90. llmflow/utils/op_utils.py +0 -102
  91. llmflow/utils/registry.py +0 -33
  92. llmflow/vector_store/__init__.py +0 -7
  93. llmflow/vector_store/base_vector_store.py +0 -136
  94. {llmflow → flowllm/config}/__init__.py +0 -0
  95. {llmflow/config → flowllm/context}/__init__.py +0 -0
  96. {llmflow → flowllm}/enumeration/__init__.py +0 -0
  97. {llmflow → flowllm}/enumeration/chunk_enum.py +0 -0
  98. {llmflow → flowllm}/enumeration/http_enum.py +0 -0
  99. {llmflow → flowllm}/enumeration/role.py +0 -0
  100. {llmflow/op/react → flowllm/op/akshare}/__init__.py +0 -0
  101. {llmflow/pipeline → flowllm/schema}/__init__.py +0 -0
  102. {llmflow → flowllm}/schema/vector_node.py +0 -0
  103. {llmflow/schema → flowllm/utils}/__init__.py +0 -0
  104. {llmflow → flowllm}/utils/singleton.py +0 -0
  105. {flowllm-0.1.0.dist-info → flowllm-0.1.1.dist-info}/WHEEL +0 -0
llmflow/schema/experience.py DELETED
@@ -1,144 +0,0 @@
- import datetime
- from abc import ABC
- from typing import List
- from uuid import uuid4
-
- from loguru import logger
- from pydantic import BaseModel, Field
-
- from llmflow.schema.vector_node import VectorNode
-
-
- class ExperienceMeta(BaseModel):
-     author: str = Field(default="")
-     created_time: str = Field(default_factory=lambda: datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
-     modified_time: str = Field(default_factory=lambda: datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
-     extra_info: dict | None = Field(default=None)
-
-     def update_modified_time(self):
-         self.modified_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-
- class BaseExperience(BaseModel, ABC):
-     workspace_id: str = Field(default="")
-
-     experience_id: str = Field(default_factory=lambda: uuid4().hex)
-     experience_type: str = Field(default="")
-
-     when_to_use: str = Field(default="")
-     content: str | bytes = Field(default="")
-     score: float | None = Field(default=None)
-     metadata: ExperienceMeta = Field(default_factory=ExperienceMeta)
-
-     def to_vector_node(self) -> VectorNode:
-         raise NotImplementedError
-
-     @classmethod
-     def from_vector_node(cls, node: VectorNode):
-         raise NotImplementedError
-
-
- class TextExperience(BaseExperience):
-     experience_type: str = Field(default="text")
-
-     def to_vector_node(self) -> VectorNode:
-         return VectorNode(unique_id=self.experience_id,
-                           workspace_id=self.workspace_id,
-                           content=self.when_to_use,
-                           metadata={
-                               "experience_type": self.experience_type,
-                               "experience_content": self.content,
-                               "score": self.score,
-                               "metadata": self.metadata.model_dump(),
-                           })
-
-     @classmethod
-     def from_vector_node(cls, node: VectorNode):
-         return cls(workspace_id=node.workspace_id,
-                    experience_id=node.unique_id,
-                    experience_type=node.metadata.get("experience_type"),
-                    when_to_use=node.content,
-                    content=node.metadata.get("experience_content"),
-                    score=node.metadata.get("score"),
-                    metadata=node.metadata.get("metadata"))
-
-
- class FunctionArg(BaseModel):
-     arg_name: str = Field(default=...)
-     arg_type: str = Field(default=...)
-     required: bool = Field(default=True)
-
-
- class Function(BaseModel):
-     func_code: str = Field(default=..., description="function code")
-     func_name: str = Field(default=..., description="function name")
-     func_args: List[FunctionArg] = Field(default_factory=list)
-
-
- class FuncExperience(BaseExperience):
-     experience_type: str = Field(default="function")
-     functions: List[Function] = Field(default_factory=list)
-
-
- class PersonalExperience(BaseExperience):
-     experience_type: str = Field(default="personal")
-     person: str = Field(default="")
-     topic: str = Field(default="")
-
-
- class KnowledgeExperience(BaseExperience):
-     experience_type: str = Field(default="knowledge")
-     topic: str = Field(default="")
-
-
- def vector_node_to_experience(node: VectorNode) -> BaseExperience:
-     experience_type = node.metadata.get("experience_type")
-     if experience_type == "text":
-         return TextExperience.from_vector_node(node)
-
-     elif experience_type == "function":
-         return FuncExperience.from_vector_node(node)
-
-     elif experience_type == "personal":
-         return PersonalExperience.from_vector_node(node)
-
-     elif experience_type == "knowledge":
-         return KnowledgeExperience.from_vector_node(node)
-
-     else:
-         logger.warning(f"experience type {experience_type} not supported")
-         return TextExperience.from_vector_node(node)
-
-
- def dict_to_experience(experience_dict: dict) -> BaseExperience:
-     experience_type = experience_dict.get("experience_type", "text")
-     if experience_type == "text":
-         return TextExperience(**experience_dict)
-
-     elif experience_type == "function":
-         return FuncExperience(**experience_dict)
-
-     elif experience_type == "personal":
-         return PersonalExperience(**experience_dict)
-
-     elif experience_type == "knowledge":
-         return KnowledgeExperience(**experience_dict)
-
-     else:
-         logger.warning(f"experience type {experience_type} not supported")
-         return TextExperience(**experience_dict)
-
-
- if __name__ == "__main__":
-     e1 = TextExperience(
-         workspace_id="w_1024",
-         experience_id="123",
-         when_to_use="test case use",
-         content="test content",
-         score=0.99,
-         metadata=ExperienceMeta(author="user"))
-     print(e1.model_dump_json(indent=2))
-     v1 = e1.to_vector_node()
-     print(v1.model_dump_json(indent=2))
-     e2 = vector_node_to_experience(v1)
-     print(e2.model_dump_json(indent=2))
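Note: the deleted module closes with its own __main__ round-trip demo; as a complementary usage sketch (field values here are illustrative, not taken from the diff), the dict-based dispatch helper behaves as follows:

# Unknown experience types fall back to TextExperience with a warning.
exp = dict_to_experience({"experience_type": "knowledge", "topic": "vector stores"})
assert isinstance(exp, KnowledgeExperience)

# VectorNode round trip: content travels in metadata["experience_content"].
node = TextExperience(when_to_use="query text", content="answer text").to_vector_node()
assert vector_node_to_experience(node).content == "answer text"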
llmflow/schema/message.py DELETED
@@ -1,68 +0,0 @@
- import json
- from typing import List
-
- from pydantic import BaseModel, Field, model_validator
-
- from llmflow.enumeration.role import Role
-
-
- class ToolCall(BaseModel):
-     index: int = Field(default=0)
-     id: str = Field(default="")
-     name: str = Field(default="")
-     arguments: str = Field(default="")
-     type: str = Field(default="function")
-
-     @model_validator(mode="before")  # noqa
-     @classmethod
-     def init_tool_call(cls, data: dict):
-         tool_type = data.get("type", "")
-         tool_type_dict = data.get(tool_type, {})
-
-         for key in ["name", "arguments"]:
-             if key not in data:
-                 data[key] = tool_type_dict.get(key, "")
-         return data
-
-     @property
-     def argument_dict(self) -> dict:
-         return json.loads(self.arguments)
-
-     def simple_dump(self) -> dict:
-         return {
-             "id": self.id,
-             self.type: {
-                 "arguments": self.arguments,
-                 "name": self.name
-             },
-             "type": self.type,
-             "index": self.index,
-         }
-
- class Message(BaseModel):
-     role: Role = Field(default=Role.USER)
-     content: str | bytes = Field(default="")
-     reasoning_content: str = Field(default="")
-     tool_calls: List[ToolCall] = Field(default_factory=list)
-     tool_call_id: str = Field(default="")
-     metadata: dict = Field(default_factory=dict)
-
-     def simple_dump(self, add_reason_when_empty: bool = True) -> dict:
-         result: dict
-         if self.content:
-             result = {"role": self.role.value, "content": self.content}
-         elif add_reason_when_empty and self.reasoning_content:
-             result = {"role": self.role.value, "content": self.reasoning_content}
-         else:
-             result = {"role": self.role.value, "content": ""}
-
-         if self.tool_calls:
-             result["tool_calls"] = [x.simple_dump() for x in self.tool_calls]
-         return result
-
-
- class Trajectory(BaseModel):
-     task_id: str = Field(default="")
-     messages: List[Message] = Field(default_factory=list)
-     score: float = Field(default=0.0)
-     metadata: dict = Field(default_factory=dict)
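Note: a minimal sketch of how the deleted ToolCall model's "before" validator flattens a nested OpenAI-style tool-call payload, and how simple_dump() re-nests it under self.type (payload values are illustrative):

tc = ToolCall(id="call_1", type="function",
              function={"name": "web_search", "arguments": '{"query": "ai"}'})
assert tc.name == "web_search"
assert tc.argument_dict == {"query": "ai"}
assert tc.simple_dump()["function"]["name"] == "web_search"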
llmflow/schema/request.py DELETED
@@ -1,32 +0,0 @@
- from typing import List
-
- from pydantic import BaseModel, Field
-
- from llmflow.schema.message import Message, Trajectory
-
-
- class BaseRequest(BaseModel):
-     workspace_id: str = Field(default="default")
-     config: dict = Field(default_factory=dict)
-
-
- class RetrieverRequest(BaseRequest):
-     query: str = Field(default="")
-     messages: List[Message] = Field(default_factory=list)
-     top_k: int = Field(default=1)
-
-
- class SummarizerRequest(BaseRequest):
-     traj_list: List[Trajectory] = Field(default_factory=list)
-
-
- class VectorStoreRequest(BaseRequest):
-     action: str = Field(default="")
-     src_workspace_id: str = Field(default="")
-     path: str = Field(default="")
-
-
- class AgentRequest(BaseRequest):
-     query: str = Field(default="")
-     messages: List[Message] = Field(default_factory=list)
-
llmflow/schema/response.py DELETED
@@ -1,29 +0,0 @@
- from typing import List
-
- from pydantic import BaseModel, Field
-
- from llmflow.schema.experience import BaseExperience
- from llmflow.schema.message import Message
-
-
- class BaseResponse(BaseModel):
-     success: bool = Field(default=True)
-     metadata: dict = Field(default_factory=dict)
-
-
- class RetrieverResponse(BaseResponse):
-     experience_list: List[BaseExperience] = Field(default_factory=list)
-     experience_merged: str = Field(default="")
-
-
- class SummarizerResponse(BaseResponse):
-     experience_list: List[BaseExperience] = Field(default_factory=list)
-     deleted_experience_ids: List[str] = Field(default_factory=list)
-
-
- class VectorStoreResponse(BaseResponse):
-     ...
-
- class AgentResponse(BaseResponse):
-     answer: str = Field(default="")
-     messages: List[Message] = Field(default_factory=list)
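Note: these request/response models pair up per service API; the dispatch that matches them is visible in llmflow_service.py below. A usage sketch with illustrative values:

req = RetrieverRequest(workspace_id="w_1024", query="how to cache dataframes?", top_k=3)
resp = RetrieverResponse()
assert req.config == {} and resp.success  # defaults from BaseRequest/BaseResponse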
llmflow/service/__init__.py DELETED
File without changes
llmflow/service/llmflow_service.py DELETED
@@ -1,96 +0,0 @@
- from concurrent.futures import ThreadPoolExecutor
- from typing import List
-
- from loguru import logger
-
- from llmflow.config.config_parser import ConfigParser
- from llmflow.embedding_model import EMBEDDING_MODEL_REGISTRY
- from llmflow.pipeline.pipeline import Pipeline
- from llmflow.pipeline.pipeline_context import PipelineContext
- from llmflow.schema.app_config import AppConfig, HttpServiceConfig, EmbeddingModelConfig
- from llmflow.schema.request import SummarizerRequest, RetrieverRequest, VectorStoreRequest, AgentRequest, \
-     BaseRequest
- from llmflow.schema.response import SummarizerResponse, RetrieverResponse, VectorStoreResponse, AgentResponse, \
-     BaseResponse
- from llmflow.vector_store import VECTOR_STORE_REGISTRY
-
-
- class LLMFlowService:
-
-     def __init__(self, args: List[str]):
-         self.config_parser = ConfigParser(args)
-         self.init_app_config: AppConfig = self.config_parser.get_app_config()
-         self.thread_pool = ThreadPoolExecutor(max_workers=self.init_app_config.thread_pool.max_workers)
-
-         # The vectorstore is initialized at the very beginning and then used directly afterward.
-         self.vector_store_dict: dict = {}
-         for name, config in self.init_app_config.vector_store.items():
-             assert config.backend in VECTOR_STORE_REGISTRY, f"backend={config.backend} is not existed"
-             vector_store_cls = VECTOR_STORE_REGISTRY[config.backend]
-
-             assert config.embedding_model in self.init_app_config.embedding_model, \
-                 f"embedding_model={config.embedding_model} is not existed"
-             embedding_model_config: EmbeddingModelConfig = self.init_app_config.embedding_model[config.embedding_model]
-
-             assert embedding_model_config.backend in EMBEDDING_MODEL_REGISTRY, \
-                 f"embedding_model={embedding_model_config.backend} is not existed"
-             embedding_model_cls = EMBEDDING_MODEL_REGISTRY[embedding_model_config.backend]
-             embedding_model = embedding_model_cls(model_name=embedding_model_config.model_name,
-                                                   **embedding_model_config.params)
-
-             self.vector_store_dict[name] = vector_store_cls(embedding_model=embedding_model, **config.params)
-
-     @property
-     def http_service_config(self) -> HttpServiceConfig:
-         return self.init_app_config.http_service
-
-     def __call__(self, api: str, request: dict | BaseRequest) -> BaseResponse:
-         if isinstance(request, dict):
-             app_config: AppConfig = self.config_parser.get_app_config(**request["config"])
-         else:
-             app_config: AppConfig = self.config_parser.get_app_config(**request.config)
-
-         if api == "retriever":
-             if isinstance(request, dict):
-                 request = RetrieverRequest(**request)
-             response = RetrieverResponse()
-             pipeline = app_config.api.retriever
-
-         elif api == "summarizer":
-             if isinstance(request, dict):
-                 request = SummarizerRequest(**request)
-             response = SummarizerResponse()
-             pipeline = app_config.api.summarizer
-
-         elif api == "vector_store":
-             if isinstance(request, dict):
-                 request = VectorStoreRequest(**request)
-             response = VectorStoreResponse()
-             pipeline = app_config.api.vector_store
-
-         elif api == "agent":
-             if isinstance(request, dict):
-                 request = AgentRequest(**request)
-             response = AgentResponse()
-             pipeline = app_config.api.agent
-
-         else:
-             raise RuntimeError(f"Invalid service.api={api}")
-
-         logger.info(f"request={request.model_dump_json()}")
-
-         try:
-             context = PipelineContext(app_config=app_config,
-                                       thread_pool=self.thread_pool,
-                                       request=request,
-                                       response=response,
-                                       vector_store_dict=self.vector_store_dict)
-             pipeline = Pipeline(pipeline=pipeline, context=context)
-             pipeline()
-
-         except Exception as e:
-             logger.exception(f"api={api} encounter error={e.args}")
-             response.success = False
-             response.metadata["error"] = str(e)
-
-         return response
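Note: dict-shaped requests must include a "config" key, because __call__ reads request["config"] unconditionally before dispatching. A minimal usage sketch, assuming a config resolvable from empty args (constructor arguments and field values are illustrative):

service = LLMFlowService(args=[])
response = service("retriever", {"config": {}, "workspace_id": "w_1024", "query": "python caching"})
if not response.success:
    print(response.metadata["error"])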
llmflow/tool/__init__.py DELETED
@@ -1,9 +0,0 @@
- from llmflow.utils.registry import Registry
-
- TOOL_REGISTRY = Registry()
-
- from llmflow.tool.code_tool import CodeTool
- from llmflow.tool.dashscope_search_tool import DashscopeSearchTool
- from llmflow.tool.tavily_search_tool import TavilySearchTool
- from llmflow.tool.terminate_tool import TerminateTool
- from llmflow.tool.mcp_tool import MCPTool
llmflow/tool/base_tool.py DELETED
@@ -1,80 +0,0 @@
- from abc import ABC
-
- from loguru import logger
- from pydantic import BaseModel, Field
-
-
- class BaseTool(BaseModel, ABC):
-     tool_id: str = Field(default="")
-     name: str = Field(..., description="tool name")
-     description: str = Field(..., description="tool description")
-     tool_type: str = Field(default="function")
-     parameters: dict = Field(default_factory=dict, description="tool parameters")
-     arguments: dict = Field(default_factory=dict, description="execute arguments")
-
-     enable_cache: bool = Field(default=False, description="whether to cache the tool result")
-     cached_result: dict = Field(default_factory=dict, description="tool execution result")
-
-     max_retries: int = Field(default=3, description="max retries")
-     raise_exception: bool = Field(default=True, description="raise exception")
-     success: bool = Field(default=True, description="whether the tool executed successfully")
-
-     def reset(self):
-         self.arguments.clear()
-         self.cached_result.clear()
-         self.success = True
-
-     def _execute(self, **kwargs):
-         raise NotImplementedError
-
-     def execute(self, **kwargs):
-         cache_id = ""
-         if self.enable_cache:
-             cache_id = self.get_cache_id(**kwargs)
-             if cache_id in self.cached_result:
-                 return self.cached_result[cache_id]
-
-         for i in range(self.max_retries):
-             try:
-                 if self.enable_cache:
-                     self.cached_result[cache_id] = self._execute(**kwargs)
-                     return self.cached_result[cache_id]
-
-                 else:
-                     return self._execute(**kwargs)
-
-             except Exception as e:
-                 logger.exception(f"using tool.name={self.name} encounter error with e={e.args}")
-                 if i == self.max_retries - 1 and self.raise_exception:
-                     raise e
-
-         return None
-
-
-     def simple_dump(self) -> dict:
-         """
-         It may be in other different tool params formats; different versions are completed here.
-         """
-         return {
-             "type": self.tool_type,
-             self.tool_type: {
-                 "name": self.name,
-                 "description": self.description,
-                 "parameters": self.parameters,
-             },
-         }
-
-     @property
-     def input_schema(self) -> dict:
-         return self.parameters.get("properties", {})
-
-     @property
-     def output_schema(self) -> dict:
-         raise NotImplementedError
-
-     def refresh(self):
-         # for mcp
-         raise NotImplementedError
-
-     def get_cache_id(self, **kwargs) -> str:
-         raise NotImplementedError
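Note: a hypothetical subclass sketching the execute() contract of the deleted BaseTool; EchoTool and its fields are invented for illustration. _execute() supplies the behavior, and get_cache_id() keys the cache when enable_cache is set:

class EchoTool(BaseTool):
    name: str = "echo"
    description: str = "return the input text unchanged"
    enable_cache: bool = True

    def _execute(self, text: str = "", **kwargs):
        return text

    def get_cache_id(self, **kwargs) -> str:
        return repr(sorted(kwargs.items()))

tool = EchoTool()
print(tool.execute(text="hi"))  # computed by _execute and stored in cached_result
print(tool.execute(text="hi"))  # second call is served from cached_result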
llmflow/tool/code_tool.py DELETED
@@ -1,43 +0,0 @@
- import sys
- from io import StringIO
-
- from llmflow.tool import TOOL_REGISTRY
- from llmflow.tool.base_tool import BaseTool
-
-
- @TOOL_REGISTRY.register()
- class CodeTool(BaseTool):
-     name: str = "python_execute"
-     description: str = "Execute python code can be used in scenarios such as analysis or calculation, and the final result can be printed using the `print` function."
-     parameters: dict = {
-         "type": "object",
-         "properties": {
-             "code": {
-                 "type": "string",
-                 "description": "code to be executed. Please do not execute any matplotlib code here.",
-             }
-         },
-         "required": ["code"]
-     }
-
-     def _execute(self, code: str, **kwargs):
-         old_stdout = sys.stdout
-         redirected_output = sys.stdout = StringIO()
-
-         try:
-             exec(code)
-             result = redirected_output.getvalue()
-
-         except Exception as e:
-             self.success = False
-             result = str(e)
-
-         sys.stdout = old_stdout
-
-         return result
-
-
- if __name__ == '__main__':
-     tool = CodeTool()
-     print(tool.execute(code="print('Hello World')"))
-     print(tool.execute(code="print('Hello World!'"))
llmflow/tool/dashscope_search_tool.py DELETED
@@ -1,162 +0,0 @@
- import os
- from typing import Literal
-
- import dashscope
- from dashscope.api_entities.dashscope_response import Message
- from dotenv import load_dotenv
- from loguru import logger
- from pydantic import Field
-
- from llmflow.tool import TOOL_REGISTRY
- from llmflow.tool.base_tool import BaseTool
-
-
- @TOOL_REGISTRY.register()
- class DashscopeSearchTool(BaseTool):
-     name: str = "web_search"
-     description: str = "Use search keywords to retrieve relevant information from the internet. " \
-                        "If there are multiple search keywords, please use each keyword separately to call this tool."
-     parameters: dict = {
-         "type": "object",
-         "properties": {
-             "query": {
-                 "type": "string",
-                 "description": "search keyword",
-             }
-         },
-         "required": ["query"]
-     }
-
-     model_name: Literal["qwen-plus-2025-04-28", "qwq-plus-latest", "qwen-max-2025-01-25"] = \
-         Field(default="qwen-plus-2025-04-28")
-     api_key: str = Field(default_factory=lambda: os.environ["DASHSCOPE_API_KEY"])
-     stream_print: bool = Field(default=False)
-     temperature: float = Field(default=0.0000001)
-     use_role_prompt: bool = Field(default=True)
-     role_prompt: str = """
- # user's question
- {question}
-
- # task
- Extract the original content related to the user's question directly from the context, maintain accuracy, and avoid excessive processing. """.strip()
-     return_only_content: bool = Field(default=True)
-
-     def parse_reasoning_response(self, response, result: dict):
-         is_answering = False
-         is_first_chunk = True
-
-         for chunk in response:
-             if is_first_chunk:
-                 result["search_results"] = chunk.output.search_info["search_results"]
-
-                 if self.stream_print:
-                     print("=" * 20 + "search result" + "=" * 20)
-                     for web in result["search_results"]:
-                         print(f"[{web['index']}]: [{web['title']}]({web['url']})")
-                     print("=" * 20 + "thinking process" + "=" * 20)
-                 result["reasoning_content"] += chunk.output.choices[0].message.reasoning_content
-
-                 if self.stream_print:
-                     print(chunk.output.choices[0].message.reasoning_content, end="", flush=True)
-                 is_first_chunk = False
-
-             else:
-                 if chunk.output.choices[0].message.content == "" \
-                         and chunk.output.choices[0].message.reasoning_content == "":
-                     pass
-
-                 else:
-                     if chunk.output.choices[0].message.reasoning_content != "" and \
-                             chunk.output.choices[0].message.content == "":
-
-                         if self.stream_print:
-                             print(chunk.output.choices[0].message.reasoning_content, end="", flush=True)
-                         result["reasoning_content"] += chunk.output.choices[0].message.reasoning_content
-
-                     elif chunk.output.choices[0].message.content != "":
-                         if not is_answering:
-                             if self.stream_print:
-                                 print("\n" + "=" * 20 + "complete answer" + "=" * 20)
-                             is_answering = True
-
-                         if self.stream_print:
-                             print(chunk.output.choices[0].message.content, end="", flush=True)
-                         result["answer_content"] += chunk.output.choices[0].message.content
-
-     def parse_response(self, response, result: dict):
-         is_first_chunk = True
-
-         for chunk in response:
-             if is_first_chunk:
-                 result["search_results"] = chunk.output.search_info["search_results"]
-
-                 if self.stream_print:
-                     print("=" * 20 + "search result" + "=" * 20)
-                     for web in result["search_results"]:
-                         print(f"[{web['index']}]: [{web['title']}]({web['url']})")
-                     print("\n" + "=" * 20 + "complete answer" + "=" * 20)
-                 is_first_chunk = False
-
-             else:
-                 if chunk.output.choices[0].message.content == "":
-                     pass
-
-                 else:
-                     if chunk.output.choices[0].message.content != "":
-                         if self.stream_print:
-                             print(chunk.output.choices[0].message.content, end="", flush=True)
-                         result["answer_content"] += chunk.output.choices[0].message.content
-
-     def execute(self, query: str = "", **kwargs):
-         result = {
-             "search_results": [],
-             "reasoning_content": "",
-             "answer_content": ""
-         }
-         user_query = self.role_prompt.format(question=query) if self.use_role_prompt else query
-         messages = [Message(role="user", content=user_query)]
-
-         response = dashscope.Generation.call(
-             api_key=self.api_key,
-             model=self.model_name,
-             messages=messages,
-             enable_thinking=True,
-             enable_search=True,
-             search_options={
-                 "forced_search": True,
-                 "enable_source": True,
-                 "enable_citation": False,
-                 "search_strategy": "pro"
-             },
-             stream=True,
-             incremental_output=True,
-             result_format="message",
-         )
-
-         if self.model_name != "qwen-max-2025-01-25":
-             self.parse_reasoning_response(response, result)
-         else:
-             self.parse_response(response, result)
-
-         if self.return_only_content:
-             return result["answer_content"]
-         else:
-             return result
-
-
- def main():
-     load_dotenv()
-     query = "What is artificial intelligence?"
-
-     tool = DashscopeSearchTool(stream_print=True)
-     logger.info(tool.execute(query=query))
-
-     tool = DashscopeSearchTool(stream_print=False)
-     logger.info(tool.execute(query=query))
-
-     tool = DashscopeSearchTool(stream_print=True, model_name="qwen-max-2025-01-25")
-     logger.info(tool.execute(query=query))
-
-
- if __name__ == '__main__':
-     main()