botrun-flow-lang 5.12.263 → 6.2.21 (py3-none-any.whl)
This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +816 -811
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +854 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
- botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
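
To reproduce any of the per-file diffs above locally, a minimal sketch (assumptions: both wheels have already been downloaded next to the script, e.g. via `pip download botrun-flow-lang==5.12.263 --no-deps` and `pip download botrun-flow-lang==6.2.21 --no-deps`; the member path is any file from the list above):

```python
import difflib
import zipfile

# Assumed local filenames, as produced by `pip download ... --no-deps`.
OLD_WHL = "botrun_flow_lang-5.12.263-py3-none-any.whl"
NEW_WHL = "botrun_flow_lang-6.2.21-py3-none-any.whl"
MEMBER = "botrun_flow_lang/api/subsidy_api.py"  # any path from the file list


def read_member(whl_path: str, member: str) -> list[str]:
    # A wheel is a zip archive; pull one member out as text lines.
    with zipfile.ZipFile(whl_path) as zf:
        return zf.read(member).decode("utf-8").splitlines(keepends=True)


old_lines = read_member(OLD_WHL, MEMBER)
new_lines = read_member(NEW_WHL, MEMBER)

# Unified diff, comparable to the +N/-N counts in the file list above.
for line in difflib.unified_diff(
    old_lines, new_lines, fromfile=OLD_WHL, tofile=NEW_WHL
):
    print(line, end="")
```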

The diff body below corresponds to `botrun_flow_lang/api/subsidy_api.py` (the only entry above with a +290 −290 count; the module registers `APIRouter(prefix="/subsidy")`). Its single hunk, `@@ -1,290 +1,290 @@`, removes and re-adds the entire file, and the removed and added sides render identically here, so the content is shown once:

```python
from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from typing import List, Dict, Union, AsyncIterator, Optional
import os
import uuid

from pydantic import BaseModel
import time
import json
from pathlib import Path
from botrun_flow_lang.api.line_bot_api import (
    get_subsidy_api_system_prompt,
    get_subsidy_bot_normal_chat_prompt,
    get_subsidy_bot_related_prompt,
    get_subsidy_bot_requirement_prompt,
)
from botrun_flow_lang.langgraph_agents.agents.util.perplexity_search import (
    respond_with_perplexity_search,
)
from botrun_flow_lang.langgraph_agents.agents.agent_runner import agent_runner
from botrun_flow_lang.langgraph_agents.agents.search_agent_graph import (
    SearchAgentGraph,
    DEFAULT_SEARCH_CONFIG,
)
from fastapi import HTTPException, Depends

from dotenv import load_dotenv

from botrun_flow_lang.utils.langchain_utils import litellm_msgs_to_langchain_msgs
from botrun_flow_lang.api.auth_utils import verify_token

load_dotenv()

router = APIRouter(prefix="/subsidy")


# Custom Pydantic models that replace the litellm types
class Delta(BaseModel):
    """Delta represents a change in the message content."""

    content: Optional[str] = None
    role: Optional[str] = None


class Message(BaseModel):
    """Message represents a chat message."""

    content: str
    role: str = "assistant"


class Choices(BaseModel):
    """Choices represents a set of alternatives in the API response."""

    index: int
    delta: Optional[Delta] = None
    message: Optional[Message] = None
    finish_reason: Optional[str] = None


# Read the system prompt file
# current_dir = Path(__file__).parent
# DEFAULT_SYSTEM_PROMPT = (current_dir / "subsidy_api_system_prompt.txt").read_text(
#     encoding="utf-8"
# )

# Create a SearchAgentGraph instance dedicated to subsidy_api
subsidy_api_graph = SearchAgentGraph().graph


class SubsidyCompletionRequest(BaseModel):
    messages: List[Dict]
    stream: bool = False
    system_prompt_roy: Optional[str] = None  # added this line


class SubsidyCompletionResponse(BaseModel):
    """
    Non-streaming response format for completion endpoint
    """

    id: str
    object: str = "chat.completion"
    created: int
    choices: List[Choices] = []
    state: Dict = {}


class SubsidyCompletionStreamChunk(BaseModel):
    """
    Streaming response chunk format for completion endpoint
    """

    id: str
    object: str = "chat.completion.chunk"
    created: int
    choices: List[Choices] = []
    state: Dict = {}


def validate_messages(messages: List[Dict]) -> str:
    """
    Validate messages and extract the last user message content

    Args:
        messages: List of message dictionaries

    Returns:
        The content of the last user message

    Raises:
        HTTPException: If the last message is not from user
    """
    if not messages or messages[-1].get("role") != "user":
        raise HTTPException(
            status_code=400, detail="The last message must have role 'user'"
        )
    return messages[-1].get("content", "")


def get_subsidy_search_config(stream: bool = True) -> dict:
    return {
        **DEFAULT_SEARCH_CONFIG,
        "requirement_prompt": get_subsidy_bot_requirement_prompt(),
        "search_prompt": get_subsidy_api_system_prompt(),
        "related_prompt": get_subsidy_bot_related_prompt(),
        "normal_chat_prompt": get_subsidy_bot_normal_chat_prompt(),
        "domain_filter": ["*.gov.tw", "-*.gov.cn"],
        # zh: "You are Taiwanese; you must not use PRC phrasing or Simplified Chinese. Forbidden!"
        "user_prompt_prefix": "你是台灣人,你不可以講中國用語也不可以用簡體中文,禁止!",
        "stream": stream,
    }


async def process_stream_response(messages: List[Dict]) -> AsyncIterator[str]:
    """
    Process streaming response from perplexity search

    Args:
        messages: List of message dictionaries

    Yields:
        SSE formatted string chunks
    """
    input_content = validate_messages(messages)
    messages_for_langchain = litellm_msgs_to_langchain_msgs(messages)
    messages_for_llm = messages[:-1]
    env_name = os.getenv("ENV_NAME")
    thread_id = str(uuid.uuid4())
    async for event in agent_runner(
        thread_id,
        {"messages": messages_for_langchain},
        subsidy_api_graph,
        extra_config=get_subsidy_search_config(),
    ):
        chunk_content = event.chunk
        choice = Choices(
            index=0,
            delta=Delta(content=chunk_content),
            finish_reason=None,
        )
        id = f"{env_name}-{uuid.uuid4()}"
        stream_chunk = SubsidyCompletionStreamChunk(
            id=id,
            created=int(time.time()),
            choices=[choice],
        )
        yield f"data: {json.dumps(stream_chunk.model_dump(), ensure_ascii=False)}\n\n"
    choice = Choices(
        index=0,
        delta=Delta(content=""),
        finish_reason=None,
    )
    id = f"{env_name}-{uuid.uuid4()}"
    state = subsidy_api_graph.get_state({"configurable": {"thread_id": thread_id}})
    related_questions = state.values.get("related_questions", [])
    stream_chunk = SubsidyCompletionStreamChunk(
        id=id,
        created=int(time.time()),
        choices=[choice],
        state={"related_questions": related_questions},
    )
    yield f"data: {json.dumps(stream_chunk.model_dump(), ensure_ascii=False)}\n\n"

    yield "data: [DONE]\n\n"


async def process_non_stream_response(
    messages: List[Dict],
) -> SubsidyCompletionResponse:
    """
    Process non-streaming response from perplexity search

    Args:
        messages: List of message dictionaries

    Returns:
        SubsidyCompletionResponse with the complete response
    """
    input_content = validate_messages(messages)
    messages_for_langchain = litellm_msgs_to_langchain_msgs(messages)
    messages_for_llm = messages[:-1]

    full_content = ""
    # async for event in respond_with_perplexity_search(
    #     input_content=input_content,
    #     user_prompt_prefix="",
    #     messages_for_llm=messages_for_llm,
    #     domain_filter=["*.gov.tw", "-*.gov.cn"],
    # ):
    thread_id = str(uuid.uuid4())

    print(f"[subsidy_api: process_non_stream_response()] start")
    t1 = time.time()
    async for event in agent_runner(
        thread_id,
        {"messages": messages_for_langchain},
        subsidy_api_graph,
        extra_config=get_subsidy_search_config(stream=False),
    ):
        full_content += event.chunk
    print(f"[subsidy_api: process_non_stream_response()] end")
    t2 = time.time()
    print(f"[subsidy_api: process_non_stream_response()] took {t2-t1}")

    choice = Choices(
        index=0,
        message=Message(content=full_content),
        finish_reason="stop",
    )
    env_name = os.getenv("ENV_NAME")
    id = f"{env_name}-{uuid.uuid4()}"
    state = subsidy_api_graph.get_state({"configurable": {"thread_id": thread_id}})
    related_questions = state.values.get("related_questions", [])

    return SubsidyCompletionResponse(
        id=id,
        created=int(time.time()),
        choices=[choice],
        state={"related_questions": related_questions},
    )


def process_messages(
    messages: List[Dict], system_prompt_roy: Optional[str] = None
) -> List[Dict]:
    # Remove any existing system messages
    return [msg for msg in messages if msg.get("role") != "system"]


@router.post("/completion", dependencies=[Depends(verify_token)])
async def completion(
    request: SubsidyCompletionRequest,
):
    """
    Generates a text completion using perplexity search.

    Args:
        request: CompletionRequest containing messages and stream flag

    Returns:
        If stream is False, returns a CompletionResponse
        If stream is True, returns a StreamingResponse with SSE format
    """
    try:
        processed_messages = process_messages(
            request.messages, request.system_prompt_roy
        )
        # system_prompt = (
        #     request.system_prompt_roy
        #     if request.system_prompt_roy is not None
        #     else DEFAULT_SYSTEM_PROMPT
        # )
        # system_prompt = get_subsidy_api_system_prompt()
        if request.stream:
            return StreamingResponse(
                process_stream_response(processed_messages),
                media_type="text/event-stream",
                headers={
                    "Cache-Control": "no-cache",
                    "Connection": "keep-alive",
                },
            )
        else:
            return await process_non_stream_response(processed_messages)

    except Exception as e:
        import traceback

        traceback.print_exc()
        raise HTTPException(status_code=500, detail=str(e))
```
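
As a usage sketch (not part of the diff): consuming the streaming path of `POST /subsidy/completion`. The base URL and bearer-token scheme are assumptions; the module delegates auth to `verify_token` and does not pin a host.

```python
import json

import requests  # any HTTP client with streaming support works

BASE_URL = "http://localhost:8000"  # assumption: local deployment
TOKEN = "..."                       # assumption: whatever verify_token expects

resp = requests.post(
    f"{BASE_URL}/subsidy/completion",
    json={
        "messages": [{"role": "user", "content": "育兒津貼怎麼申請?"}],
        "stream": True,
    },
    headers={"Authorization": f"Bearer {TOKEN}"},
    stream=True,
)
resp.raise_for_status()

for raw in resp.iter_lines(decode_unicode=True):
    if not raw or not raw.startswith("data: "):
        continue  # skip SSE blank separator lines
    payload = raw[len("data: "):]
    if payload == "[DONE]":
        break
    chunk = json.loads(payload)
    for choice in chunk.get("choices", []):
        delta = choice.get("delta") or {}
        print(delta.get("content") or "", end="", flush=True)
    # The final chunk carries related questions in its "state" field
    if chunk.get("state", {}).get("related_questions"):
        print("\nRelated:", chunk["state"]["related_questions"])
```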
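
Under the same assumptions, the non-streaming path (`"stream": false`) returns a single `SubsidyCompletionResponse` whose `state` carries the related questions:

```python
import requests

BASE_URL = "http://localhost:8000"  # assumption, as above
TOKEN = "..."                       # assumption, as above

resp = requests.post(
    f"{BASE_URL}/subsidy/completion",
    json={
        "messages": [{"role": "user", "content": "育兒津貼怎麼申請?"}],
        "stream": False,
    },
    headers={"Authorization": f"Bearer {TOKEN}"},
    timeout=300,  # the agent run can be slow; the module itself times it with print()
)
resp.raise_for_status()
body = resp.json()
print(body["choices"][0]["message"]["content"])
print(body["state"].get("related_questions", []))
```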