botrun-flow-lang 5.12.263__py3-none-any.whl → 5.12.264__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +811 -811
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +723 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +486 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +744 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/METADATA +1 -1
- botrun_flow_lang-5.12.264.dist-info/RECORD +102 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/WHEEL +0 -0
The added and removed line counts match for every file above, and in the three hunks shown below each line is removed and re-added verbatim: the wheel was rebuilt without content changes, and the only visible difference between 5.12.263 and 5.12.264 is the version bump in METADATA (+1 -1) plus the regenerated RECORD.

botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py
@@ -1,90 +1,90 @@ (all 90 lines removed and re-added unchanged)

```python
from urllib.parse import urlparse, parse_qs
from dotenv import load_dotenv
import re
import os

load_dotenv()


def get_video_id(url: str) -> str:
    """
    Extract video ID from various YouTube URL formats.
    Supports:
    - Standard watch URLs (youtube.com/watch?v=...)
    - Shortened URLs (youtu.be/...)
    - Embed URLs (youtube.com/embed/...)
    """
    # Try parsing as standard URL first
    parsed_url = urlparse(url)

    # Handle youtu.be URLs
    if parsed_url.netloc == "youtu.be":
        return parsed_url.path.lstrip("/")

    # Handle standard youtube.com URLs
    if parsed_url.netloc in ["www.youtube.com", "youtube.com"]:
        if parsed_url.path == "/watch":
            # Standard watch URL
            return parse_qs(parsed_url.query).get("v", [""])[0]
        elif "/embed/" in parsed_url.path:
            # Embed URL
            return parsed_url.path.split("/embed/")[-1]

    # Try extracting video ID using regex as fallback
    video_id_match = re.search(r"(?:v=|\/)([0-9A-Za-z_-]{11}).*", url)
    if video_id_match:
        return video_id_match.group(1)

    raise ValueError(f"Could not extract video ID from URL: {url}")


def get_youtube_summary(url: str, prompt: str = None) -> str:
    # print("[get_youtube_summary]url============>", url)
    # print("[get_youtube_summary]prompt============>", prompt)
    from google import genai
    from google.genai.types import HttpOptions, Part
    from google.oauth2 import service_account
    from google.genai import types

    if prompt is None:
        prompt = "Write a short and engaging blog post based on this video."

    try:
        credentials = service_account.Credentials.from_service_account_file(
            os.getenv("GOOGLE_APPLICATION_CREDENTIALS_FOR_FASTAPI"),
            scopes=["https://www.googleapis.com/auth/cloud-platform"],
        )
        client = genai.Client(
            # http_options=HttpOptions(api_version="v1"),
            credentials=credentials,
            project="scoop-386004",
            location="us-central1",
        )
        model_id = "gemini-2.5-flash"
        # model_id = "gemini-2.5-pro"

        response = client.models.generate_content(
            model=model_id,
            contents=[
                types.Content(
                    role="user",
                    parts=[
                        types.Part(
                            file_data=types.FileData(
                                mime_type="video/mp4", file_uri=url
                            )
                        ),
                        types.Part(text=prompt),
                    ],
                )
            ],
        )

        # print(response.text)
        return response.text
    except Exception as e:
        import traceback

        traceback.print_exc()
        print(f"Error getting YouTube summary: {e}")
        return f"Error: Failed to get YouTube summary: {e}"
```
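As a quick sanity check, `get_video_id` covers the three documented URL shapes plus a regex fallback; a minimal sketch, assuming the package is installed and using a placeholder video ID:

```python
# Illustrative check of the supported URL shapes; the ID "dQw4w9WgXcQ"
# is a placeholder, not something shipped with the package.
from botrun_flow_lang.langgraph_agents.agents.util.youtube_util import get_video_id

assert get_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ") == "dQw4w9WgXcQ"
assert get_video_id("https://youtu.be/dQw4w9WgXcQ") == "dQw4w9WgXcQ"
assert get_video_id("https://www.youtube.com/embed/dQw4w9WgXcQ") == "dQw4w9WgXcQ"
```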
botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py
@@ -1,197 +1,197 @@ (all 197 lines removed and re-added unchanged)

```python
"""
Botrun ID-based cache for LangGraph React Agent graphs.
Provides session isolation with parameter validation for graph caching.
"""

import hashlib
import json
import time
from typing import Dict, Any, Optional
from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger

logger = get_default_botrun_logger()


class LangGraphBotrunCache:
    """
    Botrun ID-based cache for LangGraph React Agent graphs.

    Cache structure:
    {
        "botrun_id_1": {
            "graph": <graph_instance>,
            "params_hash": "abc123...",
            "created_at": <timestamp>
        },
        "botrun_id_2": {
            "graph": <graph_instance>,
            "params_hash": "def456...",
            "created_at": <timestamp>
        }
    }
    """

    def __init__(self):
        self._cache: Dict[str, Dict[str, Any]] = {}

    def get_params_hash(
        self,
        system_prompt: str,
        botrun_flow_lang_url: str,
        user_id: str,
        model_name: str,
        lang: str,
        mcp_config: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Generate MD5 hash from parameters for cache validation.

        Args:
            system_prompt: The system prompt to use for the agent
            botrun_flow_lang_url: URL for botrun flow lang
            user_id: User ID
            model_name: Model name to use
            lang: Language code (e.g., "en", "zh-TW")
            mcp_config: MCP servers configuration dict

        Returns:
            str: MD5 hash of parameters
        """
        # Create a stable representation of all parameters
        cache_data = {
            "system_prompt": system_prompt,
            "botrun_flow_lang_url": botrun_flow_lang_url,
            "user_id": user_id,
            "model_name": model_name,
            "lang": lang,
            "mcp_config": mcp_config or {},
        }

        # Convert to JSON string for consistent hashing
        cache_str = json.dumps(cache_data, sort_keys=True)

        # Generate MD5 hash
        return hashlib.md5(cache_str.encode("utf-8")).hexdigest()

    def get_cached_graph(self, botrun_id: str, params_hash: str) -> Optional[Any]:
        """
        Get cached graph if botrun_id exists and params_hash matches.

        Args:
            botrun_id: The botrun ID to look up
            params_hash: Expected parameter hash for validation

        Returns:
            Cached graph instance if found and valid, None otherwise
        """
        if botrun_id not in self._cache:
            logger.debug(f"Botrun ID {botrun_id} not found in cache")
            return None

        cache_entry = self._cache[botrun_id]
        cached_hash = cache_entry.get("params_hash")

        if cached_hash != params_hash:
            logger.info(
                f"Parameter hash mismatch for botrun_id {botrun_id}. "
                f"Cached: {cached_hash[:8]}..., Current: {params_hash[:8]}... "
                f"Cache will be invalidated."
            )
            # Clear cache for this botrun_id since parameters changed
            self.clear_botrun_cache(botrun_id)
            return None

        logger.info(f"Cache hit for botrun_id {botrun_id}")
        return cache_entry.get("graph")

    def cache_graph(self, botrun_id: str, params_hash: str, graph: Any):
        """
        Cache graph for specific botrun_id with parameter hash.

        Args:
            botrun_id: The botrun ID to cache for
            params_hash: Parameter hash for validation
            graph: Graph instance to cache
        """
        self._cache[botrun_id] = {
            "graph": graph,
            "params_hash": params_hash,
            "created_at": time.time(),
        }
        logger.info(
            f"Graph cached for botrun_id {botrun_id} with hash {params_hash[:8]}..."
        )

    def clear_botrun_cache(self, botrun_id: str):
        """
        Clear cache for specific botrun_id.

        Args:
            botrun_id: The botrun ID to clear cache for
        """
        if botrun_id in self._cache:
            del self._cache[botrun_id]
            logger.info(f"Cache cleared for botrun_id {botrun_id}")

    def cleanup_old_cache(self, max_age_hours: int = 24):
        """
        Remove old cache entries to prevent memory buildup.

        Args:
            max_age_hours: Maximum age in hours before cache entry is removed
        """
        current_time = time.time()
        max_age_seconds = max_age_hours * 3600
        botrun_ids_to_remove = []

        for botrun_id, cache_entry in self._cache.items():
            created_at = cache_entry.get("created_at", 0)
            if current_time - created_at > max_age_seconds:
                botrun_ids_to_remove.append(botrun_id)

        for botrun_id in botrun_ids_to_remove:
            del self._cache[botrun_id]
            logger.info(f"Removed old cache entry for botrun_id {botrun_id}")

        if botrun_ids_to_remove:
            logger.info(f"Cleaned up {len(botrun_ids_to_remove)} old cache entries")

    def get_cache_stats(self) -> Dict[str, Any]:
        """
        Get cache statistics for monitoring.

        Returns:
            Dict with cache statistics
        """
        current_time = time.time()
        total_entries = len(self._cache)

        if total_entries == 0:
            return {
                "total_entries": 0,
                "oldest_entry_age_hours": 0,
                "newest_entry_age_hours": 0,
                "average_age_hours": 0,
            }

        ages = []
        for cache_entry in self._cache.values():
            created_at = cache_entry.get("created_at", current_time)
            age_hours = (current_time - created_at) / 3600
            ages.append(age_hours)

        return {
            "total_entries": total_entries,
            "oldest_entry_age_hours": max(ages),
            "newest_entry_age_hours": min(ages),
            "average_age_hours": sum(ages) / len(ages),
        }


# Global cache instance
_global_cache = LangGraphBotrunCache()


def get_botrun_cache() -> LangGraphBotrunCache:
    """Get the global botrun cache instance."""
    return _global_cache
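The cache keys graphs by `botrun_id` and invalidates on any parameter change; a minimal usage sketch (the botrun ID, parameter values, and `build_graph` are placeholders, not part of the package):

```python
from botrun_flow_lang.langgraph_agents.cache.langgraph_botrun_cache import (
    get_botrun_cache,
)


def build_graph():
    # Stand-in for the real (expensive) LangGraph construction.
    return object()


cache = get_botrun_cache()
params_hash = cache.get_params_hash(
    system_prompt="You are a concise assistant.",
    botrun_flow_lang_url="https://example.com",
    user_id="user-123",
    model_name="gemini-2.5-flash",
    lang="zh-TW",
)

graph = cache.get_cached_graph("botrun-abc", params_hash)
if graph is None:
    graph = build_graph()
    cache.cache_graph("botrun-abc", params_hash, graph)
```

MD5 here serves as a cheap fingerprint for invalidation rather than a security boundary: any change to the prompt, URL, user, model, language, or MCP config produces a new hash and forces a rebuild.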
botrun_flow_lang/llm_agent/llm_agent.py
@@ -1,19 +1,19 @@ (all 19 lines removed and re-added unchanged)

```python
from pydantic import BaseModel
from typing import Optional


class LlmAgent(BaseModel):
    """
    @include_in_history: whether to add this answer to the history
    @max_system_prompt_length: if the system prompt exceeds this length, it is discarded entirely
    """

    name: str = ""
    print_output: bool = True
    print_plotly: bool = True
    output: Optional[str] = None
    model: str = "openai/gpt-4o-2024-08-06"
    system_prompt: str
    gen_image: bool = False
    include_in_history: bool = True
    max_system_prompt_length: Optional[int] = None
```
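Since `system_prompt` is the only field without a default, a minimal instantiation might look like this (values are illustrative):

```python
from botrun_flow_lang.llm_agent.llm_agent import LlmAgent

agent = LlmAgent(
    system_prompt="You are a concise assistant.",
    include_in_history=False,       # keep this turn out of the history
    max_system_prompt_length=4000,  # longer system prompts are dropped entirely
)
```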