MemoryOS 0.2.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of MemoryOS might be problematic. Click here for more details.
- {memoryos-0.2.1.dist-info → memoryos-1.0.0.dist-info}/METADATA +7 -1
- {memoryos-0.2.1.dist-info → memoryos-1.0.0.dist-info}/RECORD +87 -64
- memos/__init__.py +1 -1
- memos/api/config.py +158 -69
- memos/api/context/context.py +147 -0
- memos/api/context/dependencies.py +101 -0
- memos/api/product_models.py +5 -1
- memos/api/routers/product_router.py +54 -26
- memos/configs/graph_db.py +49 -1
- memos/configs/internet_retriever.py +19 -0
- memos/configs/mem_os.py +5 -0
- memos/configs/mem_reader.py +9 -0
- memos/configs/mem_scheduler.py +54 -18
- memos/configs/mem_user.py +58 -0
- memos/graph_dbs/base.py +38 -3
- memos/graph_dbs/factory.py +2 -0
- memos/graph_dbs/nebular.py +1612 -0
- memos/graph_dbs/neo4j.py +18 -9
- memos/log.py +6 -1
- memos/mem_cube/utils.py +13 -6
- memos/mem_os/core.py +157 -37
- memos/mem_os/main.py +2 -2
- memos/mem_os/product.py +252 -201
- memos/mem_os/utils/default_config.py +1 -1
- memos/mem_os/utils/format_utils.py +281 -70
- memos/mem_os/utils/reference_utils.py +133 -0
- memos/mem_reader/simple_struct.py +13 -5
- memos/mem_scheduler/base_scheduler.py +239 -266
- memos/mem_scheduler/{modules → general_modules}/base.py +4 -5
- memos/mem_scheduler/{modules → general_modules}/dispatcher.py +57 -21
- memos/mem_scheduler/general_modules/misc.py +104 -0
- memos/mem_scheduler/{modules → general_modules}/rabbitmq_service.py +12 -10
- memos/mem_scheduler/{modules → general_modules}/redis_service.py +1 -1
- memos/mem_scheduler/general_modules/retriever.py +199 -0
- memos/mem_scheduler/general_modules/scheduler_logger.py +261 -0
- memos/mem_scheduler/general_scheduler.py +243 -80
- memos/mem_scheduler/monitors/__init__.py +0 -0
- memos/mem_scheduler/monitors/dispatcher_monitor.py +305 -0
- memos/mem_scheduler/{modules/monitor.py → monitors/general_monitor.py} +106 -57
- memos/mem_scheduler/mos_for_test_scheduler.py +23 -20
- memos/mem_scheduler/schemas/__init__.py +0 -0
- memos/mem_scheduler/schemas/general_schemas.py +44 -0
- memos/mem_scheduler/schemas/message_schemas.py +149 -0
- memos/mem_scheduler/schemas/monitor_schemas.py +337 -0
- memos/mem_scheduler/utils/__init__.py +0 -0
- memos/mem_scheduler/utils/filter_utils.py +176 -0
- memos/mem_scheduler/utils/misc_utils.py +102 -0
- memos/mem_user/factory.py +94 -0
- memos/mem_user/mysql_persistent_user_manager.py +271 -0
- memos/mem_user/mysql_user_manager.py +500 -0
- memos/mem_user/persistent_factory.py +96 -0
- memos/mem_user/user_manager.py +4 -4
- memos/memories/activation/item.py +5 -1
- memos/memories/activation/kv.py +20 -8
- memos/memories/textual/base.py +2 -2
- memos/memories/textual/general.py +36 -92
- memos/memories/textual/item.py +5 -33
- memos/memories/textual/tree.py +13 -7
- memos/memories/textual/tree_text_memory/organize/{conflict.py → handler.py} +34 -50
- memos/memories/textual/tree_text_memory/organize/manager.py +8 -96
- memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +49 -43
- memos/memories/textual/tree_text_memory/organize/reorganizer.py +107 -142
- memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +229 -0
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -3
- memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +11 -0
- memos/memories/textual/tree_text_memory/retrieve/recall.py +15 -8
- memos/memories/textual/tree_text_memory/retrieve/reranker.py +1 -1
- memos/memories/textual/tree_text_memory/retrieve/retrieval_mid_structs.py +2 -0
- memos/memories/textual/tree_text_memory/retrieve/searcher.py +191 -116
- memos/memories/textual/tree_text_memory/retrieve/task_goal_parser.py +47 -15
- memos/memories/textual/tree_text_memory/retrieve/utils.py +11 -7
- memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +62 -58
- memos/memos_tools/dinding_report_bot.py +422 -0
- memos/memos_tools/lockfree_dict.py +120 -0
- memos/memos_tools/notification_service.py +44 -0
- memos/memos_tools/notification_utils.py +96 -0
- memos/memos_tools/thread_safe_dict.py +288 -0
- memos/settings.py +3 -1
- memos/templates/mem_reader_prompts.py +4 -1
- memos/templates/mem_scheduler_prompts.py +62 -15
- memos/templates/mos_prompts.py +116 -0
- memos/templates/tree_reorganize_prompts.py +24 -17
- memos/utils.py +19 -0
- memos/mem_scheduler/modules/misc.py +0 -39
- memos/mem_scheduler/modules/retriever.py +0 -268
- memos/mem_scheduler/modules/schemas.py +0 -328
- memos/mem_scheduler/utils.py +0 -75
- memos/memories/textual/tree_text_memory/organize/redundancy.py +0 -193
- {memoryos-0.2.1.dist-info → memoryos-1.0.0.dist-info}/LICENSE +0 -0
- {memoryos-0.2.1.dist-info → memoryos-1.0.0.dist-info}/WHEEL +0 -0
- {memoryos-0.2.1.dist-info → memoryos-1.0.0.dist-info}/entry_points.txt +0 -0
- /memos/mem_scheduler/{modules → general_modules}/__init__.py +0 -0
|
@@ -3,13 +3,15 @@
|
|
|
3
3
|
import json
|
|
4
4
|
import uuid
|
|
5
5
|
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
6
7
|
from datetime import datetime
|
|
7
8
|
|
|
8
9
|
import requests
|
|
9
10
|
|
|
10
11
|
from memos.embedders.factory import OllamaEmbedder
|
|
11
12
|
from memos.log import get_logger
|
|
12
|
-
from memos.
|
|
13
|
+
from memos.mem_reader.base import BaseMemReader
|
|
14
|
+
from memos.memories.textual.item import TextualMemoryItem
|
|
13
15
|
|
|
14
16
|
|
|
15
17
|
logger = get_logger(__name__)
|
|
@@ -93,8 +95,8 @@ class XinyuSearchAPI:
|
|
|
93
95
|
"online_search": {
|
|
94
96
|
"max_entries": max_results,
|
|
95
97
|
"cache_switch": False,
|
|
96
|
-
"baidu_field": {"switch":
|
|
97
|
-
"bing_field": {"switch":
|
|
98
|
+
"baidu_field": {"switch": False, "mode": "relevance", "type": "page"},
|
|
99
|
+
"bing_field": {"switch": True, "mode": "relevance", "type": "page"},
|
|
98
100
|
"sogou_field": {"switch": False, "mode": "relevance", "type": "page"},
|
|
99
101
|
},
|
|
100
102
|
"request_id": "memos" + str(uuid.uuid4()),
|
|
@@ -112,6 +114,7 @@ class XinyuSearchRetriever:
|
|
|
112
114
|
access_key: str,
|
|
113
115
|
search_engine_id: str,
|
|
114
116
|
embedder: OllamaEmbedder,
|
|
117
|
+
reader: BaseMemReader,
|
|
115
118
|
max_results: int = 20,
|
|
116
119
|
):
|
|
117
120
|
"""
|
|
@@ -121,12 +124,14 @@ class XinyuSearchRetriever:
|
|
|
121
124
|
access_key: Xinyu API access key
|
|
122
125
|
embedder: Embedder instance for generating embeddings
|
|
123
126
|
max_results: Maximum number of results to retrieve
|
|
127
|
+
reader: MemReader module used to process internet contents
|
|
124
128
|
"""
|
|
125
129
|
self.xinyu_api = XinyuSearchAPI(access_key, search_engine_id, max_results=max_results)
|
|
126
130
|
self.embedder = embedder
|
|
131
|
+
self.reader = reader
|
|
127
132
|
|
|
128
133
|
def retrieve_from_internet(
    self, query: str, top_k: int = 10, parsed_goal=None, info=None
) -> list[TextualMemoryItem]:
    """
    Retrieve information from Xinyu search and convert it to TextualMemoryItem format.

    Args:
        query: Search query string sent to the Xinyu API.
        top_k: Number of search results to request.
        parsed_goal: Parsed task goal (optional), forwarded to per-result processing.
        info (dict): Optional record of memory consumption (e.g. user_id/session_id),
            forwarded to the mem reader.

    Returns:
        List of de-duplicated TextualMemoryItem objects.
    """
    search_results = self.xinyu_api.search(query, max_results=top_k)

    # Convert to TextualMemoryItem format.
    memory_items: list[TextualMemoryItem] = []

    # Each raw hit is processed independently; 8 workers bound the
    # concurrency of the underlying reader calls.
    with ThreadPoolExecutor(max_workers=8) as executor:
        futures = [
            executor.submit(self._process_result, result, query, parsed_goal, info)
            for result in search_results
        ]
        for future in as_completed(futures):
            try:
                memory_items.extend(future.result())
            except Exception as e:
                # One failed result must not abort the whole retrieval.
                logger.error(f"Error processing search result: {e}")

    # De-duplicate by memory text, keeping the first occurrence
    # (completion order, since results arrive via as_completed).
    unique_memory_items = {}
    for item in memory_items:
        if item.memory not in unique_memory_items:
            unique_memory_items[item.memory] = item

    return list(unique_memory_items.values())
|
|
203
170
|
|
|
204
171
|
def _extract_entities(self, title: str, content: str, summary: str) -> list[str]:
|
|
205
172
|
"""
|
|
@@ -333,3 +300,40 @@ class XinyuSearchRetriever:
|
|
|
333
300
|
tags.extend(parsed_goal.tags)
|
|
334
301
|
|
|
335
302
|
return list(set(tags))[:15] # Limit to 15 tags
|
|
303
|
+
|
|
304
|
+
def _process_result(
    self, result: dict, query: str, parsed_goal: str | None, info: dict | None
) -> list[TextualMemoryItem]:
    """
    Convert one raw Xinyu search hit into a list of TextualMemoryItem.

    Args:
        result: One raw search hit; keys read here are title/content/summary/
            url/publish_time.
        query: Original search query (not referenced in this method; kept for
            call-site parity).
        parsed_goal: Parsed task goal (optional; not referenced in this method).
        info: Record of memory consumption; a blank user/session placeholder
            is substituted when missing.

    Returns:
        TextualMemoryItem list produced by the mem reader from the hit's content.
    """
    if not info:
        info = {"user_id": "", "session_id": ""}
    title = result.get("title", "")
    content = result.get("content", "")
    summary = result.get("summary", "")
    url = result.get("url", "")
    publish_time = result.get("publish_time", "")
    if publish_time:
        try:
            # Normalize "YYYY-MM-DD HH:MM:SS" to "YYYY-MM-DD".
            publish_time = datetime.strptime(publish_time, "%Y-%m-%d %H:%M:%S").strftime(
                "%Y-%m-%d"
            )
        except Exception as e:
            logger.error(f"xinyu search error: {e}")
            # Unparseable timestamp: fall back to today's date.
            publish_time = datetime.now().strftime("%Y-%m-%d")
    else:
        publish_time = datetime.now().strftime("%Y-%m-%d")

    # Let the mem reader chunk/structure the page content into memory items.
    read_items = self.reader.get_memory([content], type="doc", info=info)

    memory_items = []
    # get_memory was given a single document, so read_items[0] holds its items.
    for read_item_i in read_items[0]:
        # Prefix each chunk with the article header so the memory is self-describing.
        read_item_i.memory = (
            f"Title: {title}\nNewsTime: {publish_time}\nSummary: {summary}\n"
            f"Content: {read_item_i.memory}"
        )
        read_item_i.metadata.source = "web"
        read_item_i.metadata.memory_type = "OuterMemory"
        read_item_i.metadata.sources = [url] if url else []
        read_item_i.metadata.visibility = "public"

        memory_items.append(read_item_i)
    return memory_items
|
|
@@ -0,0 +1,422 @@
|
|
|
1
|
+
"""dinding_report_bot.py"""
|
|
2
|
+
|
|
3
|
+
import base64
|
|
4
|
+
import contextlib
|
|
5
|
+
import hashlib
|
|
6
|
+
import hmac
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import time
|
|
10
|
+
import urllib.parse
|
|
11
|
+
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from uuid import uuid4
|
|
14
|
+
|
|
15
|
+
from dotenv import load_dotenv
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
load_dotenv()
|
|
19
|
+
|
|
20
|
+
try:
|
|
21
|
+
import io
|
|
22
|
+
|
|
23
|
+
import matplotlib
|
|
24
|
+
import matplotlib.font_manager as fm
|
|
25
|
+
import numpy as np
|
|
26
|
+
import oss2
|
|
27
|
+
import requests
|
|
28
|
+
|
|
29
|
+
from PIL import Image, ImageDraw, ImageFont
|
|
30
|
+
|
|
31
|
+
matplotlib.use("Agg")
|
|
32
|
+
from alibabacloud_dingtalk.robot_1_0 import models as robot_models
|
|
33
|
+
from alibabacloud_dingtalk.robot_1_0.client import Client as DingtalkRobotClient
|
|
34
|
+
from alibabacloud_tea_openapi import models as open_api_models
|
|
35
|
+
from alibabacloud_tea_util import models as util_models
|
|
36
|
+
except ImportError as e:
|
|
37
|
+
raise ImportError(
|
|
38
|
+
f"DingDing bot dependencies not found: {e}. "
|
|
39
|
+
"Please install required packages: pip install requests oss2 pillow matplotlib alibabacloud-dingtalk"
|
|
40
|
+
) from e
|
|
41
|
+
|
|
42
|
+
# =========================
|
|
43
|
+
# 🔧 common tools
|
|
44
|
+
# =========================
|
|
45
|
+
# Webhook credentials for the two DingTalk group robots: one for user-facing
# reports, one for error alerts. All values come from the environment
# (a .env file is loaded above via load_dotenv()); missing vars yield None.
ACCESS_TOKEN_USER = os.getenv("DINGDING_ACCESS_TOKEN_USER")
SECRET_USER = os.getenv("DINGDING_SECRET_USER")
ACCESS_TOKEN_ERROR = os.getenv("DINGDING_ACCESS_TOKEN_ERROR")
SECRET_ERROR = os.getenv("DINGDING_SECRET_ERROR")
# Aliyun OSS settings used to host generated banner images.
OSS_CONFIG = {
    "endpoint": os.getenv("OSS_ENDPOINT"),
    "region": os.getenv("OSS_REGION"),
    "bucket_name": os.getenv("OSS_BUCKET_NAME"),
    "oss_access_key_id": os.getenv("OSS_ACCESS_KEY_ID"),
    "oss_access_key_secret": os.getenv("OSS_ACCESS_KEY_SECRET"),
    "public_base_url": os.getenv("OSS_PUBLIC_BASE_URL"),
}
# DingTalk enterprise-app robot used for DING reminders (direct @-user pings).
ROBOT_CODE = os.getenv("DINGDING_ROBOT_CODE")
DING_APP_KEY = os.getenv("DINGDING_APP_KEY")
DING_APP_SECRET = os.getenv("DINGDING_APP_SECRET")
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
# Get access_token
|
|
63
|
+
def get_access_token():
    """Fetch a fresh DingTalk access token for the enterprise app.

    Returns:
        str: The ``access_token`` field of the gettoken response.

    Raises:
        requests.RequestException: On network failure, timeout, or HTTP
            error status.
        KeyError: If DingTalk answers 200 without an ``access_token`` field
            (e.g. invalid app key/secret).
    """
    url = f"https://oapi.dingtalk.com/gettoken?appkey={DING_APP_KEY}&appsecret={DING_APP_SECRET}"
    # Timeout so a hung DingTalk endpoint cannot block the caller forever;
    # raise_for_status surfaces HTTP errors instead of an opaque KeyError.
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.json()["access_token"]
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _pick_font(size: int = 48) -> ImageFont.ImageFont:
    """Locate a usable truetype font, falling back to Pillow's default.

    Candidates common across macOS / Windows / Linux are tried in order:
    Helvetica -> Arial -> DejaVu Sans. The first one that matplotlib's
    font manager can resolve is loaded as a truetype font at *size*.
    When none resolves, the default bitmap font is returned, scaled up
    via its file path when that attribute is available.
    """
    for candidate in ("Helvetica", "Arial", "DejaVu Sans"):
        try:
            resolved = fm.findfont(candidate, fallback_to_default=False)
            return ImageFont.truetype(resolved, size)
        except Exception:
            # Candidate missing or unloadable — try the next one.
            continue
    # No truetype match: fall back to the default font, scaling it up
    # manually when it exposes a file path.
    fallback = ImageFont.load_default()
    if hasattr(fallback, "path"):
        return ImageFont.FreeTypeFont(fallback.path, size)
    return fallback
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def make_header(
    title: str,
    subtitle: str,
    size=(1080, 260),
    colors=("#C8F6E1", "#E8F8F5"),  # Stylish mint green → lighter green
    fg="#00956D",
) -> bytes:
    """
    Generate a notification banner: horizontal color gradient + centered text.

    Args:
        title: main title (suggested ≤ 35 characters)
        subtitle: sub title (e.g. "Notification")
        size: (width, height) of the banner in pixels
        colors: left/right hex colors of the horizontal gradient
        fg: hex color used for both text lines

    Returns:
        PNG-encoded image bytes.
    """

    # Kept local: only make_header needs this measurement helper.
    def _text_wh(draw: ImageDraw.ImageDraw, text: str, font: ImageFont.ImageFont):
        """
        Return (width, height) of *text*, compatible with both newer Pillow
        (textbbox, ≥ 8.0) and older Pillow (textsize, removed in 10.0).
        """
        if hasattr(draw, "textbbox"):  # Pillow ≥ 8.0
            left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
            return right - left, bottom - top
        else:  # Pillow < 10.0
            return draw.textsize(text, font=font)

    w, h = size
    # --- 1) background gradient ---
    # Horizontal 0→1 ramp replicated down every row, blended per RGB channel.
    g = np.linspace(0, 1, w)
    grad = np.outer(np.ones(h), g)
    # Parse "#RRGGBB" into (r, g, b) integer tuples.
    rgb0 = tuple(int(colors[0].lstrip("#")[i : i + 2], 16) for i in (0, 2, 4))
    rgb1 = tuple(int(colors[1].lstrip("#")[i : i + 2], 16) for i in (0, 2, 4))
    img = np.zeros((h, w, 3), dtype=np.uint8)
    for i in range(3):
        img[:, :, i] = rgb0[i] * (1 - grad) + rgb1[i] * grad
    im = Image.fromarray(img)

    # --- 2) text ---
    draw = ImageDraw.Draw(im)
    font_title = _pick_font(54)  # main title
    font_sub = _pick_font(30)  # sub title

    # Center both lines horizontally; subtitle sits 8px below the title.
    title_w, title_h = _text_wh(draw, title, font_title)
    sub_w, sub_h = _text_wh(draw, subtitle, font_sub)

    title_x = (w - title_w) // 2
    title_y = h // 2 - title_h
    sub_x = (w - sub_w) // 2
    sub_y = title_y + title_h + 8

    draw.text((title_x, title_y), title, fill=fg, font=font_title)
    draw.text((sub_x, sub_y), subtitle, fill=fg, font=font_sub)

    # --- 3) PNG bytes ---
    buf = io.BytesIO()
    im.save(buf, "PNG")
    return buf.getvalue()
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _sign(secret: str, ts: str):
|
|
146
|
+
s = f"{ts}\n{secret}"
|
|
147
|
+
return urllib.parse.quote_plus(
|
|
148
|
+
base64.b64encode(hmac.new(secret.encode(), s.encode(), hashlib.sha256).digest())
|
|
149
|
+
)
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def _send_md(title: str, md: str, type="user", at=None):
    """Post a markdown message to one of the two DingTalk group webhooks.

    Args:
        title: Message title (shown in the chat-list preview).
        md: Markdown body.
        type: "user" routes to the user-report robot; anything else routes
            to the error-alert robot. (Name shadows the builtin ``type``;
            kept for backward compatibility with existing callers.)
        at: Optional DingTalk "at" payload; defaults to at-nobody.
    """
    if type == "user":
        access_token = ACCESS_TOKEN_USER
        secret = SECRET_USER
    else:
        access_token = ACCESS_TOKEN_ERROR
        secret = SECRET_ERROR
    # Signed webhook URL: millisecond timestamp + HMAC signature.
    # (Restores the "&timestamp" query key that was mangled in the source.)
    ts = str(round(time.time() * 1000))
    url = (
        f"https://oapi.dingtalk.com/robot/send?access_token={access_token}"
        f"&timestamp={ts}&sign={_sign(secret, ts)}"
    )
    payload = {
        "msgtype": "markdown",
        "markdown": {"title": title, "text": md},
        "at": at or {"atUserIds": [], "isAtAll": False},
    }
    # Timeout so a slow webhook cannot block report generation indefinitely.
    requests.post(
        url,
        headers={"Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=10,
    )
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
# ------------------------- OSS -------------------------
|
|
173
|
+
def upload_bytes_to_oss(
    data: bytes,
    oss_dir: str = "xcy-share/jfzt/",
    filename: str | None = None,
    keep_latest: int = 1,  # Keep latest N files; 0 = delete all
) -> str:
    """Upload PNG bytes to Aliyun OSS and return the public URL.

    Behavior:
    - If *filename* is given it is used as a name prefix: older
      {oss_dir}/{filename}_*.png objects are pruned so at most *keep_latest*
      of the newest remain.
    - The new object is always named <prefix><timestamp>_<uuid>.png, so the
      returned URL is unique for every call.
    """
    filename_prefix = filename

    conf = OSS_CONFIG
    auth = oss2.Auth(conf["oss_access_key_id"], conf["oss_access_key_secret"])
    bucket = oss2.Bucket(auth, conf["endpoint"], conf["bucket_name"])

    # ---------- delete old files ----------
    if filename_prefix and keep_latest >= 0:
        prefix_path = f"{oss_dir.rstrip('/')}/{filename_prefix}_"
        objs = bucket.list_objects(prefix=prefix_path).object_list
        old_files = [(o.key, o.last_modified) for o in objs if o.key.endswith(".png")]
        if old_files and len(old_files) > keep_latest:
            # sort by last_modified from new to old, keep the newest keep_latest
            old_files.sort(key=lambda x: x[1], reverse=True)
            to_del = [k for k, _ in old_files[keep_latest:]]
            for k in to_del:
                # Best-effort cleanup; a failed delete must not block the upload.
                with contextlib.suppress(Exception):
                    bucket.delete_object(k)

    # ---------- upload new file ----------
    ts = int(time.time())
    uniq = uuid4().hex
    prefix = f"{filename_prefix}_" if filename_prefix else ""
    object_name = f"{oss_dir.rstrip('/')}/{prefix}{ts}_{uniq}.png"
    bucket.put_object(object_name, data)

    return f"{conf['public_base_url'].rstrip('/')}/{object_name}"
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
# --------- Markdown Table Helper ---------
|
|
213
|
+
def _md_table(data: dict, is_error: bool = False) -> str:
|
|
214
|
+
"""
|
|
215
|
+
Render a dict to a DingTalk-compatible Markdown table
|
|
216
|
+
- Normal statistics: single row, multiple columns
|
|
217
|
+
- Error distribution: two columns, multiple rows (error information/occurrence count)
|
|
218
|
+
"""
|
|
219
|
+
if is_error: # {"error_info":{idx:val}, "occurrence_count":{idx:val}}
|
|
220
|
+
header = "| error | count |\n|---|---|"
|
|
221
|
+
rows = "\n".join(
|
|
222
|
+
f"| {err} | {cnt} |"
|
|
223
|
+
for err, cnt in zip(data["error"].values(), data["count"].values(), strict=False)
|
|
224
|
+
)
|
|
225
|
+
return f"{header}\n{rows}"
|
|
226
|
+
|
|
227
|
+
# normal statistics
|
|
228
|
+
header = "| " + " | ".join(data.keys()) + " |\n|" + "|".join(["---"] * len(data)) + "|"
|
|
229
|
+
row = "| " + " | ".join(map(str, data.values())) + " |"
|
|
230
|
+
return f"{header}\n{row}"
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def upload_to_oss(
    local_path: str,
    oss_dir: str = "xcy-share/jfzt/",
    filename: str | None = None,  # forwarded as the OSS object-name prefix
) -> str:
    """Upload a local file to OSS and return its public URL.

    Thin wrapper over upload_bytes_to_oss(). Note: the object name is always
    unique (timestamp + uuid); older same-prefix files are pruned rather
    than overwritten.
    """
    with open(local_path, "rb") as f:
        return upload_bytes_to_oss(f.read(), oss_dir=oss_dir, filename=filename)
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
def send_ding_reminder(
    access_token: str, robot_code: str, user_ids: list[str], content: str, remind_type: int = 0
):
    """Send a DING reminder to specific users via the enterprise robot API.

    Args:
        access_token: DingTalk access_token (usually permanent when using a robot).
        robot_code: Robot code applied on the open platform.
        user_ids: DingTalk user_id list of the receivers.
        content: Message content to send.
        remind_type: Reminder channel. NOTE(review): the original comments
            disagree (here "1=in-app, 2=phone, 3=SMS", at the call site
            "1 in-app, 2 SMS, 3 phone") and the default 0 is outside the
            documented range — confirm against the DingTalk robot API docs.
    """
    # initialize client
    config = open_api_models.Config(protocol="https", region_id="central")
    client = DingtalkRobotClient(config)

    # request headers carry the access token (not the query string)
    headers = robot_models.RobotSendDingHeaders(x_acs_dingtalk_access_token=access_token)

    # request body
    req = robot_models.RobotSendDingRequest(
        robot_code=robot_code,
        remind_type=remind_type,
        receiver_user_id_list=user_ids,
        content=content,
    )

    # send; failures are printed but deliberately not re-raised (best effort)
    try:
        client.robot_send_ding_with_options(req, headers, util_models.RuntimeOptions())
        print("✅ DING message sent successfully")
    except Exception as e:
        print("❌ DING message sent failed:", e)
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
def error_bot(
    err: str,
    title: str = "Error Alert",
    level: str = "P2",  # alert level: P0 / P1 / P2
    user_ids: list[str] | None = None,  # users to @ in the group + DING targets
):
    """
    Send an error alert to the error-alert group, then DING the listed users.

    level can be set to P0 / P1 / P2, corresponding to red / orange / yellow;
    an unrecognized level is escalated to P0 (note: the signature default is
    P2, so only explicit invalid values escalate).
    """
    # ---------- Level → color scheme & emoji ----------
    level_map = {
        "P0": {"color": "#C62828", "grad": ("#FFE4E4", "#FFD3D3"), "emoji": "🔴"},
        "P1": {"color": "#E65100", "grad": ("#FFE9D6", "#FFD7B5"), "emoji": "🟠"},
        "P2": {"color": "#EF6C00", "grad": ("#FFF6D8", "#FFECB5"), "emoji": "🟡"},
    }
    lv = level.upper()
    if lv not in level_map:
        lv = "P0"  # escalate to P0 if invalid
    style = level_map[lv]

    # Title color always follows the level's color scheme.
    title_color = style["color"]

    # ---------- Generate gradient banner ----------
    # NOTE(review): banner_url is computed below but never referenced in the
    # markdown body — an image-embed line (e.g. "![](banner_url)") appears to
    # have been lost; confirm against the intended message layout.
    banner_bytes = make_header(
        title=f"Level {lv}",
        subtitle="Error Alert",
        colors=style["grad"],
        fg=style["color"],
    )
    banner_url = upload_bytes_to_oss(
        banner_bytes,
        filename=f"error_banner_{title}_{lv.lower()}.png",  # one fixed prefix per level
    )

    # ---------- Markdown ----------
    colored_title = f"<font color='{title_color}' size='4'><b>{title}</b></font>"
    at_suffix = ""
    if user_ids:
        at_suffix = "\n\n" + " ".join([f"@{m}" for m in user_ids])

    # NOTE(review): colored_title (already a <font> tag) is wrapped in a second
    # <font> tag below — likely redundant; confirm rendering in DingTalk.
    md = (
        f"\n\n"
        f"### {style['emoji']} <font color='{style['color']}' size='4'><b>{colored_title}</b></font>\n\n"
        f"**Detail:**\n```\n{err}\n```\n"
        # Visual indicator only; the real notification is the at_config below.
        f"### 🔵 <font color='#1565C0' size='4'><b>Attention:{at_suffix}</b></font>\n\n"
        f"<font color='#9E9E9E' size='1'>Time: "
        f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</font>\n"
    )

    # ---------- Send Markdown in group and @users ----------
    at_config = {"atUserIds": user_ids or [], "isAtAll": False}
    _send_md(title, md, type="error", at=at_config)

    user_ids_for_ding = user_ids  # DingTalk user_id list
    message = f"{title}\nMemos system error, please handle immediately"

    token = get_access_token()

    send_ding_reminder(
        access_token=token,
        robot_code=ROBOT_CODE,
        user_ids=user_ids_for_ding,
        content=message,
        # P0 uses the strongest channel; see NOTE in send_ding_reminder about
        # the conflicting remind_type documentation.
        remind_type=3 if level == "P0" else 1,
    )
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
# --------- online_bot ---------
|
|
348
|
+
# ---------- Convert dict → colored KV lines ----------
|
|
349
|
+
def _kv_lines(d: dict, emoji: str = "", heading: str = "", heading_color: str = "#00956D") -> str:
|
|
350
|
+
"""
|
|
351
|
+
Returns:
|
|
352
|
+
### 📅 <font color='#00956D'><b>Daily Summary</b></font>
|
|
353
|
+
- **Request count:** 1364
|
|
354
|
+
...
|
|
355
|
+
"""
|
|
356
|
+
parts = [f"### {emoji} <font color='{heading_color}' size='3'><b>{heading}</b></font>"]
|
|
357
|
+
parts += [f"- **{k}:** {v}" for k, v in d.items()]
|
|
358
|
+
return "\n".join(parts)
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
# -------------- online_bot(colored title version) -----------------
|
|
362
|
+
def online_bot(
    header_name: str,
    sub_title_name: str,
    title_color: str,
    other_data1: dict,
    other_data2: dict,
    emoji: dict,
):
    """Send a two-section status report to the user-report DingTalk group.

    Args:
        header_name: Banner main title (English).
        sub_title_name: Banner subtitle (English).
        title_color: Hex color for the message's main title.
        other_data1: First key/value section, rendered via _kv_lines.
        other_data2: Second key/value section.
        emoji: Two-entry dict; entry i supplies the (emoji, heading) pair for
            section i. NOTE(review): keys are passed as the emoji argument and
            values as the heading, while the __main__ example uses text keys
            and emoji values — confirm the intended orientation.
    """
    heading_color = "#00956D"  # green used for section headings

    # 0) Banner
    # NOTE(review): banner_url is computed but never referenced in the
    # markdown below — an image-embed line appears to have been lost; confirm.
    banner_bytes = make_header(header_name, sub_title_name)
    banner_url = upload_bytes_to_oss(banner_bytes, filename="online_report.png")

    # 1) Colored main title
    colored_title = f"<font color='{title_color}' size='4'><b>{header_name}</b></font>"

    # 3) Markdown — filter(None, ...) drops empty segments (e.g. the f"").
    md = "\n\n".join(
        filter(
            None,
            [
                f"",
                f"### 🙄 <font color='{heading_color}' size='4'><b>{colored_title}</b></font>\n\n",
                _kv_lines(
                    other_data1,
                    next(iter(emoji.keys())),
                    next(iter(emoji.values())),
                    heading_color=heading_color,
                ),
                _kv_lines(
                    other_data2,
                    list(emoji.keys())[1],
                    list(emoji.values())[1],
                    heading_color=heading_color,
                ),
                f"<font color='#9E9E9E' size='1'>Time: "
                f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</font>\n",
            ],
        )
    )

    _send_md(colored_title, md, type="user")
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
if __name__ == "__main__":
    # Manual smoke test: sends a real report through the configured webhook,
    # so it requires the DINGDING_* / OSS_* environment variables to be set.
    other_data = {
        "recent_overall_data": "what is memos",
        "site_data": "**📊 Simulated content\nLa la la <font color='red'>320</font>hahaha<font "
        "color='red'>155</font>",
    }

    online_bot(
        header_name="TextualMemory",  # must in English
        sub_title_name="Search",  # must in English
        title_color="#00956D",
        other_data1={"Retrieval source 1": "This is plain text memory retrieval content blablabla"},
        other_data2=other_data,
        emoji={"Plain text memory retrieval source": "😨", "Retrieval content": "🕰🐛"},
    )
    print("All messages sent successfully")
|