mcp-sqlite-memory-bank 1.5.1-py3-none-any.whl → 1.6.2-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- mcp_sqlite_memory_bank/__init__.py +3 -3
- mcp_sqlite_memory_bank/__main__.py +8 -7
- mcp_sqlite_memory_bank/database.py +166 -48
- mcp_sqlite_memory_bank/prompts.py +64 -48
- mcp_sqlite_memory_bank/resources.py +218 -144
- mcp_sqlite_memory_bank/semantic.py +25 -13
- mcp_sqlite_memory_bank/server.py +174 -32
- mcp_sqlite_memory_bank/tools/__init__.py +26 -29
- mcp_sqlite_memory_bank/tools/analytics.py +179 -130
- mcp_sqlite_memory_bank/tools/basic.py +417 -4
- mcp_sqlite_memory_bank/tools/discovery.py +549 -360
- mcp_sqlite_memory_bank/tools/search.py +147 -71
- mcp_sqlite_memory_bank/types.py +6 -1
- mcp_sqlite_memory_bank/utils.py +154 -105
- {mcp_sqlite_memory_bank-1.5.1.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/METADATA +54 -6
- mcp_sqlite_memory_bank-1.6.2.dist-info/RECORD +21 -0
- mcp_sqlite_memory_bank-1.5.1.dist-info/RECORD +0 -21
- {mcp_sqlite_memory_bank-1.5.1.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/WHEEL +0 -0
- {mcp_sqlite_memory_bank-1.5.1.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/entry_points.txt +0 -0
- {mcp_sqlite_memory_bank-1.5.1.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/licenses/LICENSE +0 -0
- {mcp_sqlite_memory_bank-1.5.1.dist-info → mcp_sqlite_memory_bank-1.6.2.dist-info}/top_level.txt +0 -0
The hunks below cover mcp_sqlite_memory_bank/tools/analytics.py; most of the change is mechanical reformatting (long lines wrapped, trailing commas added) plus import cleanup.

--- a/mcp_sqlite_memory_bank/tools/analytics.py
+++ b/mcp_sqlite_memory_bank/tools/analytics.py
@@ -7,11 +7,10 @@ and providing insights for better knowledge organization.
 """
 
 import logging
-from typing import
-from fastmcp import FastMCP
+from typing import cast
 
 from ..database import get_database
-from ..semantic import is_semantic_search_available
+from ..semantic import is_semantic_search_available
 from ..types import ToolResponse
 from ..utils import catch_errors
 
@@ -20,14 +19,14 @@ from ..utils import catch_errors
 def analyze_memory_patterns() -> ToolResponse:
     """
     🔍 **MEMORY PATTERN ANALYSIS** - Discover insights in your memory bank!
-
+
     Analyzes content patterns, usage statistics, and identifies opportunities
     for better organization and knowledge discovery.
-
+
     Returns:
         ToolResponse: On success: {"success": True, "analysis": dict}
                      On error: {"success": False, "error": str, "category": str, "details": dict}
-
+
     Examples:
         >>> analyze_memory_patterns()
         {"success": True, "analysis": {
@@ -36,7 +35,7 @@ def analyze_memory_patterns() -> ToolResponse:
             "semantic_readiness": {"ready": 2, "needs_setup": 1},
             "recommendations": ["Consider embedding setup for 'notes' table"]
         }}
-
+
     FastMCP Tool Info:
         - **CONTENT INSIGHTS**: Analyzes distribution and quality of stored content
         - **SEMANTIC READINESS**: Shows which tables are ready for semantic search
@@ -45,13 +44,14 @@ def analyze_memory_patterns() -> ToolResponse:
     """
     try:
         from .. import server
+
         db = get_database(server.DB_PATH)
-
+
         # Get all tables
         tables_result = db.list_tables()
         if not tables_result.get("success"):
             return cast(ToolResponse, tables_result)
-
+
         tables = tables_result.get("tables", [])
         analysis = {
             "content_distribution": {},
@@ -60,34 +60,36 @@ def analyze_memory_patterns() -> ToolResponse:
             "schema_analysis": {},
             "recommendations": [],
             "total_tables": len(tables),
-            "total_content_rows": 0
+            "total_content_rows": 0,
         }
-
+
         for table_name in tables:
             try:
                 # Get basic table info
                 rows_result = db.read_rows(table_name)
                 if not rows_result.get("success"):
                     continue
-
+
                 rows = rows_result.get("rows", [])
                 row_count = len(rows)
                 analysis["content_distribution"][table_name] = row_count
                 analysis["total_content_rows"] += row_count
-
+
                 # Analyze schema
                 schema_result = db.describe_table(table_name)
                 if schema_result.get("success"):
                     columns = schema_result.get("columns", [])
                     text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
-
+
                     analysis["schema_analysis"][table_name] = {
                         "total_columns": len(columns),
                         "text_columns": len(text_columns),
                         "has_id_column": any(col.get("name") == "id" for col in columns),
-                        "has_timestamp": any("timestamp" in col.get("name", "").lower() for col in columns),
+                        "has_timestamp": any(
+                            "timestamp" in col.get("name", "").lower() for col in columns
+                        ),
                     }
-
+
                     # Analyze text density
                     if rows and text_columns:
                         text_content_lengths = []
@@ -96,7 +98,7 @@ def analyze_memory_patterns() -> ToolResponse:
                             content = row.get(col["name"], "")
                             if content:
                                 text_content_lengths.append(len(str(content)))
-
+
                         if text_content_lengths:
                             avg_length = sum(text_content_lengths) / len(text_content_lengths)
                             if avg_length > 500:
@@ -105,7 +107,7 @@ def analyze_memory_patterns() -> ToolResponse:
                                 analysis["text_density"]["medium"].append(table_name)
                             else:
                                 analysis["text_density"]["low"].append(table_name)
-
+
                 # Check semantic readiness
                 if is_semantic_search_available():
                     embedding_stats = db.get_embedding_stats(table_name)
@@ -117,79 +119,101 @@ def analyze_memory_patterns() -> ToolResponse:
                         analysis["semantic_readiness"]["partial"].append(table_name)
                     else:
                         analysis["semantic_readiness"]["needs_setup"].append(table_name)
-
+
             except Exception as e:
                 logging.warning(f"Error analyzing table {table_name}: {e}")
                 continue
-
+
         # Generate recommendations
        recommendations = []
-
+
         # Semantic search recommendations
         if len(analysis["semantic_readiness"]["needs_setup"]) > 0:
-            high_value_tables = [t for t in analysis["semantic_readiness"]["needs_setup"]
-                                 if t in analysis["text_density"]["high"] + analysis["text_density"]["medium"]]
+            high_value_tables = [
+                t
+                for t in analysis["semantic_readiness"]["needs_setup"]
+                if t in analysis["text_density"]["high"] + analysis["text_density"]["medium"]
+            ]
             if high_value_tables:
-                recommendations.append(
-                    f"Consider setting up semantic search for high-value tables: {', '.join(high_value_tables[:3])}")
+                recommendations.append(
+                    f"Consider setting up semantic search for high-value tables: {', '.join(high_value_tables[:3])}"
+                )
+
         # Content organization recommendations
         large_tables = [t for t, count in analysis["content_distribution"].items() if count > 50]
         if large_tables:
-            recommendations.append(
-                f"Large tables detected: {', '.join(large_tables)}. Consider organizing with categories or tags.")
+            recommendations.append(
+                f"Large tables detected: {', '.join(large_tables)}. Consider organizing with categories or tags."
+            )
+
         # Empty or sparse tables
-        sparse_tables = [t for t, count in analysis["content_distribution"].items() if count < 5 and count > 0]
+        sparse_tables = [
+            t for t, count in analysis["content_distribution"].items() if count < 5 and count > 0
+        ]
         if sparse_tables:
-            recommendations.append(
-                f"Sparse tables found: {', '.join(sparse_tables)}. Consider consolidating or adding more content.")
+            recommendations.append(
+                f"Sparse tables found: {', '.join(sparse_tables)}. Consider consolidating or adding more content."
+            )
+
         # Schema improvements
-        tables_without_timestamps = [t for t, schema in analysis["schema_analysis"].items()
-                                     if not schema.get("has_timestamp")]
+        tables_without_timestamps = [
+            t
+            for t, schema in analysis["schema_analysis"].items()
+            if not schema.get("has_timestamp")
+        ]
         if len(tables_without_timestamps) > 2:
-            recommendations.append(
-                "Consider adding timestamp columns to track when content was created/modified.")
+            recommendations.append(
+                "Consider adding timestamp columns to track when content was created/modified."
+            )
+
         analysis["recommendations"] = recommendations
-
-        return cast(
-            ToolResponse,
-            {
-                "success": True,
-                "analysis": analysis,
-                "summary": {
-                    "tables_analyzed": len(tables),
-                    "total_rows": analysis["total_content_rows"],
-                    "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
-                    "high_value_content": len(analysis["text_density"]["high"])
-                }
-            })
+
+        return cast(
+            ToolResponse,
+            {
+                "success": True,
+                "analysis": analysis,
+                "summary": {
+                    "tables_analyzed": len(tables),
+                    "total_rows": analysis["total_content_rows"],
+                    "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
+                    "high_value_content": len(analysis["text_density"]["high"]),
+                    "recommendations_count": len(recommendations),
+                },
+            },
+        )
+
     except Exception as e:
-        return cast(
-            ToolResponse,
-            {"success": False,
-             "error": f"Memory pattern analysis failed: {str(e)}",
-             "category": "ANALYSIS",
-             "details": {"exception": str(e)}})
+        return cast(
+            ToolResponse,
+            {
+                "success": False,
+                "error": f"Memory pattern analysis failed: {str(e)}",
+                "category": "ANALYSIS",
+                "details": {"exception": str(e)},
+            },
+        )
 
 
 @catch_errors
 def get_content_health_score() -> ToolResponse:
     """
     📊 **CONTENT HEALTH ASSESSMENT** - Rate the quality of your memory bank!
-
+
     Provides a comprehensive health score based on content quality, organization,
     semantic search readiness, and usage patterns.
-
+
     Returns:
         ToolResponse: On success: {"success": True, "health_score": float, "metrics": dict}
                      On error: {"success": False, "error": str, "category": str, "details": dict}
-
+
     Examples:
         >>> get_content_health_score()
         {"success": True, "health_score": 8.5, "metrics": {
             "content_quality": 9.0, "organization": 7.5, "semantic_readiness": 8.0,
             "accessibility": 9.0, "recommendations": [...]
         }}
-
+
     FastMCP Tool Info:
         - **OVERALL SCORE**: Single metric (0-10) indicating memory bank health
         - **DETAILED METRICS**: Breakdown by quality, organization, and readiness
@@ -199,20 +223,24 @@ def get_content_health_score() -> ToolResponse:
     try:
         # Get the pattern analysis first - call database methods directly
         from .. import server
+
         db = get_database(server.DB_PATH)
-
+
         # Get all tables
         tables_result = db.list_tables()
         if not tables_result.get("success"):
-            return cast(
-                ToolResponse,
-                {
-                    "success": False,
-                    "error": "Failed to get tables for health analysis",
-                    "category": "DATABASE",
-                    "details": tables_result})
+            return cast(
+                ToolResponse,
+                {
+                    "success": False,
+                    "error": "Failed to get tables for health analysis",
+                    "category": "DATABASE",
+                    "details": tables_result,
+                },
+            )
+
         tables = tables_result.get("tables", [])
-
+
         # Build basic analysis for health scoring
         analysis = {
             "content_distribution": {},
@@ -220,34 +248,36 @@ def get_content_health_score() -> ToolResponse:
             "semantic_readiness": {"ready": [], "partial": [], "needs_setup": []},
             "schema_analysis": {},
             "total_tables": len(tables),
-            "total_content_rows": 0
+            "total_content_rows": 0,
         }
-
+
         for table_name in tables:
             try:
                 # Get basic table info
                 rows_result = db.read_rows(table_name)
                 if not rows_result.get("success"):
                     continue
-
+
                 rows = rows_result.get("rows", [])
                 row_count = len(rows)
                 analysis["content_distribution"][table_name] = row_count
                 analysis["total_content_rows"] += row_count
-
+
                 # Analyze schema
                 schema_result = db.describe_table(table_name)
                 if schema_result.get("success"):
                     columns = schema_result.get("columns", [])
                     text_columns = [col for col in columns if "TEXT" in col.get("type", "").upper()]
-
+
                     analysis["schema_analysis"][table_name] = {
                         "total_columns": len(columns),
                         "text_columns": len(text_columns),
                         "has_id_column": any(col.get("name") == "id" for col in columns),
-                        "has_timestamp": any("timestamp" in col.get("name", "").lower() for col in columns),
+                        "has_timestamp": any(
+                            "timestamp" in col.get("name", "").lower() for col in columns
+                        ),
                     }
-
+
                     # Analyze text density
                     if rows and text_columns:
                         text_content_lengths = []
@@ -256,7 +286,7 @@ def get_content_health_score() -> ToolResponse:
                             content = row.get(col["name"], "")
                             if content:
                                 text_content_lengths.append(len(str(content)))
-
+
                         if text_content_lengths:
                             avg_length = sum(text_content_lengths) / len(text_content_lengths)
                             if avg_length > 500:
@@ -265,7 +295,7 @@ def get_content_health_score() -> ToolResponse:
                                 analysis["text_density"]["medium"].append(table_name)
                             else:
                                 analysis["text_density"]["low"].append(table_name)
-
+
                 # Check semantic readiness
                 if is_semantic_search_available():
                     embedding_stats = db.get_embedding_stats(table_name)
@@ -277,26 +307,26 @@ def get_content_health_score() -> ToolResponse:
                         analysis["semantic_readiness"]["partial"].append(table_name)
                     else:
                         analysis["semantic_readiness"]["needs_setup"].append(table_name)
-
+
             except Exception as e:
                 logging.warning(f"Error analyzing table {table_name}: {e}")
                 continue
-
+
         summary = {
             "tables_analyzed": len(tables),
             "total_rows": analysis["total_content_rows"],
             "semantic_ready": len(analysis["semantic_readiness"]["ready"]),
-            "high_value_content": len(analysis["text_density"]["high"])
+            "high_value_content": len(analysis["text_density"]["high"]),
         }
-
+
         # Calculate health metrics (0-10 scale)
         metrics = {}
-
+
         # 1. Content Quality Score (based on text density and volume)
         total_rows = summary.get("total_rows", 0)
         high_quality_tables = len(analysis.get("text_density", {}).get("high", []))
         total_tables = summary.get("tables_analyzed", 1)
-
+
         if total_rows == 0:
             metrics["content_volume"] = 0.0
         elif total_rows < 10:
@@ -307,13 +337,13 @@ def get_content_health_score() -> ToolResponse:
             metrics["content_volume"] = 8.0
         else:
             metrics["content_volume"] = 10.0
-
+
         metrics["content_quality"] = min(10.0, (high_quality_tables / total_tables) * 10 + 3)
-
+
         # 2. Organization Score (based on schema quality)
         schema_analysis = analysis.get("schema_analysis", {})
         organization_factors = []
-
+
         for table_name, schema in schema_analysis.items():
             table_score = 0
             if schema.get("has_id_column"):
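The content_quality metric in the hunk above is a ratio with a fixed offset and a cap. A quick worked example, using hypothetical table counts rather than values from this diff:

```python
# Worked example of the content_quality formula (hypothetical counts).
high_quality_tables = 2   # tables whose average text length exceeds 500 chars
total_tables = 4

content_quality = min(10.0, (high_quality_tables / total_tables) * 10 + 3)
print(content_quality)  # 2/4 * 10 + 3 = 8.0

# The +3 offset floors the metric at 3.0 when no table has high-density text,
# and min() caps it at 10.0 (3 of 4 tables would give 10.5, capped to 10.0).
```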
@@ -325,63 +355,76 @@ def get_content_health_score() -> ToolResponse:
             if 2 <= schema.get("total_columns", 0) <= 10:  # Good column count
                 table_score += 3
             organization_factors.append(table_score)
-
-        metrics["organization"] = (
-            (sum(organization_factors) / len(organization_factors)) if organization_factors else 5.0)
+
+        metrics["organization"] = (
+            (sum(organization_factors) / len(organization_factors)) if organization_factors else 5.0
+        )
+
         # 3. Semantic Readiness Score
         semantic_ready = len(analysis.get("semantic_readiness", {}).get("ready", []))
         semantic_partial = len(analysis.get("semantic_readiness", {}).get("partial", []))
-        semantic_needed = len(analysis.get("semantic_readiness", {}).get("needs_setup", []))
-
         if not is_semantic_search_available():
             metrics["semantic_readiness"] = 5.0  # Neutral score if not available
             metrics["semantic_note"] = "Semantic search dependencies not available"
         else:
             semantic_score = ((semantic_ready * 2 + semantic_partial) / (total_tables * 2)) * 10
             metrics["semantic_readiness"] = min(10.0, semantic_score)
-
+
         # 4. Accessibility Score (how easy it is to find and use content)
         medium_density = len(analysis.get("text_density", {}).get("medium", []))
-
-
+
         # Prefer medium density (not too verbose, not too sparse)
         if total_tables == 0:
             metrics["accessibility"] = 5.0
         else:
-            accessibility_score = ((high_quality_tables + medium_density * 1.5) / total_tables) * 8 + 2
+            accessibility_score = (
+                (high_quality_tables + medium_density * 1.5) / total_tables
+            ) * 8 + 2
             metrics["accessibility"] = min(10.0, accessibility_score)
-
+
         # 5. Overall Health Score (weighted average)
         weights = {
             "content_volume": 0.2,
             "content_quality": 0.3,
             "organization": 0.2,
             "semantic_readiness": 0.15,
-            "accessibility": 0.15
+            "accessibility": 0.15,
         }
-
+
         health_score = sum(metrics[key] * weights[key] for key in weights.keys())
-
+
         # Generate health-specific recommendations
         health_recommendations = []
-
+
         if metrics["content_volume"] < 5:
-            health_recommendations.append("🔴 LOW CONTENT: Add more valuable content to your memory bank")
+            health_recommendations.append(
+                "🔴 LOW CONTENT: Add more valuable content to your memory bank"
+            )
         elif metrics["content_volume"] < 7:
-            health_recommendations.append(
-                "🟡 MODERATE CONTENT: Consider expanding your knowledge base")
+            health_recommendations.append(
+                "🟡 MODERATE CONTENT: Consider expanding your knowledge base"
+            )
+
         if metrics["content_quality"] < 6:
-            health_recommendations.append(
-                "🔴 CONTENT QUALITY: Focus on adding more detailed, rich content")
+            health_recommendations.append(
+                "🔴 CONTENT QUALITY: Focus on adding more detailed, rich content"
+            )
+
         if metrics["organization"] < 6:
-            health_recommendations.append(
-                "🔴 ORGANIZATION: Improve table schemas with timestamps and proper columns")
+            health_recommendations.append(
+                "🔴 ORGANIZATION: Improve table schemas with timestamps and proper columns"
+            )
+
         if metrics["semantic_readiness"] < 5 and is_semantic_search_available():
-            health_recommendations.append(
-                "🟡 SEMANTIC SEARCH: Set up embeddings for better content discovery")
+            health_recommendations.append(
+                "🟡 SEMANTIC SEARCH: Set up embeddings for better content discovery"
+            )
+
         if metrics["accessibility"] < 6:
-            health_recommendations.append(
-                "🔴 ACCESSIBILITY: Improve content structure for easier discovery")
+            health_recommendations.append(
+                "🔴 ACCESSIBILITY: Improve content structure for easier discovery"
+            )
+
         # Health grade
         if health_score >= 9:
             grade = "A+ (Excellent)"
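The overall health_score in the hunk above is a weighted average whose weights sum to 1.0, so it stays on the same 0-10 scale as the five component metrics. A worked example with hypothetical metric values:

```python
# Hypothetical component metrics (0-10); not values from this diff.
metrics = {
    "content_volume": 8.0,
    "content_quality": 8.0,
    "organization": 6.0,
    "semantic_readiness": 5.0,
    "accessibility": 7.0,
}
weights = {
    "content_volume": 0.2,
    "content_quality": 0.3,
    "organization": 0.2,
    "semantic_readiness": 0.15,
    "accessibility": 0.15,
}

health_score = sum(metrics[k] * weights[k] for k in weights)
print(round(health_score, 1))  # 1.6 + 2.4 + 1.2 + 0.75 + 1.05 = 7.0
```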
@@ -395,25 +438,31 @@ def get_content_health_score() -> ToolResponse:
             grade = "C (Needs Improvement)"
         else:
             grade = "D (Poor - Needs Attention)"
-
-        return cast(
-            ToolResponse,
-            {
-                "success": True,
-                "health_score": round(health_score, 1),
-                "grade": grade,
-                "metrics": {k: round(v, 1) for k, v in metrics.items()},
-                "recommendations": health_recommendations,
-                "detailed_analysis": analysis,
-                "improvement_priority": {
-                    "highest": [k for k, v in metrics.items() if v < 5],
-                    "medium": [k for k, v in metrics.items() if 5 <= v < 7],
-                    "good": [k for k, v in metrics.items() if v >= 7]},
-            })
+
+        return cast(
+            ToolResponse,
+            {
+                "success": True,
+                "health_score": round(health_score, 1),
+                "grade": grade,
+                "metrics": {k: round(v, 1) for k, v in metrics.items()},
+                "recommendations": health_recommendations,
+                "detailed_analysis": analysis,
+                "improvement_priority": {
+                    "highest": [k for k, v in metrics.items() if v < 5],
+                    "medium": [k for k, v in metrics.items() if 5 <= v < 7],
+                    "good": [k for k, v in metrics.items() if v >= 7],
+                },
+            },
+        )
+
     except Exception as e:
-        return cast(
-            ToolResponse,
-            {"success": False,
-             "error": f"Content health assessment failed: {str(e)}",
-             "category": "ANALYSIS",
-             "details": {"exception": str(e)}})
+        return cast(
+            ToolResponse,
+            {
+                "success": False,
+                "error": f"Content health assessment failed: {str(e)}",
+                "category": "ANALYSIS",
+                "details": {"exception": str(e)},
+            },
+        )