adbpg-mcp-server 1.0.6__py3-none-any.whl → 1.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
METADATA CHANGED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: adbpg-mcp-server
- Version: 1.0.6
- Summary: MCP server for AnalyticDB PostgreSQL
+ Version: 1.0.8
+ Summary: MCP Server for AnalyticDB PostgreSQL
  License-File: LICENSE
  Requires-Python: >=3.10
  Requires-Dist: mcp>=1.4.0
adbpg_mcp_server-1.0.8.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ adbpg_mcp_server.py,sha256=w0ORWo_uAcsidDt_AXB3bxVTE424RswsH695wqOeNoM,45143
+ adbpg_mcp_server-1.0.8.dist-info/METADATA,sha256=f5HqV6BIloD80zBZzWbD8eEEsoPUcsnw4cqtlXxKADo,275
+ adbpg_mcp_server-1.0.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ adbpg_mcp_server-1.0.8.dist-info/entry_points.txt,sha256=n2NpLR8WNfa3Edju2l7Ngnsp7EiCUqNdYkF0dg42dnQ,58
+ adbpg_mcp_server-1.0.8.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ adbpg_mcp_server-1.0.8.dist-info/RECORD,,
adbpg_mcp_server.py CHANGED
@@ -70,7 +70,7 @@ except Exception as e:
      logger.error(f"Error loading environment variables: {e}")
      sys.exit(1)

- SERVER_VERSION = "0.1.0"
+ SERVER_VERSION = "0.2.0"


  # Get the graphrag initialization configuration
@@ -84,12 +84,29 @@ def get_graphrag_config():
          "embedding_url": os.getenv("GRAPHRAG_EMBEDDING_BASE_URL"),
          "language": os.getenv("GRAPHRAG_LANGUAGE", "English"),
          "entity_types": os.getenv("GRAPHRAG_ENTITY_TYPES"),
-         "relationship_types": os.getenv("GRAPHRAG_RELATIONSHIP_TYPES")
+         "relationship_types": os.getenv("GRAPHRAG_RELATIONSHIP_TYPES"),
+         "postgres_password": os.getenv("ADBPG_PASSWORD")
      }
      return graphrag_config

  # Get the llmemory initialization configuration
  def get_llmemory_config():
+     config = get_db_config()
+     port = 3000
+     sql = """
+     select port from gp_segment_configuration where content = -1 and role = 'p';
+     """
+     try:
+         with psycopg.connect(**config) as conn:
+             conn.autocommit = True
+             with conn.cursor() as cursor:
+                 cursor.execute(sql)
+                 port = cursor.fetchone()[0]
+     except Error as e:
+         raise RuntimeError(f"Database error: {str(e)}")
+     llmemory_enable_graph = os.getenv("LLMEMORY_ENABLE_GRAPH", "False")
+
+
      llm_memory_config = {
          "llm": {
              "provider": "openai",
@@ -115,10 +132,22 @@ def get_llmemory_config():
                  "password": os.getenv("ADBPG_PASSWORD"),
                  "dbname": os.getenv("ADBPG_DATABASE"),
                  "hnsw": "True",
-                 "embedding_model_dims": os.getenv("LLMEMORY_EMBEDDING_DIMS", 1024)
+                 "embedding_model_dims": os.getenv("LLMEMORY_EMBEDDING_DIMS", 1024),
+                 "port": port
              }
          }
      }
+     if llmemory_enable_graph == "True" or llmemory_enable_graph == "true":
+         llm_memory_config["graph_store"] = {
+             "provider": "adbpg",
+             "config": {
+                 "url": "http://localhost",
+                 "username": os.getenv("ADBPG_USER"),
+                 "password": os.getenv("ADBPG_PASSWORD"),
+                 "database": os.getenv("ADBPG_DATABASE"),
+                 "port": port
+             }
+         }
      return llm_memory_config

  def get_db_config():
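Note that the optional graph_store block above is attached only when the LLMEMORY_ENABLE_GRAPH environment variable is exactly the string "True" or "true"; other spellings such as "TRUE" or "1" leave it off. A small sketch of that gating as the diff implements it (the print messages are illustrative):

    import os

    # Only the exact strings "True" or "true" enable the optional graph_store section.
    llmemory_enable_graph = os.getenv("LLMEMORY_ENABLE_GRAPH", "False")

    if llmemory_enable_graph == "True" or llmemory_enable_graph == "true":
        print("graph_store enabled: provider 'adbpg' at http://localhost on the coordinator port")
    else:
        print("graph_store omitted from the llm_memory config")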
@@ -186,8 +215,8 @@ def get_graphrag_tool_connection() -> Connection:

  LLM_MEMORY_CONN: Connection | None = None
  def get_llm_memory_tool_connection() -> Connection:
-     global LLM_MEMORY_CONN
      global LLMEMORY_ENV_IS_READY
+     global LLM_MEMORY_CONN
      config = get_db_config()
      # Reconnect if there is no connection yet or the existing one has gone stale
      if LLM_MEMORY_CONN is None or LLM_MEMORY_CONN.closed:
@@ -507,7 +536,7 @@ async def list_tools() -> list[Tool]:
          #### graphrag & llm_memory tool list
          Tool(
              name = "adbpg_graphrag_upload",
-             description = "Upload a text file (with its name) and file content to graphrag to generate a knowledge graph.",
+             description = "Execute graphrag upload operation",
              # Parameters: filename text, context text
              # filename is the file name; context is the file content
              inputSchema = {
@@ -515,11 +544,11 @@ async def list_tools() -> list[Tool]:
                  "properties": {
                      "filename": {
                          "type": "string",
-                         "description": "The name of the file to be uploaded"
+                         "description": "The file name need to upload"
                      },
                      "context": {
                          "type": "string",
-                         "description": "The textual content of the file."
+                         "description": "the context of your file"
                      }
                  },
                  "required": ["filename", "context"]
@@ -527,7 +556,7 @@ async def list_tools() -> list[Tool]:
          ),
          Tool(
              name = "adbpg_graphrag_query",
-             description = "Query the graphrag using the specified query string and mode.",
+             description = "Execute graphrag query operation",
              # Parameters: query_str text, [query_mode text]
              # query_str is the question to ask; query_mode selects the query mode
              inputSchema = {
@@ -535,11 +564,15 @@ async def list_tools() -> list[Tool]:
                  "properties": {
                      "query_str": {
                          "type": "string",
-                         "description": "The query content."
+                         "description": "The query you want to ask"
                      },
                      "query_mode": {
                          "type": "string",
-                         "description": "The query mode, choose from [bypass, naive, local, global, hybrid, mix]. If null, defaults to mix."
+                         "description": "The query mode you need to choose [ bypass,naive, local, global, hybrid, mix[default], tree ]."
+                     },
+                     "start_search_node_id": {
+                         "type": "string",
+                         "description": "If using 'tree' query mode, set the start node ID of tree."
                      }
                  },
                  "required": ["query_str"]
@@ -593,24 +626,40 @@ async def list_tools() -> list[Tool]:
                      "root_node_entity": {
                          "type": "string",
                          "description": "the root_noot_entity"
+
                      }
                  },
                  "required": ["root_node_entity"]
              }
          ),
-
-
+         Tool(
+             name = "adbpg_graphrag_reset_tree_query",
+             description = " Reset the decision tree in the tree query mode",
+             # para:
+             inputSchema = {
+                 "type": "object",
+                 "required": []
+             }
+         ),
          Tool(
              name = "adbpg_llm_memory_add",
-             description = "Add LLM long memory with a specific user, run or agent.",
+             description = "Execute llm_memory add operation",
              # Parameters: messages json, user_id text, run_id text, agent_id text, metadata json
              # Add a new memory
              inputSchema={
                  "type": "object",
                  "properties": {
                      "messages": {
-                         "type": "object",
-                         "description": "llm_memory messages"
+                         "type": "array",
+                         "items": {
+                             "type": "object",
+                             "properties": {
+                                 "role": {"type": "string"},
+                                 "content": {"type": "string"}
+                             },
+                             "required": ["role", "content"]
+                         },
+                         "description": "List of messages objects (e.g., conversation history)"
                      },
                      "user_id": {
                          "type": "string",
@@ -642,7 +691,7 @@ async def list_tools() -> list[Tool]:
          ),
          Tool(
              name = "adbpg_llm_memory_get_all",
-             description = "Retrieves all memory records associated with a specific user, run or agent.",
+             description = "Execute llm_memory get_all operation",
              # Parameters: user_id text, run_id text, agent_id text
              # Get all memories for a given user or agent
              inputSchema={
@@ -666,7 +715,7 @@ async def list_tools() -> list[Tool]:
          ),
          Tool(
              name = "adbpg_llm_memory_search",
-             description = "Retrieves memories relevant to the given query for a specific user, run, or agent.",
+             description = "Execute llm_memory search operation",
              # Parameters: query text, user_id text, run_id text, agent_id text, filter json
              # Get memories relevant to the given query
              inputSchema={
@@ -699,7 +748,7 @@ async def list_tools() -> list[Tool]:
          ,
          Tool(
              name = "adbpg_llm_memory_delete_all",
-             description = "Delete all memory records associated with a specific user, run or agent.",
+             description = "Execute llm_memory delete_all operation",
              # Parameters: user_id text, run_id text, agent_id text
              # Delete all memories for a given user or agent
              inputSchema={
@@ -741,6 +790,7 @@ def get_llm_memory_tool_result(wrapped_sql, params) -> list[TextContent]:
      try:
          conn = get_llm_memory_tool_connection()
          with conn.cursor() as cursor:
+
              cursor.execute(wrapped_sql, params)

              if cursor.description:
@@ -788,6 +838,11 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
              raise ValueError("Query is required")
          if not query.strip().upper().startswith("SELECT"):
              raise ValueError("Query must be a SELECT statement")
+         query = query.rstrip().rstrip(';')
+         query = f"""
+             SELECT json_agg(row_to_json(t))
+             FROM ({query}) AS t
+         """
      elif name == "execute_dml_sql":
          query = arguments.get("query")
          if not query:
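With this change the SELECT wrapping happens at validation time, and the execution path later in this diff simply runs the already wrapped query. A short sketch of what the wrapping produces for a sample statement; the table and columns are made up:

    # Sketch of the new SELECT wrapping applied for execute_select_sql.
    query = "SELECT id, name FROM users WHERE active;"
    query = query.rstrip().rstrip(';')   # drop the trailing semicolon so the statement can be nested
    query = f"""
        SELECT json_agg(row_to_json(t))
        FROM ({query}) AS t
    """
    # The wrapped statement returns a single row holding a JSON array of the original
    # result rows (or NULL if the SELECT matched nothing), which the handler then
    # serializes with json.dumps.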
@@ -798,7 +853,7 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
          query = arguments.get("query")
          if not query:
              raise ValueError("Query is required")
-         if not any(query.strip().upper().startswith(keyword) for keyword in ["CREATE", "ALTER", "DROP"]):
+         if not any(query.strip().upper().startswith(keyword) for keyword in ["CREATE", "ALTER", "DROP", "TRUNCATE"]):
              raise ValueError("Query must be a DDL statement (CREATE, ALTER, DROP)")
      elif name == "analyze_table":
          schema = arguments.get("schema")
@@ -836,18 +891,36 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
              raise ValueError("GraphRAG Server initialization failed. This tool cannot be used.")
          query_str = arguments.get("query_str")
          query_mode = arguments.get("query_mode")
+         start_search_node_id = arguments.get("start_search_node_id")
+
          if not query_str:
              raise ValueError("Query is required")
          if not query_mode:
              # default mode
              query_mode = "mix"
-         # Assemble the command
+
          wrapped_sql = f"""
-         SELECT adbpg_graphrag.query(%s::text, %s::text)
-         """
+             SELECT adbpg_graphrag.query(%s::text, %s::text)
+             """
          params = [query_str, query_mode]
-         return get_graphrag_tool_result(wrapped_sql, params)

+         if start_search_node_id:
+             wrapped_sql = f"""
+                 SELECT adbpg_graphrag.query(%s::text, %s::text, %s::text)
+             """
+             params = [query_str, query_mode, start_search_node_id]
+
+         return get_graphrag_tool_result(wrapped_sql, params)
+
+     elif name == "adbpg_graphrag_reset_tree_query":
+         if GRAPHRAG_ENV_IS_READY == False:
+             raise ValueError("GraphRAG Server initialization failed. This tool cannot be used.")
+         wrapped_sql = f"""
+             SELECT adbpg_graphrag.reset_tree_query()
+         """
+         params = []
+         return get_graphrag_tool_result(wrapped_sql, params)
+
      elif name == "adbpg_graphrag_upload_decision_tree":
          if GRAPHRAG_ENV_IS_READY == False:
              raise ValueError("GraphRAG Server initialization failed. This tool cannot be used.")
@@ -1040,15 +1113,8 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
          conn.autocommit = True
          with conn.cursor() as cursor:

-             #cursor.execute("SET statement_timeout = 300000")
-             # Strip the trailing semicolon from the subquery
-             query = query.rstrip().rstrip(';')
-             wrapped_query = f"""
-                 SELECT json_agg(row_to_json(t))
-                 FROM ({query}) AS t
-             """
-             cursor.execute(wrapped_query)
-
+             cursor.execute(query)
+
              if name == "analyze_table":
                  return [TextContent(type="text", text=f"Successfully analyzed table {schema}.{table}")]

@@ -1057,7 +1123,6 @@ async def call_tool(name: str, arguments: dict) -> list[TextContent]:
                  json_result = cursor.fetchone()[0]
                  json_str = json.dumps(json_result, ensure_ascii = False, indent = 2)
                  result = [TextContent(type="text", text=json_str)]
-
                  try:
                      json.loads(result[0].text)
                  except json.JSONDecodeError as e:
adbpg_mcp_server-1.0.6.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
- adbpg_mcp_server.py,sha256=dFuiaubyCP0_KJddz2mTxzDTxQmOCGCrinGqi8DxG6I,42882
- adbpg_mcp_server-1.0.6.dist-info/METADATA,sha256=lgLvhiuNHXR8oUeH_igvQsqOodrwiyGO9JbSXPnBxBc,275
- adbpg_mcp_server-1.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- adbpg_mcp_server-1.0.6.dist-info/entry_points.txt,sha256=n2NpLR8WNfa3Edju2l7Ngnsp7EiCUqNdYkF0dg42dnQ,58
- adbpg_mcp_server-1.0.6.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- adbpg_mcp_server-1.0.6.dist-info/RECORD,,