pomera-ai-commander 1.2.8 → 1.2.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +113 -89
- package/core/database_schema.py +24 -1
- package/core/database_schema_manager.py +4 -2
- package/core/database_settings_manager.py +25 -2
- package/core/dialog_manager.py +4 -4
- package/core/efficient_line_numbers.py +5 -4
- package/core/load_presets_dialog.py +460 -0
- package/core/mcp/tool_registry.py +327 -0
- package/core/settings_defaults_registry.py +159 -15
- package/mcp.json +1 -1
- package/package.json +2 -1
- package/pomera.py +755 -22
- package/tools/case_tool.py +4 -4
- package/tools/curl_settings.py +12 -1
- package/tools/curl_tool.py +176 -11
- package/tools/tool_loader.py +18 -0
- package/tools/url_content_reader.py +402 -0
- package/tools/web_search.py +522 -0
|
@@ -202,6 +202,10 @@ class ToolRegistry:
|
|
|
202
202
|
# Find & Replace Diff Tool (Phase 6) - regex find/replace with diff preview and Notes backup
|
|
203
203
|
self._register_find_replace_diff_tool()
|
|
204
204
|
|
|
205
|
+
# Web Search and URL Reader Tools (Phase 7)
|
|
206
|
+
self._register_web_search_tool()
|
|
207
|
+
self._register_read_url_tool()
|
|
208
|
+
|
|
205
209
|
self._logger.info(f"Registered {len(self._tools)} built-in MCP tools")
|
|
206
210
|
|
|
207
211
|
|
|
@@ -2711,6 +2715,329 @@ class ToolRegistry:
|
|
|
2711
2715
|
registry._logger.warning(f"Failed to get note: {e}")
|
|
2712
2716
|
return None
|
|
2713
2717
|
return get_note
|
|
2718
|
+
|
|
2719
|
+
def _register_web_search_tool(self) -> None:
    """Register the multi-engine Web Search Tool with the MCP registry."""
    # JSON schema for the tool's arguments; "query" is the only required key.
    schema = {
        "type": "object",
        "properties": {
            "query": {
                "type": "string",
                "description": "Search query"
            },
            "engine": {
                "type": "string",
                "enum": ["tavily", "google", "brave", "duckduckgo", "serpapi", "serper"],
                "description": "Search engine to use",
                "default": "tavily"
            },
            "count": {
                "type": "integer",
                "description": "Number of results (1-20)",
                "default": 5,
                "minimum": 1,
                "maximum": 20
            }
        },
        "required": ["query"]
    }
    adapter = MCPToolAdapter(
        name="pomera_web_search",
        description="Search the web using multiple engines. Engines: tavily (AI-optimized, recommended), "
                    "google (100/day free), brave (2000/month free), duckduckgo (free, no key), "
                    "serpapi (100 total free), serper (2500 total free).",
        input_schema=schema,
        handler=self._handle_web_search
    )
    self.register(adapter)
+
|
|
2752
|
+
def _handle_web_search(self, args: Dict[str, Any]) -> str:
|
|
2753
|
+
"""Handle web search tool execution using encrypted API keys from database settings."""
|
|
2754
|
+
import json
|
|
2755
|
+
import urllib.request
|
|
2756
|
+
import urllib.parse
|
|
2757
|
+
|
|
2758
|
+
query = args.get("query", "").strip()
|
|
2759
|
+
engine = args.get("engine", "duckduckgo").lower()
|
|
2760
|
+
count = args.get("count", 5)
|
|
2761
|
+
|
|
2762
|
+
# Validate inputs
|
|
2763
|
+
if not query:
|
|
2764
|
+
return json.dumps({"success": False, "error": "Query is required"})
|
|
2765
|
+
|
|
2766
|
+
valid_engines = ["duckduckgo", "tavily", "google", "brave", "serpapi", "serper"]
|
|
2767
|
+
if engine not in valid_engines:
|
|
2768
|
+
return json.dumps({
|
|
2769
|
+
"success": False,
|
|
2770
|
+
"error": f"Invalid engine: '{engine}'. Valid engines: {', '.join(valid_engines)}"
|
|
2771
|
+
})
|
|
2772
|
+
|
|
2773
|
+
try:
|
|
2774
|
+
# Get API key from encrypted database settings
|
|
2775
|
+
api_key = self._get_encrypted_web_search_api_key(engine)
|
|
2776
|
+
cse_id = self._get_web_search_setting(engine, "cse_id", "")
|
|
2777
|
+
|
|
2778
|
+
# Execute search based on engine
|
|
2779
|
+
if engine == "duckduckgo":
|
|
2780
|
+
results = self._mcp_search_duckduckgo(query, count)
|
|
2781
|
+
elif engine == "tavily":
|
|
2782
|
+
if not api_key:
|
|
2783
|
+
return json.dumps({"success": False, "error": "Tavily API key required. Configure in Web Search settings."})
|
|
2784
|
+
results = self._mcp_search_tavily(query, count, api_key)
|
|
2785
|
+
elif engine == "google":
|
|
2786
|
+
if not api_key or not cse_id:
|
|
2787
|
+
return json.dumps({"success": False, "error": "Google API key and CSE ID required. Configure in Web Search settings."})
|
|
2788
|
+
results = self._mcp_search_google(query, count, api_key, cse_id)
|
|
2789
|
+
elif engine == "brave":
|
|
2790
|
+
if not api_key:
|
|
2791
|
+
return json.dumps({"success": False, "error": "Brave API key required. Configure in Web Search settings."})
|
|
2792
|
+
results = self._mcp_search_brave(query, count, api_key)
|
|
2793
|
+
elif engine == "serpapi":
|
|
2794
|
+
if not api_key:
|
|
2795
|
+
return json.dumps({"success": False, "error": "SerpApi key required. Configure in Web Search settings."})
|
|
2796
|
+
results = self._mcp_search_serpapi(query, count, api_key)
|
|
2797
|
+
elif engine == "serper":
|
|
2798
|
+
if not api_key:
|
|
2799
|
+
return json.dumps({"success": False, "error": "Serper API key required. Configure in Web Search settings."})
|
|
2800
|
+
results = self._mcp_search_serper(query, count, api_key)
|
|
2801
|
+
else:
|
|
2802
|
+
return json.dumps({"success": False, "error": f"Unknown engine: {engine}"})
|
|
2803
|
+
|
|
2804
|
+
output = {
|
|
2805
|
+
"success": True,
|
|
2806
|
+
"query": query,
|
|
2807
|
+
"engine": engine,
|
|
2808
|
+
"count": len(results),
|
|
2809
|
+
"results": results
|
|
2810
|
+
}
|
|
2811
|
+
return json.dumps(output, indent=2, ensure_ascii=False)
|
|
2812
|
+
except Exception as e:
|
|
2813
|
+
return json.dumps({"success": False, "error": str(e)})
|
|
2814
|
+
|
|
2815
|
+
def _get_encrypted_web_search_api_key(self, engine_key: str) -> str:
|
|
2816
|
+
"""Load encrypted API key for a search engine from database settings.
|
|
2817
|
+
|
|
2818
|
+
Uses the same database path as the Pomera UI to ensure keys are loaded
|
|
2819
|
+
from the correct location.
|
|
2820
|
+
"""
|
|
2821
|
+
try:
|
|
2822
|
+
from tools.ai_tools import decrypt_api_key
|
|
2823
|
+
from core.database_settings_manager import DatabaseSettingsManager
|
|
2824
|
+
|
|
2825
|
+
# Get the correct database path (same as UI uses)
|
|
2826
|
+
try:
|
|
2827
|
+
from core.data_directory import get_database_path
|
|
2828
|
+
db_path = get_database_path("settings.db")
|
|
2829
|
+
except ImportError:
|
|
2830
|
+
# Fallback to relative path
|
|
2831
|
+
db_path = "settings.db"
|
|
2832
|
+
|
|
2833
|
+
settings_manager = DatabaseSettingsManager(db_path=db_path)
|
|
2834
|
+
web_search_settings = settings_manager.get_tool_settings("Web Search")
|
|
2835
|
+
|
|
2836
|
+
encrypted = web_search_settings.get(f"{engine_key}_api_key", "")
|
|
2837
|
+
if encrypted:
|
|
2838
|
+
return decrypt_api_key(encrypted)
|
|
2839
|
+
except Exception as e:
|
|
2840
|
+
self._logger.warning(f"Failed to load API key for {engine_key}: {e}")
|
|
2841
|
+
return ""
|
|
2842
|
+
|
|
2843
|
+
def _get_web_search_setting(self, engine_key: str, setting: str, default: str) -> str:
|
|
2844
|
+
"""Get a web search setting from database.
|
|
2845
|
+
|
|
2846
|
+
Uses the same database path as the Pomera UI.
|
|
2847
|
+
"""
|
|
2848
|
+
try:
|
|
2849
|
+
from core.database_settings_manager import DatabaseSettingsManager
|
|
2850
|
+
|
|
2851
|
+
# Get the correct database path (same as UI uses)
|
|
2852
|
+
try:
|
|
2853
|
+
from core.data_directory import get_database_path
|
|
2854
|
+
db_path = get_database_path("settings.db")
|
|
2855
|
+
except ImportError:
|
|
2856
|
+
db_path = "settings.db"
|
|
2857
|
+
|
|
2858
|
+
settings_manager = DatabaseSettingsManager(db_path=db_path)
|
|
2859
|
+
web_search_settings = settings_manager.get_tool_settings("Web Search")
|
|
2860
|
+
|
|
2861
|
+
return web_search_settings.get(f"{engine_key}_{setting}", default)
|
|
2862
|
+
except Exception:
|
|
2863
|
+
return default
|
|
2864
|
+
|
|
2865
|
+
def _mcp_search_duckduckgo(self, query: str, count: int) -> list:
|
|
2866
|
+
"""Search DuckDuckGo (free, no API key)."""
|
|
2867
|
+
try:
|
|
2868
|
+
from ddgs import DDGS
|
|
2869
|
+
except ImportError:
|
|
2870
|
+
return [{"title": "Error", "snippet": "DuckDuckGo requires: pip install ddgs", "url": ""}]
|
|
2871
|
+
|
|
2872
|
+
try:
|
|
2873
|
+
with DDGS() as ddgs:
|
|
2874
|
+
results = []
|
|
2875
|
+
for r in ddgs.text(query, max_results=count):
|
|
2876
|
+
results.append({
|
|
2877
|
+
"title": r.get("title", ""),
|
|
2878
|
+
"snippet": r.get("body", ""),
|
|
2879
|
+
"url": r.get("href", ""),
|
|
2880
|
+
"source": "duckduckgo"
|
|
2881
|
+
})
|
|
2882
|
+
return results
|
|
2883
|
+
except Exception as e:
|
|
2884
|
+
return [{"title": "Error", "snippet": str(e), "url": ""}]
|
|
2885
|
+
|
|
2886
|
+
def _mcp_search_tavily(self, query: str, count: int, api_key: str) -> list:
    """Search using Tavily API."""
    import json
    import urllib.request

    # Tavily takes the key in the JSON body rather than a header.
    payload = json.dumps({
        "api_key": api_key,
        "query": query,
        "search_depth": "basic",
        "max_results": count
    }).encode()

    request = urllib.request.Request(
        "https://api.tavily.com/search",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request, timeout=30) as resp:
        body = json.loads(resp.read().decode())

    formatted = []
    for item in body.get("results", []):
        formatted.append({
            "title": item.get("title", ""),
            "snippet": item.get("content", ""),
            "url": item.get("url", ""),
            "source": "tavily",
        })
    return formatted
|
|
2909
|
+
def _mcp_search_google(self, query: str, count: int, api_key: str, cse_id: str) -> list:
    """Search using Google Custom Search API.

    Args:
        query: Search terms.
        count: Desired result count (the API caps `num` at 10).
        api_key: Google API key.
        cse_id: Custom Search Engine ID (the `cx` parameter).

    Returns:
        List of dicts with title/snippet/url/source keys.
    """
    import urllib.request
    import urllib.parse
    import json

    # FIX: build the query string with urlencode so key, cx and q are all
    # percent-encoded; raw f-string interpolation broke for values containing
    # reserved characters (&, +, spaces, ...).
    params = urllib.parse.urlencode({
        "key": api_key,
        "cx": cse_id,
        "q": query,
        "num": min(count, 10),
    })
    url = f"https://www.googleapis.com/customsearch/v1?{params}"

    req = urllib.request.Request(url)
    with urllib.request.urlopen(req, timeout=30) as response:
        data = json.loads(response.read().decode())

    return [
        {"title": item.get("title", ""), "snippet": item.get("snippet", ""),
         "url": item.get("link", ""), "source": "google"}
        for item in data.get("items", [])
    ]
|
|
2927
|
+
def _mcp_search_brave(self, query: str, count: int, api_key: str) -> list:
    """Search using Brave Search API."""
    import urllib.request
    import urllib.parse
    import json

    # Brave caps `count` at 20; the key travels in a subscription header.
    endpoint = (
        "https://api.search.brave.com/res/v1/web/search"
        f"?q={urllib.parse.quote(query)}&count={min(count, 20)}"
    )
    headers = {
        "Accept": "application/json",
        "X-Subscription-Token": api_key,
    }

    request = urllib.request.Request(endpoint, headers=headers)
    with urllib.request.urlopen(request, timeout=30) as resp:
        payload = json.loads(resp.read().decode())

    hits = payload.get("web", {}).get("results", [])
    formatted = []
    for hit in hits:
        formatted.append({
            "title": hit.get("title", ""),
            "snippet": hit.get("description", ""),
            "url": hit.get("url", ""),
            "source": "brave",
        })
    return formatted
|
|
2948
|
+
def _mcp_search_serpapi(self, query: str, count: int, api_key: str) -> list:
    """Search using SerpApi.

    Args:
        query: Search terms.
        count: Result limit (API `num` capped at 10; list also sliced).
        api_key: SerpApi key.

    Returns:
        List of dicts with title/snippet/url/source keys.
    """
    import urllib.request
    import urllib.parse
    import json

    # FIX: percent-encode all parameters (including the API key) via
    # urlencode instead of raw f-string interpolation.
    params = urllib.parse.urlencode({
        "q": query,
        "api_key": api_key,
        "num": min(count, 10),
    })
    url = f"https://serpapi.com/search?{params}"

    req = urllib.request.Request(url)
    with urllib.request.urlopen(req, timeout=30) as response:
        data = json.loads(response.read().decode())

    return [
        {"title": item.get("title", ""), "snippet": item.get("snippet", ""),
         "url": item.get("link", ""), "source": "serpapi"}
        for item in data.get("organic_results", [])[:count]
    ]
|
|
2966
|
+
def _mcp_search_serper(self, query: str, count: int, api_key: str) -> list:
    """Search using Serper.dev."""
    import urllib.request
    import json

    # Serper takes a JSON POST body and the key in an X-API-KEY header.
    body = json.dumps({"q": query, "num": min(count, 10)}).encode()
    request = urllib.request.Request(
        "https://google.serper.dev/search",
        data=body,
        headers={
            "X-API-KEY": api_key,
            "Content-Type": "application/json",
        },
    )

    with urllib.request.urlopen(request, timeout=30) as resp:
        payload = json.loads(resp.read().decode())

    rows = []
    for entry in payload.get("organic", []):
        rows.append({
            "title": entry.get("title", ""),
            "snippet": entry.get("snippet", ""),
            "url": entry.get("link", ""),
            "source": "serper",
        })
    return rows
|
|
2987
|
+
def _register_read_url_tool(self) -> None:
    """Register the URL Content Reader Tool with the MCP registry."""
    # Argument schema; only "url" is required.
    schema = {
        "type": "object",
        "properties": {
            "url": {
                "type": "string",
                "description": "URL to fetch"
            },
            "timeout": {
                "type": "integer",
                "description": "Request timeout in seconds",
                "default": 30,
                "minimum": 5,
                "maximum": 120
            },
            "extract_main_content": {
                "type": "boolean",
                "description": "Try to extract main content area only",
                "default": True
            }
        },
        "required": ["url"]
    }
    adapter = MCPToolAdapter(
        name="pomera_read_url",
        description="Fetch URL content and convert HTML to Markdown. "
                    "Extracts main content area and outputs clean markdown format.",
        input_schema=schema,
        handler=self._handle_read_url
    )
    self.register(adapter)
|
|
3018
|
+
def _handle_read_url(self, args: Dict[str, Any]) -> str:
|
|
3019
|
+
"""Handle URL content reader tool execution."""
|
|
3020
|
+
from tools.url_content_reader import URLContentReader
|
|
3021
|
+
import json
|
|
3022
|
+
|
|
3023
|
+
url = args.get("url", "")
|
|
3024
|
+
timeout = args.get("timeout", 30)
|
|
3025
|
+
extract_main = args.get("extract_main_content", True)
|
|
3026
|
+
|
|
3027
|
+
if not url:
|
|
3028
|
+
return json.dumps({"success": False, "error": "URL is required"})
|
|
3029
|
+
|
|
3030
|
+
try:
|
|
3031
|
+
reader = URLContentReader()
|
|
3032
|
+
markdown = reader.fetch_and_convert(url, timeout=timeout, extract_main_content=extract_main)
|
|
3033
|
+
return json.dumps({
|
|
3034
|
+
"success": True,
|
|
3035
|
+
"url": url,
|
|
3036
|
+
"markdown": markdown,
|
|
3037
|
+
"length": len(markdown)
|
|
3038
|
+
}, ensure_ascii=False)
|
|
3039
|
+
except Exception as e:
|
|
3040
|
+
return json.dumps({"success": False, "error": f"Error fetching URL: {str(e)}"})
|
|
2714
3041
|
|
|
2715
3042
|
|
|
2716
3043
|
# Singleton instance for convenience
|
|
@@ -5,6 +5,8 @@ Centralizes all tool default settings into a single registry system for consiste
|
|
|
5
5
|
first-launch initialization. Provides schema validation, deep merge capability,
|
|
6
6
|
and backward compatibility with existing tools.
|
|
7
7
|
|
|
8
|
+
Supports loading custom defaults from external defaults.json file.
|
|
9
|
+
|
|
8
10
|
Author: Pomera AI Commander Team
|
|
9
11
|
"""
|
|
10
12
|
|
|
@@ -12,6 +14,8 @@ from dataclasses import dataclass, field
|
|
|
12
14
|
from typing import Any, Dict, List, Optional, Callable, Set, Tuple
|
|
13
15
|
from copy import deepcopy
|
|
14
16
|
import logging
|
|
17
|
+
import json
|
|
18
|
+
import os
|
|
15
19
|
|
|
16
20
|
|
|
17
21
|
@dataclass
|
|
@@ -47,6 +51,7 @@ class SettingsDefaultsRegistry:
|
|
|
47
51
|
- Schema validation for tool settings
|
|
48
52
|
- Deep merge capability for user settings with defaults
|
|
49
53
|
- Backward compatibility with existing tools
|
|
54
|
+
- External defaults.json file support for user customization
|
|
50
55
|
"""
|
|
51
56
|
|
|
52
57
|
_instance: Optional['SettingsDefaultsRegistry'] = None
|
|
@@ -67,9 +72,103 @@ class SettingsDefaultsRegistry:
|
|
|
67
72
|
self._tool_specs: Dict[str, ToolDefaultsSpec] = {}
|
|
68
73
|
self._app_defaults: Dict[str, Any] = {}
|
|
69
74
|
self._initialized = True
|
|
75
|
+
self._json_defaults_loaded = False
|
|
70
76
|
|
|
71
77
|
# Register all built-in tool defaults
|
|
72
78
|
self._register_builtin_defaults()
|
|
79
|
+
|
|
80
|
+
# Load custom defaults from external JSON file (overrides builtins)
|
|
81
|
+
self._load_from_json_file()
|
|
82
|
+
|
|
83
|
+
def _load_from_json_file(self) -> bool:
|
|
84
|
+
"""
|
|
85
|
+
Load custom defaults from external defaults.json file.
|
|
86
|
+
|
|
87
|
+
This allows users to customize tool defaults without modifying Python code.
|
|
88
|
+
The JSON file overrides built-in defaults for matching tool names.
|
|
89
|
+
|
|
90
|
+
Returns:
|
|
91
|
+
True if JSON was loaded successfully, False otherwise.
|
|
92
|
+
"""
|
|
93
|
+
try:
|
|
94
|
+
# Try multiple locations for defaults.json
|
|
95
|
+
possible_paths = [
|
|
96
|
+
os.path.join(os.path.dirname(__file__), "..", "defaults.json"), # app root
|
|
97
|
+
os.path.join(os.path.dirname(__file__), "defaults.json"), # core directory
|
|
98
|
+
]
|
|
99
|
+
|
|
100
|
+
json_path = None
|
|
101
|
+
for path in possible_paths:
|
|
102
|
+
if os.path.exists(path):
|
|
103
|
+
json_path = os.path.abspath(path)
|
|
104
|
+
break
|
|
105
|
+
|
|
106
|
+
if not json_path:
|
|
107
|
+
self.logger.debug("No external defaults.json found - using built-in defaults")
|
|
108
|
+
return False
|
|
109
|
+
|
|
110
|
+
with open(json_path, 'r', encoding='utf-8') as f:
|
|
111
|
+
data = json.load(f)
|
|
112
|
+
|
|
113
|
+
tools_data = data.get("tools", {})
|
|
114
|
+
|
|
115
|
+
# Override defaults for matching tools
|
|
116
|
+
for tool_name, custom_defaults in tools_data.items():
|
|
117
|
+
# Skip metadata keys
|
|
118
|
+
if tool_name.startswith("_"):
|
|
119
|
+
continue
|
|
120
|
+
|
|
121
|
+
if tool_name in self._tool_specs:
|
|
122
|
+
# Merge custom defaults with existing (custom takes precedence)
|
|
123
|
+
existing = self._tool_specs[tool_name]
|
|
124
|
+
merged_defaults = deepcopy(existing.defaults)
|
|
125
|
+
for key, value in custom_defaults.items():
|
|
126
|
+
if not key.startswith("_"): # Skip metadata keys like _note
|
|
127
|
+
merged_defaults[key] = value
|
|
128
|
+
|
|
129
|
+
# Update the spec with merged defaults
|
|
130
|
+
self._tool_specs[tool_name] = ToolDefaultsSpec(
|
|
131
|
+
tool_name=existing.tool_name,
|
|
132
|
+
defaults=merged_defaults,
|
|
133
|
+
required_keys=existing.required_keys,
|
|
134
|
+
description=existing.description,
|
|
135
|
+
version=existing.version
|
|
136
|
+
)
|
|
137
|
+
self.logger.debug(f"Loaded custom defaults for '{tool_name}' from JSON")
|
|
138
|
+
else:
|
|
139
|
+
# New tool not in builtins - create new spec
|
|
140
|
+
self._tool_specs[tool_name] = ToolDefaultsSpec(
|
|
141
|
+
tool_name=tool_name,
|
|
142
|
+
defaults={k: v for k, v in custom_defaults.items() if not k.startswith("_")},
|
|
143
|
+
description=f"Custom tool from defaults.json"
|
|
144
|
+
)
|
|
145
|
+
self.logger.debug(f"Added new tool '{tool_name}' from JSON")
|
|
146
|
+
|
|
147
|
+
self._json_defaults_loaded = True
|
|
148
|
+
self.logger.info(f"Loaded custom defaults from: {json_path}")
|
|
149
|
+
return True
|
|
150
|
+
|
|
151
|
+
except json.JSONDecodeError as e:
|
|
152
|
+
self.logger.error(f"Invalid JSON in defaults.json: {e}")
|
|
153
|
+
return False
|
|
154
|
+
except Exception as e:
|
|
155
|
+
self.logger.warning(f"Could not load defaults.json: {e}")
|
|
156
|
+
return False
|
|
157
|
+
|
|
158
|
+
def get_json_defaults_path(self) -> Optional[str]:
    """Get the path to the defaults.json file if it exists."""
    here = os.path.dirname(__file__)
    # Checked in priority order: app root first, then the core directory.
    candidates = (
        os.path.join(here, "..", "defaults.json"),
        os.path.join(here, "defaults.json"),
    )
    found = (os.path.abspath(c) for c in candidates if os.path.exists(c))
    return next(found, None)
+
|
|
169
|
+
def is_json_defaults_loaded(self) -> bool:
    """Report whether an external defaults.json file was successfully loaded.

    Returns:
        True once _load_from_json_file() has applied an external file,
        False otherwise.
    """
    return self._json_defaults_loaded
|
|
73
172
|
|
|
74
173
|
def _register_builtin_defaults(self) -> None:
|
|
75
174
|
"""Register all built-in tool default settings."""
|
|
@@ -406,21 +505,25 @@ class SettingsDefaultsRegistry:
|
|
|
406
505
|
description="Cohere AI integration"
|
|
407
506
|
))
|
|
408
507
|
|
|
409
|
-
# HuggingFace AI - Updated
|
|
410
|
-
#
|
|
508
|
+
# HuggingFace AI - Updated January 2026
|
|
509
|
+
# Free inference API - most popular open models
|
|
411
510
|
self.register_tool(ToolDefaultsSpec(
|
|
412
511
|
tool_name="HuggingFace AI",
|
|
413
512
|
defaults={
|
|
414
513
|
"API_KEY": "putinyourkey",
|
|
415
514
|
"MODEL": "meta-llama/Llama-3.3-70B-Instruct",
|
|
416
515
|
"MODELS_LIST": [
|
|
516
|
+
# Most popular free inference models
|
|
417
517
|
"meta-llama/Llama-3.3-70B-Instruct",
|
|
418
518
|
"meta-llama/Meta-Llama-3.1-70B-Instruct",
|
|
419
519
|
"meta-llama/Meta-Llama-3.1-8B-Instruct",
|
|
420
|
-
"mistralai/Mistral-Small-3-Instruct",
|
|
421
|
-
"mistralai/Mistral-7B-Instruct-v0.3",
|
|
422
520
|
"Qwen/Qwen2.5-72B-Instruct",
|
|
423
|
-
"
|
|
521
|
+
"Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
522
|
+
"mistralai/Mistral-Small-3.1-24B-Instruct-2503",
|
|
523
|
+
"mistralai/Mistral-7B-Instruct-v0.3",
|
|
524
|
+
"deepseek-ai/DeepSeek-V3",
|
|
525
|
+
"google/gemma-2-27b-it",
|
|
526
|
+
"microsoft/Phi-3.5-mini-instruct"
|
|
424
527
|
],
|
|
425
528
|
"system_prompt": "You are a helpful assistant.",
|
|
426
529
|
"max_tokens": 4096,
|
|
@@ -430,7 +533,7 @@ class SettingsDefaultsRegistry:
|
|
|
430
533
|
"seed": ""
|
|
431
534
|
},
|
|
432
535
|
required_keys={"API_KEY", "MODEL"},
|
|
433
|
-
description="HuggingFace AI
|
|
536
|
+
description="HuggingFace AI free inference (popular open models)"
|
|
434
537
|
))
|
|
435
538
|
|
|
436
539
|
# Groq AI - Updated December 2025
|
|
@@ -462,23 +565,31 @@ class SettingsDefaultsRegistry:
|
|
|
462
565
|
description="Groq AI integration"
|
|
463
566
|
))
|
|
464
567
|
|
|
465
|
-
# OpenRouterAI - Updated
|
|
466
|
-
# Latest: Claude Opus 4.5, GPT-4.1, Gemini 2.5, DeepSeek
|
|
568
|
+
# OpenRouterAI - Updated January 2026
|
|
569
|
+
# Latest: Claude Opus 4.5, GPT-4.1, Gemini 2.5, DeepSeek V3
|
|
570
|
+
# Includes FREE tier models marked with :free suffix
|
|
467
571
|
self.register_tool(ToolDefaultsSpec(
|
|
468
572
|
tool_name="OpenRouterAI",
|
|
469
573
|
defaults={
|
|
470
574
|
"API_KEY": "putinyourkey",
|
|
471
575
|
"MODEL": "anthropic/claude-sonnet-4.5",
|
|
472
576
|
"MODELS_LIST": [
|
|
577
|
+
# Premium models
|
|
473
578
|
"anthropic/claude-sonnet-4.5",
|
|
474
579
|
"anthropic/claude-opus-4.5",
|
|
475
|
-
"openai/gpt-
|
|
580
|
+
"openai/gpt-4o",
|
|
476
581
|
"google/gemini-2.5-pro",
|
|
477
582
|
"google/gemini-2.5-flash",
|
|
478
583
|
"deepseek/deepseek-chat",
|
|
479
584
|
"meta-llama/llama-3.3-70b-instruct",
|
|
585
|
+
# FREE models (no cost)
|
|
586
|
+
"google/gemini-2.5-flash:free",
|
|
480
587
|
"google/gemini-2.0-flash:free",
|
|
481
|
-
"meta-llama/llama-3.
|
|
588
|
+
"meta-llama/llama-3.3-70b-instruct:free",
|
|
589
|
+
"meta-llama/llama-3.1-8b-instruct:free",
|
|
590
|
+
"qwen/qwen-2.5-72b-instruct:free",
|
|
591
|
+
"mistralai/mistral-7b-instruct:free",
|
|
592
|
+
"deepseek/deepseek-chat:free"
|
|
482
593
|
],
|
|
483
594
|
"system_prompt": "You are a helpful assistant.",
|
|
484
595
|
"temperature": 0.7,
|
|
@@ -492,7 +603,34 @@ class SettingsDefaultsRegistry:
|
|
|
492
603
|
"stop": ""
|
|
493
604
|
},
|
|
494
605
|
required_keys={"API_KEY", "MODEL"},
|
|
495
|
-
description="OpenRouter AI integration"
|
|
606
|
+
description="OpenRouter AI integration (includes free models)"
|
|
607
|
+
))
|
|
608
|
+
|
|
609
|
+
# Studio LM - Local LLM support via LM Studio or Ollama (January 2026)
|
|
610
|
+
# Default endpoint is LM Studio (port 1234), Ollama uses port 11434
|
|
611
|
+
self.register_tool(ToolDefaultsSpec(
|
|
612
|
+
tool_name="Studio LM",
|
|
613
|
+
defaults={
|
|
614
|
+
"API_KEY": "", # Often not required for local
|
|
615
|
+
"MODEL": "local-model",
|
|
616
|
+
"MODELS_LIST": [
|
|
617
|
+
"local-model",
|
|
618
|
+
"llama-3.3-70b",
|
|
619
|
+
"qwen2.5-72b",
|
|
620
|
+
"mistral-7b",
|
|
621
|
+
"phi-3.5-mini",
|
|
622
|
+
"gemma-2-9b",
|
|
623
|
+
"deepseek-coder-v2"
|
|
624
|
+
],
|
|
625
|
+
"ENDPOINT": "http://127.0.0.1:1234/v1", # LM Studio default
|
|
626
|
+
"system_prompt": "You are a helpful assistant.",
|
|
627
|
+
"temperature": 0.7,
|
|
628
|
+
"max_tokens": 4096,
|
|
629
|
+
"top_p": 0.95,
|
|
630
|
+
"stream": True
|
|
631
|
+
},
|
|
632
|
+
required_keys={"ENDPOINT", "MODEL"},
|
|
633
|
+
description="Local LLM via LM Studio or Ollama (OpenAI-compatible API)"
|
|
496
634
|
))
|
|
497
635
|
|
|
498
636
|
# AWS Bedrock - Updated December 2025
|
|
@@ -913,15 +1051,21 @@ class SettingsDefaultsRegistry:
|
|
|
913
1051
|
"""
|
|
914
1052
|
Get complete default settings including app-level and all tool defaults.
|
|
915
1053
|
|
|
1054
|
+
NOTE: This method intentionally EXCLUDES input_tabs and output_tabs.
|
|
1055
|
+
Tab content is user data that must be loaded from persistent storage,
|
|
1056
|
+
not overwritten with empty defaults. Including tabs here caused a
|
|
1057
|
+
regression where saved tab content was lost on app restart.
|
|
1058
|
+
|
|
916
1059
|
Args:
|
|
917
|
-
tab_count: Number of tabs for input/output (default 7)
|
|
1060
|
+
tab_count: Number of tabs for input/output (default 7, unused but kept for API compatibility)
|
|
918
1061
|
|
|
919
1062
|
Returns:
|
|
920
|
-
Complete default settings dictionary
|
|
1063
|
+
Complete default settings dictionary (excludes tab content)
|
|
921
1064
|
"""
|
|
922
1065
|
defaults = deepcopy(self._app_defaults)
|
|
923
|
-
|
|
924
|
-
|
|
1066
|
+
# NOTE: input_tabs and output_tabs are intentionally NOT included here.
|
|
1067
|
+
# They are user data stored in the database, not default configurations.
|
|
1068
|
+
# Adding them here would overwrite saved user content with empty arrays.
|
|
925
1069
|
defaults["tool_settings"] = self.get_all_tool_defaults()
|
|
926
1070
|
return defaults
|
|
927
1071
|
|
package/mcp.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "pomera-ai-commander",
|
|
3
|
-
"version": "1.2.
|
|
3
|
+
"version": "1.2.10",
|
|
4
4
|
"description": "Text processing toolkit with 22 MCP tools including case transformation, encoding, hashing, text analysis, and notes management for AI assistants.",
|
|
5
5
|
"homepage": "https://github.com/matbanik/Pomera-AI-Commander",
|
|
6
6
|
"repository": "https://github.com/matbanik/Pomera-AI-Commander",
|
package/package.json
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "pomera-ai-commander",
|
|
3
|
-
"
|
|
3
|
+
"mcpName": "io.github.matbanik/pomera",
|
|
4
|
+
"version": "1.2.10",
|
|
4
5
|
"description": "Text processing toolkit with 22 MCP tools for AI assistants - case transformation, encoding, hashing, text analysis, and notes management",
|
|
5
6
|
"main": "pomera_mcp_server.py",
|
|
6
7
|
"bin": {
|