sigma-terminal 3.4.1__py3-none-any.whl → 3.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sigma/__init__.py +4 -5
- sigma/analytics/__init__.py +11 -9
- sigma/app.py +384 -1194
- sigma/backtest/__init__.py +2 -0
- sigma/backtest/service.py +116 -0
- sigma/charts.py +2 -2
- sigma/cli.py +14 -12
- sigma/comparison.py +2 -2
- sigma/config.py +14 -4
- sigma/core/command_router.py +93 -0
- sigma/llm/__init__.py +3 -0
- sigma/llm/providers/anthropic_provider.py +196 -0
- sigma/llm/providers/base.py +29 -0
- sigma/llm/providers/google_provider.py +197 -0
- sigma/llm/providers/ollama_provider.py +156 -0
- sigma/llm/providers/openai_provider.py +168 -0
- sigma/llm/providers/sigma_cloud_provider.py +57 -0
- sigma/llm/rate_limit.py +40 -0
- sigma/llm/registry.py +66 -0
- sigma/llm/router.py +122 -0
- sigma/setup_agent.py +188 -0
- sigma/tools/__init__.py +23 -0
- sigma/tools/adapter.py +38 -0
- sigma/{tools.py → tools/library.py} +2 -1
- sigma/tools/registry.py +108 -0
- sigma/utils/extraction.py +83 -0
- sigma_terminal-3.5.0.dist-info/METADATA +184 -0
- sigma_terminal-3.5.0.dist-info/RECORD +46 -0
- sigma/llm.py +0 -786
- sigma/setup.py +0 -440
- sigma_terminal-3.4.1.dist-info/METADATA +0 -272
- sigma_terminal-3.4.1.dist-info/RECORD +0 -30
- /sigma/{backtest.py → backtest/simple_engine.py} +0 -0
- {sigma_terminal-3.4.1.dist-info → sigma_terminal-3.5.0.dist-info}/WHEEL +0 -0
- {sigma_terminal-3.4.1.dist-info → sigma_terminal-3.5.0.dist-info}/entry_points.txt +0 -0
- {sigma_terminal-3.4.1.dist-info → sigma_terminal-3.5.0.dist-info}/licenses/LICENSE +0 -0
sigma/llm/router.py
ADDED
@@ -0,0 +1,122 @@
from typing import Any, Callable, Dict, List, Optional, Union, AsyncIterator
import logging

from .providers.base import BaseLLM
from .providers.openai_provider import OpenAIProvider
from .providers.anthropic_provider import AnthropicProvider
from .providers.google_provider import GoogleProvider
from .providers.ollama_provider import OllamaProvider
from .providers.sigma_cloud_provider import SigmaCloudProvider
from .registry import REGISTRY
from .rate_limit import RateLimiter

logger = logging.getLogger(__name__)

class LLMRouter:
    def __init__(self, settings):
        self.settings = settings
        self.providers: Dict[str, BaseLLM] = {}

        # Initialize providers based on settings
        self._init_providers()

    def _init_providers(self):
        # Sigma Cloud (Default)
        # Always available due to embedded key fallback in the provider itself
        self.providers["sigma_cloud"] = SigmaCloudProvider(
            api_key=self.settings.sigma_cloud_api_key,
            rate_limiter=RateLimiter(60, 0.5)
        )

        # OpenAI
        if self.settings.openai_api_key:
            self.providers["openai"] = OpenAIProvider(
                api_key=self.settings.openai_api_key,
                rate_limiter=RateLimiter(60, 0.2)
            )

        # Anthropic
        if self.settings.anthropic_api_key:
            self.providers["anthropic"] = AnthropicProvider(
                api_key=self.settings.anthropic_api_key,
                rate_limiter=RateLimiter(40, 0.5)
            )

        # Google
        if self.settings.google_api_key:
            self.providers["google"] = GoogleProvider(
                api_key=self.settings.google_api_key,
                rate_limiter=RateLimiter(60, 0.2)
            )

        # Ollama (always available usually)
        self.providers["ollama"] = OllamaProvider(
            base_url=getattr(self.settings, "ollama_url", "http://localhost:11434"),
            rate_limiter=RateLimiter(100, 0.01)
        )

    async def chat(
        self,
        messages: List[Dict[str, str]],
        model: Optional[str] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        on_tool_call: Optional[Callable] = None,
        stream: bool = True,
        json_mode: bool = False,
        provider: Optional[str] = None
    ) -> Union[str, AsyncIterator[str]]:

        # Determine model and provider
        selected_model = model or self.settings.default_model
        selected_provider = provider

        if not selected_provider:
            selected_provider = REGISTRY.get_provider(selected_model)

        # Get client
        client = self.providers.get(selected_provider)
        if not client:
            # Fallback logic
            if "ollama" in self.providers:
                logger.warning(f"Provider {selected_provider} not available, falling back to Ollama")
                client = self.providers["ollama"]
                # Find a fallback model?
                selected_model = self.settings.default_fallback_model or "llama3.2"
            else:
                raise ValueError(f"Provider {selected_provider} not configured and no fallback available.")

        # Execute
        try:
            return await client.generate(
                messages=messages,
                model=selected_model,
                tools=tools,
                on_tool_call=on_tool_call,
                stream=stream,
                json_mode=json_mode
            )
        except Exception as e:
            logger.error(f"Error generation with {selected_provider}/{selected_model}: {e}")
            # Circuit breaker / fallback could go here
            if selected_provider != "ollama" and "ollama" in self.providers:
                logger.info("Falling back to Ollama due to error")
                return await self.providers["ollama"].generate(
                    messages=messages,
                    model="llama3.2",  # Hardcoded fallback
                    tools=tools,
                    on_tool_call=on_tool_call,
                    stream=stream,
                    json_mode=json_mode
                )
            raise

_router_instance: Optional[LLMRouter] = None

def get_router(settings: Any = None) -> LLMRouter:
    global _router_instance
    if not _router_instance:
        if settings:
            _router_instance = LLMRouter(settings)
        else:
            raise RuntimeError("LLM Router not initialized and no settings provided")
    return _router_instance
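A minimal usage sketch of the new router; only get_router() and the chat() signature come from this diff, while the settings accessor, model id, and prompt are illustrative assumptions:

import asyncio
from sigma.config import get_settings
from sigma.llm.router import get_router

async def main():
    # First call must supply settings; later calls reuse the module-level singleton.
    router = get_router(get_settings())
    reply = await router.chat(
        messages=[{"role": "user", "content": "Summarize AAPL's last quarter"}],
        model="gpt-4o",   # assumed model id; REGISTRY maps it to a provider
        stream=False,
    )
    print(reply)

asyncio.run(main())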
sigma/setup_agent.py
ADDED
@@ -0,0 +1,188 @@
import sys
import os
import subprocess
import platform
import shutil
import time
from rich.console import Console
from rich.prompt import Prompt, Confirm
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.markdown import Markdown

from .config import get_settings, save_api_key, save_setting, mark_first_run_complete
from .llm.registry import REGISTRY

console = Console()

class SetupAgent:
    def __init__(self):
        self.os_type = platform.system().lower()
        self.lean_ok = False
        self.ollama_ok = False

    def run(self):
        console.clear()
        console.print(Panel.fit("[bold blue]Welcome to Sigma Setup Agent[/bold blue]", border_style="blue"))
        console.print("[dim]Initializing your financial research environment...[/dim]\n")

        self.check_environment()
        self.install_lean()
        self.install_ollama()
        self.configure_ollama_model()
        self.configure_api_tools()

        mark_first_run_complete()
        console.print("\n[bold green]Setup Complete![/bold green] Launching Sigma...")
        time.sleep(2)

    def check_environment(self):
        console.print("[bold]1. Checking Environment[/bold]")

        # Python
        py_ver = sys.version.split()[0]
        console.print(f"  ✓ Python {py_ver}")

        # OS
        console.print(f"  ✓ OS: {self.os_type}")

    def install_lean(self):
        console.print("\n[bold]2. Setting up LEAN Engine[/bold]")

        # Check if lean is installed
        if shutil.which("lean"):
            console.print("  ✓ LEAN CLI found")
            self.lean_ok = True
            return

        if Confirm.ask("  [yellow]LEAN CLI not found. Install it now?[/yellow] (Required for backtesting)"):
            with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
                task = progress.add_task("Installing LEAN CLI...", total=None)
                try:
                    # Attempt pipx first
                    if shutil.which("pipx"):
                        subprocess.run(["pipx", "install", "lean-cli"], check=True, capture_output=True)
                    else:
                        subprocess.check_call([sys.executable, "-m", "pip", "install", "lean-cli"])
                    progress.update(task, completed=True)
                    console.print("  ✓ LEAN CLI installed successfully")
                    self.lean_ok = True

                    # Should run lean init? Might require user interaction.
                    # Providing basic config is safer.
                    self._create_minimal_lean_config()

                except Exception as e:
                    console.print(f"  [red]Failed to install LEAN: {e}[/red]")
                    console.print("  Please install manually: `pip install lean-cli`")

    def _create_minimal_lean_config(self):
        # Create a directory for lean data
        lean_dir = os.path.expanduser("~/.sigma/lean_data")
        os.makedirs(lean_dir, exist_ok=True)
        # We might need to run `lean init` eventually, but for now just ensure folder exists.

    def install_ollama(self):
        console.print("\n[bold]3. Setting up Ollama[/bold]")

        if shutil.which("ollama"):
            console.print("  ✓ Ollama binary found")
            self.ollama_ok = True
        else:
            console.print("  [yellow]Ollama not found.[/yellow]")
            if self.os_type == "darwin" and shutil.which("brew"):
                if Confirm.ask("  Install via Homebrew?"):
                    subprocess.run(["brew", "install", "ollama"], check=True)
                    self.ollama_ok = True
            elif self.os_type == "linux":
                console.print("  Please install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
                Confirm.ask("  Press Enter once installed")

                # Verify again
                if shutil.which("ollama"):
                    self.ollama_ok = True

        if self.ollama_ok:
            # Check if running
            try:
                subprocess.run(["ollama", "list"], check=True, capture_output=True)
                console.print("  ✓ Ollama daemon is running")
            except subprocess.CalledProcessError:
                console.print("  [yellow]Ollama daemon not running. Attempting to start...[/yellow]")
                # Attempt start (background)
                if self.os_type == "darwin":
                    subprocess.Popen(["open", "-a", "Ollama"])
                elif self.os_type == "linux":
                    subprocess.Popen(["ollama", "serve"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

                time.sleep(5)  # Wait for start
                console.print("  ✓ Attempted start. Please verify in taskbar if needed.")

    def configure_ollama_model(self):
        console.print("\n[bold]4. Configuring AI Models[/bold]")

        if not self.ollama_ok:
            console.print("  [dim]Skipping Ollama model setup (Ollama not available)[/dim]")
            return

        choices = ["llama3.2", "mistral", "phi3", "custom"]
        console.print("Select a default local model:")
        for i, m in enumerate(choices):
            console.print(f"  {i+1}) {m}")

        selection = Prompt.ask("Choose [1-4]", choices=["1", "2", "3", "4"], default="1")
        model = choices[int(selection)-1]

        if model == "custom":
            model = Prompt.ask("Enter model name (e.g. deepseek-coder)")

        console.print(f"  Selected: [bold]{model}[/bold]")

        # Check if pulled
        try:
            result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
            if model not in result.stdout:
                if Confirm.ask(f"  Pull {model} now? (Required to use it)"):
                    with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
                        task = progress.add_task(f"Pulling {model}...", total=None)
                        subprocess.run(["ollama", "pull", model], check=True)
        except Exception as e:
            console.print(f"  [red]Error checking/pulling model: {e}[/red]")

        save_setting("default_model", model)
        save_setting("ollama_url", "http://localhost:11434")

    def configure_api_tools(self):
        console.print("\n[bold]5. Enabling Data Providers[/bold]")

        providers = [
            ("Alpha Vantage", "alpha_vantage_key"),
            ("Polygon.io", "polygon_api_key"),
            ("Financial Modeling Prep", "fmp_api_key"),
            ("Exa Search", "exa_api_key"),
            ("Massive", "massive_api_key"),
            ("OpenAI", "openai_api_key"),
            ("Anthropic", "anthropic_api_key"),
            ("Google Gemini", "google_api_key"),
        ]

        for name, key_id in providers:
            settings = get_settings()
            existing = getattr(settings, key_id, None)

            if not existing:
                if Confirm.ask(f"  Configure {name}?"):
                    key = Prompt.ask(f"  Enter API Key for {name}", password=True)
                    if key:
                        save_api_key(key_id, key)
                        console.print(f"  ✓ Saved {name}")
            else:
                console.print(f"  ✓ {name} configured")

def run_setup():
    agent = SetupAgent()
    agent.run()
    return True

if __name__ == "__main__":
    run_setup()
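A hedged sketch of how a CLI entry point might gate on this agent at first launch; only get_settings(), run_setup(), and mark_first_run_complete() appear in this diff, and the first_run_complete flag name is an assumption:

from sigma.config import get_settings
from sigma.setup_agent import run_setup

def maybe_run_setup() -> None:
    settings = get_settings()
    # Assumed flag name; mark_first_run_complete() presumably sets it after the wizard finishes.
    if not getattr(settings, "first_run_complete", False):
        run_setup()  # walks through LEAN, Ollama, model choice, and API keys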
sigma/tools/__init__.py
ADDED
@@ -0,0 +1,23 @@
from .registry import TOOL_REGISTRY, ToolDefinition
from .adapter import register_legacy_tools
from .library import *

# Ensure legacy tools are registered
register_legacy_tools()

# Expose execute_tool for backward compatibility if needed, using the registry
def execute_tool(name: str, args: dict):
    # Registry doesn't have sync execute exposed directly?
    # But library functions are sync.
    # Check definition.
    tool = TOOL_REGISTRY.get_tool(name)
    if tool:
        try:
            return tool.func(**args)
        except Exception as e:
            return {"error": str(e)}
    return {"error": "Tool not found"}

# Helpers
def get_tools_for_llm():
    return TOOL_REGISTRY.to_llm_format()
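Illustrative call pattern for the two helpers above; the tool name "get_stock_history" is a stand-in for whichever tools library.py actually registers:

from sigma.tools import execute_tool, get_tools_for_llm

specs = get_tools_for_llm()            # OpenAI-style function specs for the LLM router
result = execute_tool("get_stock_history",            # hypothetical tool name
                      {"symbol": "AAPL", "period": "6mo"})
if isinstance(result, dict) and "error" in result:
    print("tool failed:", result["error"])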
sigma/tools/adapter.py
ADDED
@@ -0,0 +1,38 @@
from typing import Any, Dict
from .registry import TOOL_REGISTRY, ToolDefinition
from .library import TOOLS, TOOL_FUNCTIONS

def register_legacy_tools():
    """Import tools from the legacy library defined in library.py"""

    # Map from TOOLS list (which has schema) to TOOL_FUNCTIONS (which has implementation)

    for tool_def in TOOLS:
        if tool_def["type"] != "function": continue

        func_def = tool_def["function"]
        name = func_def["name"]
        description = func_def.get("description", "")
        parameters = func_def.get("parameters", {})

        func = TOOL_FUNCTIONS.get(name)
        if not func:
            continue

        # Determine provider based on name prefix or guess
        provider = "yfinance"
        if name.startswith("polygon"): provider = "polygon"
        if name.startswith("alpha"): provider = "alpha_vantage"
        # etc...

        # Register manually to bypass decorator
        TOOL_REGISTRY._tools[name] = ToolDefinition(
            name=name,
            description=description,
            input_schema=parameters,
            func=func,
            provider=provider
        )

# Run registration
register_legacy_tools()
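For reference, a sketch of the entry shape register_legacy_tools() consumes; the tool name and schema are invented for illustration, not taken from the package's actual library:

# One legacy entry (schema) as the adapter expects it:
legacy_entry = {
    "type": "function",
    "function": {
        "name": "fetch_quote",                       # hypothetical name
        "description": "Latest quote for a ticker",
        "parameters": {
            "type": "object",
            "properties": {"symbol": {"type": "string"}},
            "required": ["symbol"],
        },
    },
}
# TOOL_FUNCTIONS["fetch_quote"] would hold the matching callable; the adapter pairs
# schema and callable into a ToolDefinition stored as TOOL_REGISTRY._tools["fetch_quote"].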
sigma/{tools.py → tools/library.py}
@@ -1450,7 +1450,8 @@ TOOLS = [
             "type": "object",
             "properties": {
                 "symbol": {"type": "string", "description": "Stock ticker symbol"},
-                "period": {"type": "string", "description": "Time period: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, max", "default": "3mo"}
+                "period": {"type": "string", "description": "Time period: 1d, 5d, 1mo, 3mo, 6mo, 1y, 2y, 5y, max", "default": "3mo"},
+                "interval": {"type": "string", "description": "Data interval: 1m, 2m, 5m, 15m, 30m, 60m, 90m, 1h, 1d, 5d, 1wk, 1mo, 3mo", "default": "1d"}
             },
             "required": ["symbol"]
         }
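The hunk above adds an optional "interval" field to the price-history tool schema. A hedged sketch of the call this presumably enables downstream, assuming the tool is backed by yfinance (the adapter's default provider); fetch_history is an invented wrapper, the real implementation is not shown in this diff:

import yfinance as yf

def fetch_history(symbol: str, period: str = "3mo", interval: str = "1d"):
    # Hypothetical wrapper illustrating where the new interval value would flow.
    return yf.Ticker(symbol).history(period=period, interval=interval)

# e.g. hourly bars for the last month
df = fetch_history("AAPL", period="1mo", interval="1h")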
sigma/tools/registry.py
ADDED
@@ -0,0 +1,108 @@
from typing import Any, Callable, Dict, List, Optional
from pydantic import BaseModel
import inspect

class ToolDefinition(BaseModel):
    name: str
    description: str
    input_schema: Dict[str, Any]
    output_schema: Optional[Dict[str, Any]] = None
    func: Callable
    enabled: bool = True
    provider: str = "internal"  # yfinance, polygon, etc.

class ToolRegistry:
    def __init__(self):
        self._tools: Dict[str, ToolDefinition] = {}

    def register(self, name: str, description: str, provider: str = "internal"):
        def decorator(func):
            # Extract schema from type hints (simplified)
            # In a real impl, utilize pydantic.TypeAdapter or similar if args are models.
            # Here we assume simple args or we use a manual schema if provided?
            # For now, let's keep it simple: no schema auto-extraction in this snippet
            # unless we implement a Schema generator.
            # But the prompt says "typed tool registry... input_schema (pydantic)".

            # minimal schema generation
            sig = inspect.signature(func)
            params = {}
            required = []
            for pname, p in sig.parameters.items():
                if pname == "self": continue
                p_type = "string"
                if p.annotation == int: p_type = "integer"
                if p.annotation == float: p_type = "number"
                if p.annotation == bool: p_type = "boolean"

                params[pname] = {"type": p_type}
                if p.default == inspect.Parameter.empty:
                    required.append(pname)

            schema = {
                "type": "object",
                "properties": params,
                "required": required
            }

            self._tools[name] = ToolDefinition(
                name=name,
                description=description,
                input_schema=schema,
                func=func,
                provider=provider
            )
            return func
        return decorator

    def get_tool(self, name: str) -> Optional[ToolDefinition]:
        return self._tools.get(name)

    def list_tools(self) -> List[ToolDefinition]:
        return [t for t in self._tools.values() if t.enabled]

    def to_llm_format(self) -> List[Dict[str, Any]]:
        tools_list = []
        for t in self.list_tools():
            # Clone schema to avoid mutating original
            import copy
            schema = copy.deepcopy(t.input_schema)

            # Inject thought parameter for Gemini/HackClub compatibility
            # Some providers like Gemini via proxies require a "thought" field in tool calls
            if "properties" in schema:
                schema["properties"]["thought_signature"] = {
                    "type": "string",
                    "description": "Internal reasoning for why this tool is being called. MUST be provided."
                }
                # Force model to use it
                if "required" in schema and isinstance(schema["required"], list):
                    schema["required"].append("thought_signature")
                else:
                    schema["required"] = ["thought_signature"]

            tools_list.append({
                "type": "function",
                "function": {
                    "name": t.name,
                    "description": t.description,
                    "parameters": schema
                }
            })
        return tools_list

    async def execute(self, name: str, args: Dict[str, Any]) -> Any:
        tool = self.get_tool(name)
        if not tool:
            raise ValueError(f"Tool {name} not found")

        # Remove provider-specific fields if present
        args.pop("thought", None)
        args.pop("thought_signature", None)

        if inspect.iscoroutinefunction(tool.func):
            return await tool.func(**args)
        else:
            return tool.func(**args)

TOOL_REGISTRY = ToolRegistry()
sigma/utils/extraction.py
ADDED
@@ -0,0 +1,83 @@
import re
from datetime import datetime, timedelta
from typing import List, Optional, Tuple

COMMON_TICKERS = {
    # Ticker map for normalization if needed
    "APPLE": "AAPL",
    "GOOGLE": "GOOGL",
    "MICROSOFT": "MSFT",
    "AMAZON": "AMZN",
    "TESLA": "TSLA",
    "NVIDIA": "NVDA",
    "META": "META",
    "FACEBOOK": "META",
    "NETFLIX": "NFLX",
    "S&P 500": "SPY",
    "SP500": "SPY",
    "S&P": "SPY",
    "NASDAQ": "QQQ",
    "DOW": "DIA",
    "DOW JONES": "DIA",
    "BITCOIN": "BTC-USD",
    "ETHEREUM": "ETH-USD",
}

def extract_tickers(text: str) -> List[str]:
    """Extract and normalize tickers from text."""
    found = []

    # Check known names first (case insensitive bounds)
    upper_text = text.upper()
    for name, ticker in COMMON_TICKERS.items():
        # Simple word boundary check
        if re.search(r'\b' + re.escape(name) + r'\b', upper_text):
            found.append(ticker)

    # Regex for standard tickers (capitals, 1-5 chars)
    # Exclude common words like I, A, AM, PM, IS, AT, VS, OR, AND...
    # Strict mode: must be uppercase in original text? The prompt implies natural language which might be mixed.
    # But usually users type tickers in caps or "Apple".

    # For simplicity, extract probable tickers
    matches = re.findall(r'\b[A-Z]{2,5}\b', text)
    stopwords = {"AND", "OR", "THE", "FOR", "GET", "SET", "NOT", "BUT", "BY", "OF", "AT", "IN", "ON", "TO", "FROM", "VS", "GDP", "CPI", "USD", "YTD", "CEO", "CFO", "SEC", "API", "LLM", "AI"}

    for m in matches:
        if m not in stopwords and m not in found:
            found.append(m)

    return list(set(found))

def extract_timeframe(text: str) -> Tuple[str, Optional[str], Optional[str]]:
    """Extract timeframe description, start date, end date."""

    today = datetime.now()

    # "5y", "10 years", "start of 2020"

    # Simple regex for periods
    match_years = re.search(r'\b(\d+)\s*y(ears?)?\b', text)
    if match_years:
        years = int(match_years.group(1))
        start_date = (today - timedelta(days=years*365)).strftime("%Y-%m-%d")
        return f"{years}y", start_date, None

    match_months = re.search(r'\b(\d+)\s*m(onths?)?\b', text)
    if match_months:
        months = int(match_months.group(1))
        start_date = (today - timedelta(days=months*30)).strftime("%Y-%m-%d")
        return f"{months}m", start_date, None

    # "Since 2021"
    match_since = re.search(r'\bsince\s+(\d{4})\b', text)
    if match_since:
        year = int(match_since.group(1))
        return f"since {year}", f"{year}-01-01", None

    # "YTD"
    if "YTD" in text.upper():
        start_date = f"{today.year}-01-01"
        return "YTD", start_date, None

    return "default", None, None