sigma-terminal 3.4.0__py3-none-any.whl → 3.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sigma/llm/router.py ADDED
@@ -0,0 +1,122 @@
1
+ from typing import Any, Callable, Dict, List, Optional, Union, AsyncIterator
2
+ import logging
3
+
4
+ from .providers.base import BaseLLM
5
+ from .providers.openai_provider import OpenAIProvider
6
+ from .providers.anthropic_provider import AnthropicProvider
7
+ from .providers.google_provider import GoogleProvider
8
+ from .providers.ollama_provider import OllamaProvider
9
+ from .providers.sigma_cloud_provider import SigmaCloudProvider
10
+ from .registry import REGISTRY
11
+ from .rate_limit import RateLimiter
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
class LLMRouter:
    """Routes chat requests to the configured LLM provider clients.

    Providers are initialized once from *settings*; each chat request is
    dispatched by model name (resolved through REGISTRY) or an explicit
    provider override, with Ollama acting as the local fallback both when
    a provider is unconfigured and when a provider call raises.
    """

    def __init__(self, settings):
        self.settings = settings
        # Provider name -> client instance; filled in by _init_providers().
        self.providers: Dict[str, BaseLLM] = {}

        # Initialize providers based on settings
        self._init_providers()

    def _init_providers(self):
        """Instantiate every provider the current settings allow."""
        # Sigma Cloud (Default)
        # Always available due to embedded key fallback in the provider itself
        self.providers["sigma_cloud"] = SigmaCloudProvider(
            api_key=self.settings.sigma_cloud_api_key,
            rate_limiter=RateLimiter(60, 0.5)
        )

        # OpenAI — only when an API key is configured.
        if self.settings.openai_api_key:
            self.providers["openai"] = OpenAIProvider(
                api_key=self.settings.openai_api_key,
                rate_limiter=RateLimiter(60, 0.2)
            )

        # Anthropic
        if self.settings.anthropic_api_key:
            self.providers["anthropic"] = AnthropicProvider(
                api_key=self.settings.anthropic_api_key,
                rate_limiter=RateLimiter(40, 0.5)
            )

        # Google
        if self.settings.google_api_key:
            self.providers["google"] = GoogleProvider(
                api_key=self.settings.google_api_key,
                rate_limiter=RateLimiter(60, 0.2)
            )

        # Ollama (always registered; the daemon is usually local)
        self.providers["ollama"] = OllamaProvider(
            base_url=getattr(self.settings, "ollama_url", "http://localhost:11434"),
            rate_limiter=RateLimiter(100, 0.01)
        )

    async def chat(
        self,
        messages: List[Dict[str, str]],
        model: Optional[str] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        on_tool_call: Optional[Callable] = None,
        stream: bool = True,
        json_mode: bool = False,
        provider: Optional[str] = None
    ) -> Union[str, AsyncIterator[str]]:
        """Send a chat request, resolving model/provider and handling fallback.

        Args:
            messages: Chat history as role/content dicts.
            model: Model name; defaults to ``settings.default_model``.
            tools: Optional tool schemas forwarded to the provider.
            on_tool_call: Optional callback invoked on tool calls.
            stream: Whether to request a streaming response.
            json_mode: Whether to request JSON-formatted output.
            provider: Explicit provider name, bypassing REGISTRY lookup.

        Returns:
            The generated text, or an async iterator of chunks when streaming.

        Raises:
            ValueError: if the provider is unconfigured and no fallback exists.
        """
        # Determine model and provider; REGISTRY maps model name -> provider.
        selected_model = model or self.settings.default_model
        selected_provider = provider

        if not selected_provider:
            selected_provider = REGISTRY.get_provider(selected_model)

        # Get client
        client = self.providers.get(selected_provider)
        if not client:
            # Requested provider is not configured: fall back to Ollama if present.
            if "ollama" in self.providers:
                logger.warning("Provider %s not available, falling back to Ollama", selected_provider)
                client = self.providers["ollama"]
                selected_model = self.settings.default_fallback_model or "llama3.2"
            else:
                raise ValueError(f"Provider {selected_provider} not configured and no fallback available.")

        # Execute
        try:
            return await client.generate(
                messages=messages,
                model=selected_model,
                tools=tools,
                on_tool_call=on_tool_call,
                stream=stream,
                json_mode=json_mode
            )
        except Exception as e:
            # BUGFIX: message previously read "Error generation with"; also
            # use lazy %-style args rather than an eager f-string.
            logger.error("Error generating with %s/%s: %s", selected_provider, selected_model, e)
            # Circuit breaker / fallback could go here
            if selected_provider != "ollama" and "ollama" in self.providers:
                logger.info("Falling back to Ollama due to error")
                return await self.providers["ollama"].generate(
                    messages=messages,
                    # Consistent with the unavailable-provider path above
                    # (previously hardcoded to "llama3.2").
                    model=self.settings.default_fallback_model or "llama3.2",
                    tools=tools,
                    on_tool_call=on_tool_call,
                    stream=stream,
                    json_mode=json_mode
                )
            raise
112
+
113
# Process-wide router singleton; created lazily by get_router().
_router_instance: Optional[LLMRouter] = None

def get_router(settings: Any = None) -> LLMRouter:
    """Return the process-wide LLMRouter singleton.

    On first call *settings* is required to construct the router; later
    calls may omit it and receive the cached instance.

    Raises:
        RuntimeError: if the router has not been created yet and no
            settings were provided.
    """
    global _router_instance
    # BUGFIX-adjacent idiom: identity check instead of truthiness, so an
    # already-constructed router is never rebuilt or rejected.
    if _router_instance is None:
        if settings:
            _router_instance = LLMRouter(settings)
        else:
            raise RuntimeError("LLM Router not initialized and no settings provided")
    return _router_instance
sigma/setup_agent.py ADDED
@@ -0,0 +1,188 @@
1
+ import sys
2
+ import os
3
+ import subprocess
4
+ import platform
5
+ import shutil
6
+ import time
7
+ from rich.console import Console
8
+ from rich.prompt import Prompt, Confirm
9
+ from rich.panel import Panel
10
+ from rich.progress import Progress, SpinnerColumn, TextColumn
11
+ from rich.markdown import Markdown
12
+
13
+ from .config import get_settings, save_api_key, save_setting, mark_first_run_complete
14
+ from .llm.registry import REGISTRY
15
+
16
+ console = Console()
17
+
18
class SetupAgent:
    """Interactive first-run wizard for Sigma.

    Walks the user through environment checks, LEAN CLI and Ollama
    installation, default local-model selection, and API-key entry for
    the supported data/LLM providers.
    """

    def __init__(self):
        # Lowercased OS name, e.g. "darwin", "linux", "windows".
        self.os_type = platform.system().lower()
        self.lean_ok = False    # set True once the LEAN CLI is confirmed present
        self.ollama_ok = False  # set True once the ollama binary is confirmed present

    def run(self):
        """Execute every setup step in order, then mark first run complete."""
        console.clear()
        console.print(Panel.fit("[bold blue]Welcome to Sigma Setup Agent[/bold blue]", border_style="blue"))
        console.print("[dim]Initializing your financial research environment...[/dim]\n")

        self.check_environment()
        self.install_lean()
        self.install_ollama()
        self.configure_ollama_model()
        self.configure_api_tools()

        mark_first_run_complete()
        console.print("\n[bold green]Setup Complete![/bold green] Launching Sigma...")
        time.sleep(2)  # brief pause so the user can read the final message

    def check_environment(self):
        """Report the detected Python version and operating system."""
        console.print("[bold]1. Checking Environment[/bold]")

        # Python
        py_ver = sys.version.split()[0]
        console.print(f" ✓ Python {py_ver}")

        # OS
        console.print(f" ✓ OS: {self.os_type}")

    def install_lean(self):
        """Detect or install the LEAN CLI (required for backtesting)."""
        console.print("\n[bold]2. Setting up LEAN Engine[/bold]")

        # Check if lean is installed
        if shutil.which("lean"):
            console.print(" ✓ LEAN CLI found")
            self.lean_ok = True
            return

        if Confirm.ask(" [yellow]LEAN CLI not found. Install it now?[/yellow] (Required for backtesting)"):
            with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
                task = progress.add_task("Installing LEAN CLI...", total=None)
                try:
                    # Prefer pipx (isolated env); fall back to pip in this interpreter.
                    if shutil.which("pipx"):
                        subprocess.run(["pipx", "install", "lean-cli"], check=True, capture_output=True)
                    else:
                        subprocess.check_call([sys.executable, "-m", "pip", "install", "lean-cli"])
                    progress.update(task, completed=True)
                    console.print(" ✓ LEAN CLI installed successfully")
                    self.lean_ok = True

                    # `lean init` might require user interaction, so just lay
                    # down a minimal local config instead.
                    self._create_minimal_lean_config()

                except Exception as e:
                    console.print(f" [red]Failed to install LEAN: {e}[/red]")
                    console.print(" Please install manually: `pip install lean-cli`")

    def _create_minimal_lean_config(self):
        """Ensure the local LEAN data directory exists."""
        lean_dir = os.path.expanduser("~/.sigma/lean_data")
        os.makedirs(lean_dir, exist_ok=True)
        # `lean init` may be needed eventually; for now the folder is enough.

    def install_ollama(self):
        """Detect or install Ollama and try to ensure its daemon is running."""
        console.print("\n[bold]3. Setting up Ollama[/bold]")

        if shutil.which("ollama"):
            console.print(" ✓ Ollama binary found")
            self.ollama_ok = True
        else:
            console.print(" [yellow]Ollama not found.[/yellow]")
            if self.os_type == "darwin" and shutil.which("brew"):
                if Confirm.ask(" Install via Homebrew?"):
                    subprocess.run(["brew", "install", "ollama"], check=True)
                    self.ollama_ok = True
            elif self.os_type == "linux":
                console.print(" Please install Ollama: curl -fsSL https://ollama.com/install.sh | sh")
                # BUGFIX: was Confirm.ask, which rejects a bare Enter and
                # re-prompts for y/n; a free-form prompt with an empty
                # default matches the "Press Enter" instruction.
                Prompt.ask(" Press Enter once installed", default="")

                # Verify again
                if shutil.which("ollama"):
                    self.ollama_ok = True

        if self.ollama_ok:
            # Probe the daemon with a cheap command.
            try:
                subprocess.run(["ollama", "list"], check=True, capture_output=True)
                console.print(" ✓ Ollama daemon is running")
            except subprocess.CalledProcessError:
                console.print(" [yellow]Ollama daemon not running. Attempting to start...[/yellow]")
                # Attempt start in the background; no health check afterwards.
                if self.os_type == "darwin":
                    subprocess.Popen(["open", "-a", "Ollama"])
                elif self.os_type == "linux":
                    subprocess.Popen(["ollama", "serve"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)

                time.sleep(5)  # give the daemon a moment to come up
                console.print(" ✓ Attempted start. Please verify in taskbar if needed.")

    def configure_ollama_model(self):
        """Pick (and optionally pull) the default local Ollama model."""
        console.print("\n[bold]4. Configuring AI Models[/bold]")

        if not self.ollama_ok:
            console.print(" [dim]Skipping Ollama model setup (Ollama not available)[/dim]")
            return

        choices = ["llama3.2", "mistral", "phi3", "custom"]
        console.print("Select a default local model:")
        for i, m in enumerate(choices):
            console.print(f" {i+1}) {m}")

        selection = Prompt.ask("Choose [1-4]", choices=["1", "2", "3", "4"], default="1")
        model = choices[int(selection)-1]

        if model == "custom":
            model = Prompt.ask("Enter model name (e.g. deepseek-coder)")

        console.print(f" Selected: [bold]{model}[/bold]")

        # Pull the model if `ollama list` does not already show it.
        try:
            result = subprocess.run(["ollama", "list"], capture_output=True, text=True)
            if model not in result.stdout:
                if Confirm.ask(f" Pull {model} now? (Required to use it)"):
                    with Progress(SpinnerColumn(), TextColumn("[progress.description]{task.description}")) as progress:
                        task = progress.add_task(f"Pulling {model}...", total=None)
                        subprocess.run(["ollama", "pull", model], check=True)
        except Exception as e:
            console.print(f" [red]Error checking/pulling model: {e}[/red]")

        save_setting("default_model", model)
        save_setting("ollama_url", "http://localhost:11434")

    def configure_api_tools(self):
        """Prompt for API keys of any data/LLM providers not yet configured."""
        console.print("\n[bold]5. Enabling Data Providers[/bold]")

        providers = [
            ("Alpha Vantage", "alpha_vantage_key"),
            ("Polygon.io", "polygon_api_key"),
            ("Financial Modeling Prep", "fmp_api_key"),
            ("Exa Search", "exa_api_key"),
            ("Massive", "massive_api_key"),
            ("OpenAI", "openai_api_key"),
            ("Anthropic", "anthropic_api_key"),
            ("Google Gemini", "google_api_key"),
        ]

        for name, key_id in providers:
            # Re-read settings each iteration so keys saved earlier in this
            # loop are reflected in subsequent lookups.
            settings = get_settings()
            existing = getattr(settings, key_id, None)

            if not existing:
                if Confirm.ask(f" Configure {name}?"):
                    key = Prompt.ask(f" Enter API Key for {name}", password=True)
                    if key:
                        save_api_key(key_id, key)
                        console.print(f" ✓ Saved {name}")
            else:
                console.print(f" ✓ {name} configured")
181
+
182
def run_setup():
    """Run the interactive setup wizard; always reports success."""
    wizard = SetupAgent()
    wizard.run()
    return True

if __name__ == "__main__":
    run_setup()
@@ -0,0 +1,23 @@
1
+ from .registry import TOOL_REGISTRY, ToolDefinition
2
+ from .adapter import register_legacy_tools
3
+ from .library import *
4
+
5
+ # Ensure legacy tools are registered
6
+ register_legacy_tools()
7
+
8
+ # Expose execute_tool for backward compatibility if needed, using the registry
9
def execute_tool(name: str, args: dict):
    """Backward-compatible synchronous tool dispatch through the registry.

    Looks *name* up in TOOL_REGISTRY and invokes its implementation with
    *args* as keyword arguments. Failures are reported as ``{"error": ...}``
    dicts rather than raised, matching the legacy contract.
    """
    tool = TOOL_REGISTRY.get_tool(name)
    if not tool:
        return {"error": "Tool not found"}
    try:
        return tool.func(**args)
    except Exception as e:
        # Boundary: surface the failure to the caller instead of raising.
        return {"error": str(e)}
20
+
21
+ # Helpers
22
def get_tools_for_llm():
    """Return every registered tool in the schema format the LLM expects."""
    return TOOL_REGISTRY.to_llm_format()
sigma/tools/adapter.py ADDED
@@ -0,0 +1,38 @@
1
+ from typing import Any, Dict
2
+ from .registry import TOOL_REGISTRY, ToolDefinition
3
+ from .library import TOOLS, TOOL_FUNCTIONS
4
+
5
def register_legacy_tools():
    """Import tools from the legacy library defined in library.py"""
    # TOOLS holds the JSON schemas and TOOL_FUNCTIONS the implementations;
    # join the two on the function name and push each pair into the registry.
    for entry in TOOLS:
        if entry["type"] != "function":
            continue

        spec = entry["function"]
        tool_name = spec["name"]

        impl = TOOL_FUNCTIONS.get(tool_name)
        if not impl:
            # Schema without an implementation — nothing to register.
            continue

        # Crude provider inference from the tool-name prefix; anything
        # unrecognized defaults to yfinance.
        if tool_name.startswith("polygon"):
            inferred_provider = "polygon"
        elif tool_name.startswith("alpha"):
            inferred_provider = "alpha_vantage"
        else:
            inferred_provider = "yfinance"

        # Write straight into the registry's private map to bypass the
        # decorator-based registration path.
        TOOL_REGISTRY._tools[tool_name] = ToolDefinition(
            name=tool_name,
            description=spec.get("description", ""),
            input_schema=spec.get("parameters", {}),
            func=impl,
            provider=inferred_provider
        )

# Make legacy tools available as soon as this module is imported.
register_legacy_tools()
+ register_legacy_tools()