sigma-terminal 2.0.0 (sigma_terminal-2.0.0-py3-none-any.whl)
This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- sigma/__init__.py +9 -0
- sigma/__main__.py +6 -0
- sigma/app.py +947 -0
- sigma/core/__init__.py +18 -0
- sigma/core/agent.py +205 -0
- sigma/core/config.py +119 -0
- sigma/core/llm.py +794 -0
- sigma/core/models.py +153 -0
- sigma/setup.py +455 -0
- sigma/tools/__init__.py +5 -0
- sigma/tools/backtest.py +1506 -0
- sigma/tools/charts.py +400 -0
- sigma/tools/financial.py +1457 -0
- sigma/ui/__init__.py +1 -0
- sigma_terminal-2.0.0.dist-info/METADATA +222 -0
- sigma_terminal-2.0.0.dist-info/RECORD +19 -0
- sigma_terminal-2.0.0.dist-info/WHEEL +4 -0
- sigma_terminal-2.0.0.dist-info/entry_points.txt +2 -0
- sigma_terminal-2.0.0.dist-info/licenses/LICENSE +42 -0
sigma/core/__init__.py
ADDED
@@ -0,0 +1,18 @@
"""Core module initialization."""

from sigma.core.agent import SigmaAgent
from sigma.core.config import LLMProvider, Settings, get_settings
from sigma.core.llm import get_llm
from sigma.core.models import Message, MessageRole, ToolCall, ToolResult

__all__ = [
    "SigmaAgent",
    "LLMProvider",
    "Settings",
    "get_settings",
    "get_llm",
    "Message",
    "MessageRole",
    "ToolCall",
    "ToolResult",
]
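The re-exports above make the core API importable from a single namespace. A minimal usage sketch, assuming the package is installed and the names resolve as listed (the query string is illustrative only):

# Sketch: exercises the names re-exported by sigma.core.
from sigma.core import Message, MessageRole, get_settings

settings = get_settings()
msg = Message(role=MessageRole.USER, content="What is AAPL trading at?")
print(settings.default_provider, msg.role)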
sigma/core/agent.py
ADDED
@@ -0,0 +1,205 @@
"""Sigma Research Agent."""

import asyncio
import time
from datetime import datetime
from typing import Any, Callable, Optional

from sigma.core.config import LLMProvider, get_settings
from sigma.core.llm import get_llm, BaseLLM
from sigma.core.models import Message, MessageRole, ToolCall, ToolResult
from sigma.tools.financial import get_all_tools, execute_tool


SYSTEM_PROMPT = """You are Sigma, an elite quantitative financial research analyst and AI-powered trading strategist. You have institutional-grade expertise in markets, securities analysis, algorithmic trading, and investment research.

CRITICAL CONSTRAINT: You ONLY respond to finance, investing, trading, and market-related queries.
- If a user asks about non-financial topics, politely redirect them to financial topics
- Example: "I specialize in financial analysis. I can help you with stock analysis, market research, portfolio optimization, or trading strategies. What financial topic would you like to explore?"

Your capabilities include:

MARKET DATA & ANALYSIS
- Real-time stock quotes and market data
- Historical price analysis and charting
- Sector performance and market indices
- Options chain analysis

TECHNICAL ANALYSIS
- Price charts (line, candle, area)
- Moving averages (SMA, EMA)
- RSI, MACD, Bollinger Bands
- Support/resistance levels
- Technical signals and patterns

FUNDAMENTAL ANALYSIS
- Financial statements (income, balance sheet, cash flow)
- Valuation metrics (P/E, P/B, PEG, EV/EBITDA)
- Earnings and revenue analysis
- Profitability ratios (ROE, ROA, margins)

PREDICTIONS & FORECASTING
- Price predictions using multiple models
- Sentiment analysis
- Risk assessment
- Monte Carlo simulations

INSTITUTIONAL FEATURES
- Analyst recommendations and price targets
- Insider trading activity
- Institutional holdings
- Short interest data

ALGORITHMIC TRADING
- Generate LEAN engine backtests
- Strategy templates (SMA crossover, RSI mean reversion, MACD momentum, etc.)
- Custom strategy builder
- Backtest parameter optimization

When answering:
1. ALWAYS use tools to get real, current data - never make up numbers
2. Provide specific metrics and data points
3. Present analysis in clear, structured formats
4. Include risk factors and caveats
5. Give actionable insights with reasoning
6. Use charts when visualizing would help
7. Compare to benchmarks and peers when relevant

For comprehensive analysis, use multiple tools:
1. get_stock_quote → Current price and key metrics
2. get_company_info → Business context and fundamentals
3. get_analyst_recommendations → Wall Street sentiment
4. technical_analysis → Price action and signals
5. get_financial_statements → Deep fundamental analysis
6. sentiment_analysis → Multi-factor sentiment
7. price_forecast → Forward-looking projections

For charts and visualization:
- generate_price_chart → Beautiful terminal price charts
- generate_comparison_chart → Compare multiple stocks
- generate_rsi_chart → Price with RSI indicator
- generate_sector_chart → Sector performance overview

For backtesting and strategy:
- list_backtest_strategies → Available strategies
- generate_backtest → Create LEAN algorithm
- generate_custom_backtest → Custom strategy builder

Your analysis should match the quality of Goldman Sachs, Morgan Stanley, and Citadel research."""


class SigmaAgent:
    """Financial research agent."""

    def __init__(
        self,
        provider: Optional[LLMProvider] = None,
        model: Optional[str] = None,
    ):
        self.settings = get_settings()
        self.provider = provider or self.settings.default_provider
        self.llm: BaseLLM = get_llm(self.provider, model)
        self.messages: list[Message] = []
        self.tools = get_all_tools()
        self.tool_results: list[ToolResult] = []
        self._reset()

    def _reset(self):
        """Reset conversation."""
        self.messages = [
            Message(role=MessageRole.SYSTEM, content=SYSTEM_PROMPT)
        ]
        self.tool_results = []

    async def run(
        self,
        query: str,
        on_tool_start: Optional[Callable[[str, dict], None]] = None,
        on_tool_end: Optional[Callable[[str, Any, float], None]] = None,
        on_thinking: Optional[Callable[[str], None]] = None,
        on_response: Optional[Callable[[str], None]] = None,
    ) -> str:
        """Run the agent on a query."""
        self.messages.append(Message(role=MessageRole.USER, content=query))
        self.tool_results = []

        iteration = 0
        start_time = time.time()

        while iteration < self.settings.max_iterations:
            iteration += 1

            # Get LLM response
            try:
                content, tool_calls = await self.llm.generate(
                    messages=self.messages,
                    tools=self.tools,
                )
            except Exception as e:
                return f"Error: {str(e)}"

            # If no tool calls, we're done
            if not tool_calls:
                if content:
                    self.messages.append(Message(role=MessageRole.ASSISTANT, content=content))
                    if on_response:
                        on_response(content)
                    return content
                continue

            # Record assistant message with tool calls
            self.messages.append(Message(
                role=MessageRole.ASSISTANT,
                content=content,
                tool_calls=tool_calls,
            ))

            if on_thinking and content:
                on_thinking(content)

            # Execute tools
            for tc in tool_calls:
                tool_start = time.time()

                if on_tool_start:
                    on_tool_start(tc.name, tc.arguments)

                result = await execute_tool(tc.name, tc.arguments)

                duration_ms = (time.time() - tool_start) * 1000

                tool_result = ToolResult(
                    tool_name=tc.name,
                    tool_call_id=tc.id,
                    success="error" not in str(result).lower(),
                    result=result,
                    duration_ms=duration_ms,
                )
                self.tool_results.append(tool_result)

                if on_tool_end:
                    on_tool_end(tc.name, result, duration_ms)

                # Add tool result message
                self.messages.append(Message(
                    role=MessageRole.TOOL,
                    content=str(result),
                    tool_call_id=tc.id,
                    name=tc.name,
                ))

        return "Max iterations reached. Please try a simpler query."

    def get_stats(self) -> dict[str, Any]:
        """Get execution statistics."""
        total_time = sum(r.duration_ms for r in self.tool_results)
        return {
            "tools_called": len(self.tool_results),
            "total_time_ms": total_time,
            "successful": sum(1 for r in self.tool_results if r.success),
            "failed": sum(1 for r in self.tool_results if not r.success),
        }

    def clear(self):
        """Clear conversation history."""
        self._reset()
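SigmaAgent.run() is an async tool-calling loop: it sends the conversation to the LLM, executes any requested tools through execute_tool, appends the results as TOOL messages, and repeats until the model answers without tool calls or max_iterations is reached. A minimal driver sketch (a hypothetical script, not part of the package; it assumes an API key for the default provider is configured):

import asyncio

from sigma.core.agent import SigmaAgent


async def main() -> None:
    agent = SigmaAgent()  # uses settings.default_provider unless overridden

    # Optional progress callbacks, matching the run() signature above.
    def on_tool_start(name: str, args: dict) -> None:
        print(f"-> calling {name}({args})")

    def on_tool_end(name: str, result, duration_ms: float) -> None:
        print(f"<- {name} finished in {duration_ms:.0f} ms")

    answer = await agent.run(
        "Give me a quick technical read on AAPL.",
        on_tool_start=on_tool_start,
        on_tool_end=on_tool_end,
    )
    print(answer)
    print(agent.get_stats())  # tools_called / total_time_ms / successful / failed


asyncio.run(main())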
sigma/core/config.py
ADDED
@@ -0,0 +1,119 @@
"""Configuration for Sigma."""

from enum import Enum
from pathlib import Path
from typing import Optional

from pydantic import Field, SecretStr
from pydantic_settings import BaseSettings, SettingsConfigDict


class LLMProvider(str, Enum):
    """LLM providers."""
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
    OLLAMA = "ollama"
    XAI = "xai"
    GROQ = "groq"


class Settings(BaseSettings):
    """Application settings."""

    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore",
    )

    # App
    app_name: str = "Sigma"
    debug: bool = False

    # LLM
    default_provider: LLMProvider = LLMProvider.GOOGLE

    # API Keys
    openai_api_key: Optional[SecretStr] = Field(default=None, alias="OPENAI_API_KEY")
    anthropic_api_key: Optional[SecretStr] = Field(default=None, alias="ANTHROPIC_API_KEY")
    google_api_key: Optional[SecretStr] = Field(default=None, alias="GOOGLE_API_KEY")
    xai_api_key: Optional[SecretStr] = Field(default=None, alias="XAI_API_KEY")
    groq_api_key: Optional[SecretStr] = Field(default=None, alias="GROQ_API_KEY")

    # Models
    openai_model: str = "gpt-4o"
    anthropic_model: str = "claude-sonnet-4-20250514"
    google_model: str = "gemini-2.0-flash"
    ollama_model: str = "llama3.2"
    xai_model: str = "grok-beta"
    groq_model: str = "llama-3.3-70b-versatile"

    # Ollama
    ollama_base_url: str = "http://127.0.0.1:11434"

    # Search
    exa_api_key: Optional[SecretStr] = Field(default=None, alias="EXASEARCH_API_KEY")
    tavily_api_key: Optional[SecretStr] = Field(default=None, alias="TAVILY_API_KEY")
    serper_api_key: Optional[SecretStr] = Field(default=None, alias="SERPER_API_KEY")

    # Financial
    financial_datasets_api_key: Optional[SecretStr] = Field(default=None, alias="FINANCIAL_DATASETS_API_KEY")
    fmp_api_key: Optional[SecretStr] = Field(default=None, alias="FMP_API_KEY")
    polygon_api_key: Optional[SecretStr] = Field(default=None, alias="POLYGON_API_KEY")
    alpha_vantage_api_key: Optional[SecretStr] = Field(default=None, alias="ALPHA_VANTAGE_API_KEY")

    # Agent
    max_iterations: int = 25
    max_tokens: int = 8192
    temperature: float = 0.1

    # Storage
    cache_dir: Path = Path.home() / ".sigma" / "cache"
    db_path: Path = Path.home() / ".sigma" / "sigma.db"

    def get_api_key(self, provider: LLMProvider) -> Optional[str]:
        """Get API key for provider."""
        keys = {
            LLMProvider.OPENAI: self.openai_api_key,
            LLMProvider.ANTHROPIC: self.anthropic_api_key,
            LLMProvider.GOOGLE: self.google_api_key,
            LLMProvider.XAI: self.xai_api_key,
            LLMProvider.GROQ: self.groq_api_key,
        }
        key = keys.get(provider)
        return key.get_secret_value() if key else None

    def get_model(self, provider: Optional[LLMProvider] = None) -> str:
        """Get model for provider."""
        provider = provider or self.default_provider
        models = {
            LLMProvider.OPENAI: self.openai_model,
            LLMProvider.ANTHROPIC: self.anthropic_model,
            LLMProvider.GOOGLE: self.google_model,
            LLMProvider.OLLAMA: self.ollama_model,
            LLMProvider.XAI: self.xai_model,
            LLMProvider.GROQ: self.groq_model,
        }
        return models.get(provider, self.google_model)

    def get_available_providers(self) -> list[LLMProvider]:
        """Get providers with API keys."""
        available = []
        for p in LLMProvider:
            if p == LLMProvider.OLLAMA:
                available.append(p)
            elif self.get_api_key(p):
                available.append(p)
        return available


_settings: Optional[Settings] = None


def get_settings() -> Settings:
    """Get settings singleton."""
    global _settings
    if _settings is None:
        _settings = Settings()
    return _settings