puli-plg 0.1.26__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- puli_plg-0.1.26/.gitignore +25 -0
- puli_plg-0.1.26/PKG-INFO +14 -0
- puli_plg-0.1.26/puli_mcp_server/__init__.py +0 -0
- puli_plg-0.1.26/puli_mcp_server/credentials/.gitkeep +0 -0
- puli_plg-0.1.26/puli_mcp_server/credentials/service-account.json +13 -0
- puli_plg-0.1.26/puli_mcp_server/embedding_client/__init__.py +0 -0
- puli_plg-0.1.26/puli_mcp_server/embedding_client/client.py +34 -0
- puli_plg-0.1.26/puli_mcp_server/embedding_client/config.py +49 -0
- puli_plg-0.1.26/puli_mcp_server/llm_agent/__init__.py +0 -0
- puli_plg-0.1.26/puli_mcp_server/llm_agent/config.py +85 -0
- puli_plg-0.1.26/puli_mcp_server/llm_agent/llm_agent.py +46 -0
- puli_plg-0.1.26/puli_mcp_server/llm_agent/models.py +284 -0
- puli_plg-0.1.26/puli_mcp_server/mcp_server/__init__.py +0 -0
- puli_plg-0.1.26/puli_mcp_server/mcp_server/models.py +63 -0
- puli_plg-0.1.26/puli_mcp_server/mcp_server/server.py +123 -0
- puli_plg-0.1.26/puli_mcp_server/proxy_client/__init__.py +3 -0
- puli_plg-0.1.26/puli_mcp_server/proxy_client/client.py +264 -0
- puli_plg-0.1.26/puli_mcp_server/proxy_client/config.py +74 -0
- puli_plg-0.1.26/puli_mcp_server/proxy_client/token_manager.py +36 -0
- puli_plg-0.1.26/puli_mcp_server/test_diff.json +16 -0
- puli_plg-0.1.26/puli_models/__init__.py +9 -0
- puli_plg-0.1.26/puli_models/chaos_patterns.py +89 -0
- puli_plg-0.1.26/puli_models/incidents.py +78 -0
- puli_plg-0.1.26/pyproject.toml +44 -0
puli_plg-0.1.26/.gitignore
ADDED
@@ -0,0 +1,25 @@
+# Environment files
+.env
+.env.local
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+venv/
+env/
+.venv/
+ENV/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Terraform (handled in terraform/zilliz/.gitignore)
+
+# uv
+.python-version
puli_plg-0.1.26/PKG-INFO
ADDED
@@ -0,0 +1,14 @@
+Metadata-Version: 2.4
+Name: puli-plg
+Version: 0.1.26
+Summary: Zilliz vector database client for incident management
+Requires-Python: <3.14,>=3.10
+Requires-Dist: google-auth>=2.0.0
+Requires-Dist: mcp[cli]>=0.1.0
+Requires-Dist: openai>=1.0.0
+Requires-Dist: pydantic-ai>=0.0.18
+Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: pyyaml>=6.0.1
+Provides-Extra: dev
+Requires-Dist: pytest>=8.0.0; extra == 'dev'
+Requires-Dist: ruff>=0.1.0; extra == 'dev'
puli_plg-0.1.26/puli_mcp_server/__init__.py
File without changes
puli_plg-0.1.26/puli_mcp_server/credentials/.gitkeep
File without changes
puli_plg-0.1.26/puli_mcp_server/credentials/service-account.json
ADDED
@@ -0,0 +1,13 @@
+{
+  "type": "service_account",
+  "project_id": "totemic-formula-484216-s6",
+  "private_key_id": "8078be8f617d2c0c2c6e4cc7c94132135664c3c0",
+  "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNQfmPOGwiNCNu\njztooUcrhOG6upIdA8BZxFg7UJKa4UrugXowoUZyyu1E3JzMr7kNRZMa3perDDl6\nP04HtA8mtqdBEnJeNIQlwV0FiGHuPuzEQBpKBjz2LCPOHaG0m5tQZYPngynzHdF6\nGLgpMaYpnPZGsqpRi6F44zI2+wj4MsGyGuA+pukeMiS1fAhjMUDhHDpN6kryLU9D\nLSx79oY04I3SE/kroGhtH5BPmv/fxpkhM8e1KzvHeZ3FKosfCskzsdaCNR0xCNSi\n+8SXaq0SJrXIkDDxQ0W3pxmfH0y17hX8tJSbsyeiCg3/+mJpsEbO0Z6colmfoJu+\nIOmuyCA/AgMBAAECggEAWMEe+u2gYHfgHFYGD9uxLSHEA9zaAn7VRVuv9VIFWsBx\niMlm/zE1h7y0tVKZ4K5ZW+JKVlOLDLT4A6LtCEq1RH21u98QoiiePb8rjJFpGz6N\nXd0EIR8qbB4xC1bnzfN9SNnknA6s7Innwx5+P53O5m6PVYm7ORCiLWY8l+ab/coZ\npY82vMrO0ohQwIbFiJjgnL2rO/sF/Z3sKR4Iw6EUbLV/oY33geGJ45hqsmVOxQ3C\nvfDxxVbot1wdGMHuVDQrrcQx7pNcODVaDwQ47rHPNmCu12L4ZAHL5UWkhNtlGV1N\nbO+zJUOVhlUKbs3Y2aOg4Z0QpRSFp9k9h9I6P24NwQKBgQD8rTbExPCJYDpV6R63\ndY1cbQ6+L2kKLHFyoH7XuPZfa/Z6PCgL2Msdp20as1vCDRgzAjAeO69cSfzbxrxC\ngvymyLub9IegHBQjjvxQz/w/TyPS3jo/buCarv43HNhpQr8rOLbk4fqAblvcwrau\ngDIYlxA+Zqmes8IKdy49UG6VQQKBgQDP9RjTM6IrURWOu6y1MaRMCVzeRLSzLFkK\nUFHIT0NOoS9hK641Z4diKw5uz0hXZgK+T2//AKcYD5utA68BITBHNmH0AwIxxtmr\nbutCDK+Fg5iUuoEXESD/tvDk1caTMCsFW8XuFcbPjlGmNoAQcHglWDWQcv4MgWG4\nFKMhpX3VfwKBgQDQKUcQjgp3sm38rsPechqWRUY7CkXn2rtPqsc0oy1daU0yYHLB\nZ8XV1UO+FnsGf9Eq3KeVkKgkSNPmn3Ai/1RzdrWQgsBk+BzjOn3FecMeyO0DYI7u\nCUNCS94kuz/SX+msCtop7712pvRJB6SGWBhtR65bKiqdEwxNoOfYHXYswQKBgGKz\nQIoVSpXmkSXCe3EwU3tnQjWYRG95z/TuNDKvNBHgvL0Q30knjwEP0HTVWs9ockrj\nJxPPun7YAZo3Ultl5iUWcZ0/xE3dxDfymCQsIy5qTAPjFRvenFvh9ymVMP9w8CpF\n6YZbvZWLRcNuwI+M4elPmXSqr1s+GSQmX1q70iJ/AoGARTjZDvA3wV4Y6+lAhydV\niLtkCm99Bs5bn/Rqpg2dJpX6pnZIWJerDczFK6I3hdl2YZxXx0pvsKL61MPhrqMS\nL99SXviX0709DG48kytIrnJkBvZMTk2WQVWMmswzbpDBa9/KTheKkMJuSLu+VtxU\nghBOZjGr/hILrSuUeQ9eC68=\n-----END PRIVATE KEY-----\n",
+  "client_email": "puli-mcp-client@totemic-formula-484216-s6.iam.gserviceaccount.com",
+  "client_id": "115504467731897363185",
+  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+  "token_uri": "https://oauth2.googleapis.com/token",
+  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
+  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/puli-mcp-client%40totemic-formula-484216-s6.iam.gserviceaccount.com",
+  "universe_domain": "googleapis.com"
+}
puli_plg-0.1.26/puli_mcp_server/embedding_client/__init__.py
File without changes
puli_plg-0.1.26/puli_mcp_server/embedding_client/client.py
ADDED
@@ -0,0 +1,34 @@
+from typing import List, Optional
+from openai import OpenAI
+from .config import EmbeddingConfig
+
+
+class EmbeddingClient:
+    """Client for generating text embeddings using OpenAI."""
+
+    def __init__(self, config: Optional[EmbeddingConfig] = None):
+        """
+        Initialize the embedding client.
+
+        Args:
+            config: EmbeddingConfig instance. If None, loads from environment variables.
+        """
+        self.config = config or EmbeddingConfig.from_env()
+        self.client = OpenAI(api_key=self.config.api_key)
+
+    def generate_embedding(self, text: str) -> List[float]:
+        """
+        Generate an embedding for the given text.
+
+        Args:
+            text: The text to embed.
+
+        Returns:
+            A list of floats representing the embedding vector.
+        """
+        # Replace newlines, which can degrade embedding quality
+        clean_text = text.replace("\n", " ")
+        return self.client.embeddings.create(
+            input=[clean_text],
+            model=self.config.model
+        ).data[0].embedding
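A minimal usage sketch for the client above, assuming OPENAI_API_KEY is exported; the input string is illustrative:

```python
# Sketch: embed one string with the env-based default config.
from puli_mcp_server.embedding_client.client import EmbeddingClient

client = EmbeddingClient()  # EmbeddingConfig.from_env() under the hood
vector = client.generate_embedding("Checkout fails when the cart is empty")
print(len(vector))  # 3072 for text-embedding-3-large
```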
puli_plg-0.1.26/puli_mcp_server/embedding_client/config.py
ADDED
@@ -0,0 +1,49 @@
+import os
+from dataclasses import dataclass
+from typing import Dict, Any
+
+EMBEDDING_ALGORITHM = "text-embedding-3-large"
+
+
+@dataclass
+class EmbeddingConfig:
+    """Configuration for Embedding client, loaded from environment variables."""
+
+    api_key: str
+    model: str
+
+    @classmethod
+    def from_env(cls) -> "EmbeddingConfig":
+        """Load configuration from environment variables."""
+        api_key = os.environ.get("OPENAI_API_KEY")
+        if not api_key:
+            raise ValueError("OpenAI API key is required. Set OPENAI_API_KEY environment variable.")
+
+        model = os.environ.get("EMBEDDING_ALGORITHM", EMBEDDING_ALGORITHM)
+
+        return cls(
+            api_key=api_key,
+            model=model,
+        )
+
+    @classmethod
+    def from_remote(cls, config: Dict[str, Any]) -> "EmbeddingConfig":
+        """Load configuration from remote config dict (fetched from proxy).
+
+        Args:
+            config: Configuration dictionary from proxy /config/mcp endpoint
+
+        Returns:
+            EmbeddingConfig instance with settings from remote config
+        """
+        api_key = os.environ.get("OPENAI_API_KEY")
+        if not api_key:
+            raise ValueError("OpenAI API key is required. Set OPENAI_API_KEY environment variable.")
+
+        model = config.get("EMBEDDING_ALGORITHM", EMBEDDING_ALGORITHM)
+
+        return cls(
+            api_key=api_key,
+            model=model,
+        )
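How from_remote might be called; the payload shape is an assumption based on the EMBEDDING_ALGORITHM key the method reads, and the API key still has to come from the environment:

```python
import os
from puli_mcp_server.embedding_client.config import EmbeddingConfig

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # never read from the remote dict
remote_config = {"EMBEDDING_ALGORITHM": "text-embedding-3-small"}  # hypothetical /config/mcp payload
cfg = EmbeddingConfig.from_remote(remote_config)
assert cfg.model == "text-embedding-3-small"
```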
puli_plg-0.1.26/puli_mcp_server/llm_agent/__init__.py
File without changes
puli_plg-0.1.26/puli_mcp_server/llm_agent/config.py
ADDED
@@ -0,0 +1,85 @@
+import os
+import yaml
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, Any
+
+
+LLM_PROVIDER = "openai"
+LLM_MODEL = "gpt-4o"
+LLM_TEMPERATURE = 0.7
+
+# Resolve relative to project root: config.py → llm_agent → puli_mcp_server → src → project root
+_PROJECT_ROOT = Path(__file__).resolve().parent.parent.parent.parent
+PROMPT_FILE_PATH = _PROJECT_ROOT / "prompts" / "analyze_code_prompt.yaml"
+
+
+@dataclass
+class LLMAgentConfig:
+    """Configuration for LLM Agent, loaded from environment variables."""
+
+    provider: str
+    model: str
+    temperature: float
+    system_prompt: str
+
+    @classmethod
+    def from_env(cls) -> "LLMAgentConfig":
+        """Load configuration from environment variables and local prompt file."""
+        provider = os.environ.get("LLM_PROVIDER", LLM_PROVIDER)
+        model = os.environ.get("LLM_MODEL", LLM_MODEL)
+        temperature = float(os.environ.get("LLM_TEMPERATURE", LLM_TEMPERATURE))
+
+        # Load prompt from absolute path
+        try:
+            with open(PROMPT_FILE_PATH, 'r', encoding='utf-8') as f:
+                prompt_text = f.read()
+
+            try:
+                data = yaml.safe_load(prompt_text)
+                system_prompt = data.get("prompt")
+            except yaml.YAMLError as e:
+                raise ValueError(f"Error parsing YAML prompt file: {e}")
+
+            if not system_prompt:
+                raise ValueError("System prompt 'prompt' key not found in YAML file")
+
+        except FileNotFoundError:
+            raise ValueError(f"Prompt file not found at {PROMPT_FILE_PATH}")
+        except Exception as e:
+            raise ValueError(f"Could not load prompt from {PROMPT_FILE_PATH}: {e}")
+
+        return cls(
+            provider=provider,
+            model=model,
+            temperature=temperature,
+            system_prompt=system_prompt,
+        )
+
+    @classmethod
+    def from_remote(cls, config: Dict[str, Any], prompts: Dict[str, Any]) -> "LLMAgentConfig":
+        """Load configuration from remote config dict (fetched from proxy).
+
+        Args:
+            config: Configuration dictionary from proxy /config/mcp endpoint
+            prompts: Prompts dictionary from proxy /config/mcp endpoint
+
+        Returns:
+            LLMAgentConfig instance with settings from remote config
+        """
+        provider = config.get("LLM_PROVIDER", LLM_PROVIDER)
+        model = config.get("LLM_MODEL", LLM_MODEL)
+        temperature = config.get("LLM_TEMPERATURE", LLM_TEMPERATURE)
+
+        system_prompt = prompts.get("analyze_prompt")
+        if not system_prompt:
+            print(f"analyze_prompt not found in prompts configuration: {prompts}")
+            raise ValueError("analyze_prompt not found in prompts configuration")
+
+        return cls(
+            provider=provider,
+            model=model,
+            temperature=temperature,
+            system_prompt=system_prompt,
+        )
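from_env expects prompts/analyze_code_prompt.yaml (resolved four levels above this file) to carry a top-level prompt key. A sketch of that contract with stand-in prompt text, since the real prompt file is not part of this diff:

```python
# Sketch: the YAML shape LLMAgentConfig.from_env() requires (stand-in text).
import yaml

prompt_yaml = """
prompt: |
  You are a code-risk analyst. Given a change set, historical incidents,
  and chaos patterns, return a structured risk assessment.
"""
data = yaml.safe_load(prompt_yaml)
assert data.get("prompt")  # from_env raises ValueError when this key is absent
```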
puli_plg-0.1.26/puli_mcp_server/llm_agent/llm_agent.py
ADDED
@@ -0,0 +1,46 @@
+from typing import Optional
+
+from pydantic_ai import Agent
+
+from .config import LLMAgentConfig
+from .models import LLMQueryRequest, RiskAssessment
+
+
+class LLMAgent:
+    """Agent for querying an LLM using pydantic-ai."""
+
+    def __init__(self, config: Optional[LLMAgentConfig] = None):
+        """
+        Initialize the LLM agent.
+
+        Args:
+            config: LLMAgentConfig instance. If None, loads from environment variables.
+        """
+        self.config = config or LLMAgentConfig.from_env()
+
+        # Build the model name string (e.g., "openai:gpt-4" or "anthropic:claude-3")
+        model_name = f"{self.config.provider}:{self.config.model}"
+
+        # Initialize the Agent with the model name string.
+        # pydantic-ai will automatically read API keys from environment variables.
+        self.agent = Agent(
+            model_name,
+            system_prompt=self.config.system_prompt,
+            output_type=RiskAssessment,
+        )
+
+    async def query(self, request: LLMQueryRequest) -> RiskAssessment:
+        """
+        Query the LLM with a ChangeSet and historical incidents.
+
+        Args:
+            request: LLMQueryRequest containing the data to analyze.
+
+        Returns:
+            The LLM response as a RiskAssessment object.
+        """
+        # Format the prompt with the request data
+        prompt = request.to_prompt_str()
+        result = await self.agent.run(prompt)
+        return result.output
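A minimal end-to-end sketch of driving the agent, with hand-built hypothetical inputs; it assumes OPENAI_API_KEY is set and the prompt file described above exists:

```python
# Sketch: query LLMAgent with a tiny hypothetical change set.
import asyncio

from puli_mcp_server.llm_agent.llm_agent import LLMAgent
from puli_mcp_server.llm_agent.models import LLMQueryRequest
from puli_mcp_server.mcp_server.models import ChangeSet, FileChange

async def main() -> None:
    agent = LLMAgent()  # loads LLMAgentConfig.from_env()
    request = LLMQueryRequest(
        change_set=ChangeSet(
            goal="Fix parsing bug in date conversion",
            changes=[FileChange(
                file_path="src/utils/parser.py",
                diff_content="@@ -1,1 +1,1 @@\n-old_parse()\n+new_parse()",
            )],
        ),
        historical_incidents=[],      # would normally come from the vector DB
        relevant_chaos_patterns=[],
    )
    assessment = await agent.query(request)
    print(assessment.to_str())

asyncio.run(main())
```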
puli_plg-0.1.26/puli_mcp_server/llm_agent/models.py
ADDED
@@ -0,0 +1,284 @@
+from typing import Optional, List
+from pydantic import BaseModel, Field, conint
+from enum import Enum
+import sys
+import os
+
+from puli_mcp_server.mcp_server.models import ChangeSet
+from puli_models import IncidentQueryResult, ChaosPatternQueryResult
+
+
+# ANSI color codes
+class ANSIColors:
+    """ANSI escape codes for terminal colors."""
+    RED = '\033[91m'
+    ORANGE = '\033[38;5;208m'  # Orange (256-color mode)
+    YELLOW = '\033[93m'
+    GREEN = '\033[92m'
+    MAGENTA = '\033[95m'
+    RESET = '\033[0m'
+
+
+def _should_use_colors() -> bool:
+    """
+    Detect if ANSI colors should be used based on environment.
+
+    Returns:
+        True if colors should be used, False otherwise.
+    """
+    # DISABLE_COLOR takes precedence over TTY detection
+    if os.getenv("DISABLE_COLOR"):
+        return False
+
+    # Check if output is a TTY
+    if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty():
+        return True
+
+    return False
+
+
+def risk_meter(rate: int, total: int = 100, bar_length: int = 20, color: str = "") -> str:
+    """
+    Generate a risk meter progress bar.
+
+    Args:
+        rate: Current value (0-100)
+        total: Maximum value (default: 100)
+        bar_length: Number of characters in the bar (default: 20)
+        color: ANSI color code to apply to the meter (optional)
+
+    Returns:
+        String with format: ▰▰▰▰▰▰▰▰▰▰▰▰▰▰▰▱▱▱▱▱
+    """
+    # Calculate how many filled blocks
+    filled = int((rate / total) * bar_length)
+    empty = bar_length - filled
+
+    # Create the bar
+    bar = '▰' * filled + '▱' * empty
+
+    # Apply color if provided
+    if color:
+        return f"{color}{bar}{ANSIColors.RESET}"
+
+    return bar
+
+
+class LLMQueryRequest(BaseModel):
+    """Request object for the LLM Agent query API."""
+    change_set: ChangeSet
+    historical_incidents: List[IncidentQueryResult]
+    relevant_chaos_patterns: List[ChaosPatternQueryResult]
+
+    def to_prompt_str(self) -> str:
+        prompt = "Analyze the following code changes:\n"
+
+        # Change set
+        prompt += self.change_set.to_embedding_string()
+
+        # Add code diffs
+        for change in self.change_set.changes:
+            prompt += f"\n{change.to_str()}"
+
+        # Add chaos patterns
+        chaos_patterns_str_list = [chaos_pattern.to_prompt_str() for chaos_pattern in self.relevant_chaos_patterns]
+        prompt += "\nChaos patterns to consider:\n"
+        for ind, chaos_pattern_str in enumerate(chaos_patterns_str_list):
+            prompt += f"\n{ind}. {chaos_pattern_str}"
+
+        # Add incidents
+        incidents_str_list = [incident.to_prompt_str() for incident in self.historical_incidents]
+        prompt += "\nRelated incidents:\n"
+        for ind, incident_str in enumerate(incidents_str_list):
+            prompt += f"\n{ind}. {incident_str}"
+
+        return prompt
+
+
+# Icon mapping for risk assessment levels
+_RISK_LEVEL_ICONS = {
+    "CRITICAL": "⛔",
+    "HIGH_RISK": "⚠️",
+    "MODERATE": "🔶",
+    "LOW_RISK": "✅",
+}
+
+
+class RiskAssessmentLevel(str, Enum):
+    CRITICAL = "CRITICAL"
+    HIGH_RISK = "HIGH_RISK"
+    MODERATE = "MODERATE"
+    LOW_RISK = "LOW_RISK"
+
+    def to_icon(self) -> str:
+        return _RISK_LEVEL_ICONS.get(self.value, "❓")
+
+
+class TechnicalFinding(BaseModel):
+    """
+    Precise, factual, code-level details of the finding.
+    """
+    file_path: str = Field(..., description="The relative path to the file.")
+    line_number: str = Field(..., description="The specific line number or range (e.g., '47' or '47-52').")
+    change_description: str = Field(..., description="Brief summary of what changed in the code.")
+    technical_reason: str = Field(
+        ...,
+        description="Why this breaks. Must be technical and specific (e.g., 'Missing index causes table scan'). Avoid vague phrases like 'might cause issues'."
+    )
+
+
+class BusinessContext(BaseModel):
+    """
+    Contextualizes the code within the broader business process.
+    Requirement: You must break the flow down into atomic steps.
+    Requirement: One step must clearly identify where the failure occurs (e.g., ["User Clicks", "API Request", "[DB DEADLOCK]", "Response Timeout"]).
+    """
+    process_description: str = Field(..., description="A description of the business process served by this code.")
+    flow_steps: List[str] = Field(
+        ...,
+        description="List of steps in the process. The point of failure should be one of the steps. Add a marker on the step that fails (e.g., '[DB DEADLOCK]')."
+    )
+
+
+class RealIncident(BaseModel):
+    """
+    Historical context if a similar pattern has caused a major outage before.
+    """
+    company: str = Field(..., description="The name of the company that suffered the incident.")
+    year: str = Field(..., description="The year the incident occurred.")
+    description: str = Field(..., description="Brief description of what happened in that specific incident.")
+
+
+class RiskAssessment(BaseModel):
+    """
+    The main structure for the code review output.
+    """
+    risk_assessment_level: RiskAssessmentLevel = Field(
+        ...,
+        description="The level of risk associated with the code change."
+    )
+    risk_score: conint(ge=0, le=100) = Field(
+        ...,
+        description="Risk score from 0 (Critical) to 100 (Safe)."
+    )
+    business_flow_name: str = Field(..., description="Top level name of the flow (e.g. 'Checkout Process').")
+    technical_finding: TechnicalFinding
+    business_context: BusinessContext
+    consequence: str = Field(
+        ...,
+        description="Description: What happens to the user or the business. Style Rule: Be strictly factual. No drama. No hyperbole. Example: \"User is double-charged. Support ticket generated.\" (NOT \"Catastrophic failure destroys trust\")."
+    )
+    chaos_scenario: Optional[str] = Field(
+        None,
+        description="Description of the chaos scenario that was run to test this risk assessment."
+    )
+    historical_incident: Optional[RealIncident] = Field(
+        None,
+        description="Only populate if a famous/known incident matches this exact failure pattern."
+    )
+    closing_line: str = Field(
+        ...,
+        description="A short line describing the risk assessment. No more than 15 words, typically 7 words."
+    )
+
+    def to_str(self, use_colors: Optional[bool] = None) -> str:
+        """
+        Converts the object into the specific text format required for the prompt.
+
+        Args:
+            use_colors: Whether to use ANSI colors. If None, auto-detect based on TTY.
+        """
+        # Auto-detect color support if not explicitly specified
+        if use_colors is None:
+            use_colors = _should_use_colors()
+
+        sep = "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+
+        # Conditionally apply colors
+        colored_sep = sep
+        risk_color = ""
+        if use_colors:
+            colored_sep = f"{ANSIColors.MAGENTA}{sep}{ANSIColors.RESET}"
+            # Map risk levels to colors
+            risk_colors = {
+                "CRITICAL": ANSIColors.RED,
+                "HIGH_RISK": ANSIColors.ORANGE,
+                "MODERATE": ANSIColors.YELLOW,
+                "LOW_RISK": ANSIColors.GREEN,
+            }
+            risk_color = risk_colors.get(self.risk_assessment_level.value, "")
+
+        # Build the sections list
+        sections = []
+
+        # 1. Header Section
+        sections.append(colored_sep)
+        icon = self.risk_assessment_level.to_icon()
+        meter = risk_meter(100 - self.risk_score, color=risk_color)
+
+        risk_assessment_level_str = icon
+        if use_colors:
+            risk_assessment_level_str = f"{risk_color}{icon} {ANSIColors.RESET}"
+
+        sections.append(f"Puli Risk Assessment: {risk_assessment_level_str}")
+        sections.append(meter)
+        sections.append(colored_sep)
+
+        # 2. Technical Finding
+        sections.append("[TECHNICAL FINDING]")
+        tf = self.technical_finding
+        sections.append(f"File: {tf.file_path}, Line: {tf.line_number}")
+        sections.append(f"Change: {tf.change_description}")
+        sections.append(f"Why it breaks: {tf.technical_reason}\n")
+
+        # 3. Business Flow
+        sections.append("[BUSINESS FLOW]")
+        bc = self.business_context
+        sections.append(f"Process: {bc.process_description}")
+
+        # Join the flow steps with arrows
+        formatted_flow = " → ".join(bc.flow_steps)
+        sections.append(f"Flow: {formatted_flow}\n")
+
+        # 4. Consequence
+        sections.append("[CONSEQUENCE]")
+        sections.append(f"{self.consequence}\n")
+
+        # 5. Real Incident (Only if present)
+        if self.historical_incident:
+            inc = self.historical_incident
+            sections.append("[REAL INCIDENT]")
+            sections.append(f"{inc.company} ({inc.year}): {inc.description}")
+            sections.append(colored_sep)
+        else:
+            sections.append(colored_sep)
+
+        # 6. Closing Line
+        sections.append(self.closing_line)
+        sections.append(colored_sep)
+
+        return "\n".join(sections)
+
+
+"""
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+[SEVERITY ICON] [BUSINESS FLOW NAME]
+Score: [X]/100
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+💀 SCENARIO 1: [Catchy 2-4 Word Name] ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+
+In [file] line [N], you're [specific description of what code does].
+
+WHAT IF: [Specific chaos injection — use actual values from their code]
+
+THEN: [Immediate technical consequence]
+
+BUSINESS IMPACT: [Real consequence — money, customers, reputation, legal]
+
+THIS IS REAL: [Company] [Year] — "[Brief quote or description]"
+
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+[CLOSING LINE - based on score tier]
+━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
+"""
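Note how to_str inverts the score before rendering: risk_score runs from 0 (critical) to 100 (safe), so the meter receives 100 - risk_score and a safe change draws a mostly empty bar. A quick check with an illustrative score:

```python
from puli_mcp_server.llm_agent.models import risk_meter

# risk_score = 80 (fairly safe) → rate 20 → 4 of 20 blocks filled.
print(risk_meter(100 - 80))  # ▰▰▰▰▱▱▱▱▱▱▱▱▱▱▱▱▱▱▱▱
```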
puli_plg-0.1.26/puli_mcp_server/mcp_server/__init__.py
File without changes
puli_plg-0.1.26/puli_mcp_server/mcp_server/models.py
ADDED
@@ -0,0 +1,63 @@
+from pydantic import BaseModel, Field
+from typing import List, Literal
+
+
+class FileChange(BaseModel):
+    """
+    Represents a change to a single file.
+    """
+    file_path: str = Field(
+        ...,
+        description="The full path of the file being modified (e.g., 'src/utils/parser.py')."
+    )
+    change_type: Literal["modify", "create", "delete"] = Field(
+        default="modify",
+        description="Whether this file is being edited, created new, or removed."
+    )
+    diff_content: str = Field(
+        ...,
+        description=(
+            "The standard Unified Diff of the change. "
+            "Must include '@@' headers and 3 lines of context around changes."
+        )
+    )
+
+    def to_str(self) -> str:
+        """Returns a string representation of the FileChange."""
+        return f"File: {self.file_path}\nType: {self.change_type}\nDiff:\n{self.diff_content}"
+
+
+class ChangeSet(BaseModel):
+    """
+    A logical grouping of changes across multiple files to achieve a single goal.
+    """
+    goal: str = Field(
+        ...,
+        description="High-level description of WHY this change is happening (e.g., 'Fix parsing bug in date conversion')."
+    )
+
+    # Renaming 'Code_diffs' to 'changes' for clarity
+    changes: List[FileChange]
+
+    related_infrastructure: str | None = Field(
+        None,
+        description="A list of specific system components, external services, or resources "
+                    "that this code interacts with or affects. "
+                    "Be specific (e.g., 'PostgreSQL: Users Table', 'Redis Cache', 'AWS S3', 'Stripe API', 'Kafka'). "
+                    "Do not list generic terms like 'Backend' or 'Server'."
+    )
+
+    additional_context: str | None = Field(
+        None,
+        description="Any extra notes, ticket numbers, or constraints which are important to this area of the code and its meaning."
+    )
+
+    def to_embedding_string(self) -> str:
+        """Returns a string representation of the ChangeSet for embedding, excluding changes."""
+        parts = [f"Goal: {self.goal}"]
+        if self.related_infrastructure:
+            parts.append(f"Infrastructure: {self.related_infrastructure}")
+        if self.additional_context:
+            parts.append(f"Context: {self.additional_context}")
+        return "\n".join(parts)
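A small sketch of the embedding-side contract: to_embedding_string deliberately omits the diffs, so only the goal, infrastructure, and context reach the embedding model (values here are hypothetical):

```python
from puli_mcp_server.mcp_server.models import ChangeSet, FileChange

cs = ChangeSet(
    goal="Fix parsing bug in date conversion",
    changes=[FileChange(
        file_path="src/utils/parser.py",
        diff_content="@@ -1,1 +1,1 @@\n-old_parse()\n+new_parse()",
    )],
    related_infrastructure="PostgreSQL: Users Table",
)
print(cs.to_embedding_string())
# Goal: Fix parsing bug in date conversion
# Infrastructure: PostgreSQL: Users Table
```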