lightman_ai 0.21.2__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lightman_ai might be problematic.
- lightman_ai/ai/base/exceptions.py +2 -2
- lightman_ai/ai/gemini/exceptions.py +2 -2
- lightman_ai/ai/openai/exceptions.py +2 -2
- lightman_ai/cli.py +19 -21
- lightman_ai/core/exceptions.py +2 -2
- lightman_ai/core/settings.py +20 -4
- lightman_ai/exceptions.py +5 -0
- lightman_ai/integrations/service_desk/exceptions.py +2 -2
- lightman_ai/main.py +9 -9
- lightman_ai/utils.py +25 -0
- {lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/METADATA +47 -121
- {lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/RECORD +15 -13
- {lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/WHEEL +0 -0
- {lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/entry_points.txt +0 -0
- {lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/licenses/LICENSE +0 -0
lightman_ai/ai/gemini/exceptions.py
CHANGED

@@ -2,10 +2,10 @@ from collections.abc import Generator
 from contextlib import contextmanager
 from typing import Any

-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError


-class BaseGeminiError(
+class BaseGeminiError(BaseLightmanError): ...


 class GeminiError(BaseGeminiError): ...

lightman_ai/ai/openai/exceptions.py
CHANGED

@@ -4,13 +4,13 @@ from collections.abc import Generator
 from contextlib import contextmanager
 from typing import Any, override

-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError
 from pydantic_ai.exceptions import ModelHTTPError

 from openai import RateLimitError


-class BaseOpenAIError(
+class BaseOpenAIError(BaseLightmanError): ...


 class UnknownOpenAIError(BaseOpenAIError): ...

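The same change is applied across the provider modules: each provider's base exception now inherits from `BaseLightmanError` in `lightman_ai.core.exceptions`. A minimal sketch of what that enables for callers, assuming `BaseLightmanError` is an ordinary `Exception` subclass; the `classify()` helper below is hypothetical, not part of the package:

```python
# Illustrative only: the exception names come from the diffs above; classify()
# is a hypothetical stand-in for a failing agent call.
from lightman_ai.core.exceptions import BaseLightmanError
from lightman_ai.ai.gemini.exceptions import GeminiError
from lightman_ai.ai.openai.exceptions import UnknownOpenAIError


def classify(provider: str) -> None:
    if provider == "gemini":
        raise GeminiError("quota exceeded")
    raise UnknownOpenAIError("unexpected response")


try:
    classify("gemini")
except BaseLightmanError as err:
    # A single handler now covers Gemini, OpenAI and service desk errors alike.
    print(f"lightman error: {err}")
```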
lightman_ai/cli.py
CHANGED
@@ -1,7 +1,6 @@
 import logging
-from datetime import date
+from datetime import date
 from importlib import metadata
-from zoneinfo import ZoneInfo

 import click
 from dotenv import load_dotenv

@@ -10,8 +9,10 @@ from lightman_ai.constants import DEFAULT_CONFIG_FILE, DEFAULT_CONFIG_SECTION, D
 from lightman_ai.core.config import FileConfig, FinalConfig, PromptConfig
 from lightman_ai.core.exceptions import ConfigNotFoundError, InvalidConfigError, PromptNotFoundError
 from lightman_ai.core.sentry import configure_sentry
-from lightman_ai.core.settings import
+from lightman_ai.core.settings import Settings
+from lightman_ai.exceptions import MultipleDateSourcesError
 from lightman_ai.main import lightman
+from lightman_ai.utils import get_start_date

 logger = logging.getLogger("lightman")

@@ -58,7 +59,7 @@ def entry_point() -> None:
 @click.option(
     "--env-file",
     type=str,
-    default=
+    default=None,
     help=(f"Path to the environment file. Defaults to `{DEFAULT_ENV_FILE}`."),
 )
 @click.option(

@@ -70,6 +71,7 @@ def entry_point() -> None:
 )
 @click.option("--start-date", type=click.DateTime(formats=["%Y-%m-%d"]), help="Start date to retrieve articles")
 @click.option("--today", is_flag=True, help="Retrieve articles from today.")
+@click.option("--yesterday", is_flag=True, help="Retrieve articles from yesterday.")
 def run(
     agent: str,
     prompt: str,

@@ -78,41 +80,37 @@ def run(
     score: int | None,
     config_file: str,
     config: str,
-    env_file: str,
+    env_file: str | None,
     dry_run: bool,
     start_date: date | None,
     today: bool,
+    yesterday: bool,
 ) -> int:
     """
     Entrypoint of the application.

-    Holds no logic. It calls the main method and returns 0 when succesful .
+    Holds no logic. It loads the configuration, calls the main method and returns 0 when succesful .
     """
-    load_dotenv(env_file)
+    load_dotenv(env_file or DEFAULT_ENV_FILE)  # TODO refs: #112
     configure_sentry()

-
-
-
-
-
-    elif isinstance(start_date, date):
-        start_datetime = datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
-    else:
-        start_datetime = None
+    settings = Settings.try_load_from_file(env_file)
+    try:
+        start_datetime = get_start_date(settings, yesterday, today, start_date)
+    except MultipleDateSourcesError as e:
+        raise click.UsageError(e.args[0]) from e

     try:
         prompt_config = PromptConfig.get_config_from_file(path=prompt_file)
         config_from_file = FileConfig.get_config_from_file(config_section=config, path=config_file)
         final_config = FinalConfig.init_from_dict(
             data={
-                "agent": agent or config_from_file.agent,
+                "agent": agent or config_from_file.agent or settings.AGENT,
                 "prompt": prompt or config_from_file.prompt,
-                "score_threshold": score or config_from_file.score_threshold,
+                "score_threshold": score or config_from_file.score_threshold or settings.SCORE,
                 "model": model or config_from_file.model,
             }
         )
-
         prompt_text = prompt_config.get_prompt(final_config.prompt)
     except (InvalidConfigError, PromptNotFoundError, ConfigNotFoundError) as err:
         raise click.BadParameter(err.args[0]) from None

@@ -122,8 +120,8 @@ def run(
         prompt=prompt_text,
         score_threshold=final_config.score_threshold,
         dry_run=dry_run,
-
-
+        service_desk_project_key=config_from_file.service_desk_project_key,
+        service_desk_request_id_type=config_from_file.service_desk_request_id_type,
         model=final_config.model,
         start_date=start_datetime,
     )

lightman_ai/core/exceptions.py
CHANGED
lightman_ai/core/settings.py
CHANGED
@@ -1,14 +1,30 @@
-
+import logging
+from pathlib import Path
+from typing import Any, Self

-from pydantic_settings import BaseSettings
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+logger = logging.getLogger("lightman")


 class Settings(BaseSettings):
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)

-
+    AGENT: str = "openai"
+    SCORE: int = 8
     TIME_ZONE: str = "UTC"
+    model_config = SettingsConfigDict(extra="ignore")
+
+    @classmethod
+    def try_load_from_file(cls, env_file: str | None = None) -> Self:
+        """
+        Initialize Settings class and returns an instance.

+        It tries to load env variables from the env file. Variables set in the environment take precendence.

-    settings
+        If the env file is not present, it continues execution, following pydantic-settings' behaviour.
+        """
+        if env_file and not Path(env_file).exists():
+            logger.warning("env file `%s` not found.", env_file)
+        return cls(_env_file=env_file)

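The new `Settings` fields and `try_load_from_file` give the CLI fallback defaults for the agent, score and time zone. A short usage sketch based only on the API shown in this hunk; the `.env` path and the commented values are illustrative:

```python
from lightman_ai.core.settings import Settings

# Values from the env file are applied, but real environment variables win;
# a missing file only logs a warning instead of aborting.
settings = Settings.try_load_from_file(".env")

print(settings.AGENT)      # "openai" unless AGENT is set in the environment or file
print(settings.SCORE)      # 8 unless SCORE is set
print(settings.TIME_ZONE)  # "UTC" unless TIME_ZONE is set
```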
lightman_ai/integrations/service_desk/exceptions.py
CHANGED

@@ -4,12 +4,12 @@ from contextlib import asynccontextmanager
 from typing import Any

 import httpx
-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError

 logger = logging.getLogger("lightman")


-class BaseServiceDeskError(
+class BaseServiceDeskError(BaseLightmanError):
     """Base exception for all SERVICE_DESK integration errors."""


lightman_ai/main.py
CHANGED
@@ -24,17 +24,17 @@ def _classify_articles(articles: ArticlesList, agent: BaseAgent) -> SelectedArti
 def _create_service_desk_issues(
     selected_articles: list[SelectedArticle],
     service_desk_client: ServiceDeskIntegration,
-
-
+    service_desk_project_key: str,
+    service_desk_request_id_type: str,
 ) -> None:
     async def schedule_task(article: SelectedArticle) -> None:
         try:
             description = f"*Why is relevant:*\n{article.why_is_relevant}\n\n*Source:* {article.link}\n\n*Score:* {article.relevance_score}/10"
             await service_desk_client.create_request_of_type(
-                project_key=
+                project_key=service_desk_project_key,
                 summary=article.title,
                 description=description,
-                request_id_type=
+                request_id_type=service_desk_request_id_type,
             )
             logger.info("Created issue for article %s", article.link)
         except Exception:

@@ -57,8 +57,8 @@ def lightman(
     agent: str,
     prompt: str,
     score_threshold: int,
-
-
+    service_desk_project_key: str | None = None,
+    service_desk_request_id_type: str | None = None,
     dry_run: bool = False,
     model: str | None = None,
     start_date: datetime | None = None,

@@ -83,15 +83,15 @@ def lightman(
     logger.info("No articles found to be relevant. Total returned articles by AI %s", len(classified_articles))

     if not dry_run:
-        if not
+        if not service_desk_project_key or not service_desk_request_id_type:
             raise ValueError("Missing Service Desk's project key or request id type")

         service_desk_client = ServiceDeskIntegration.from_env()
         _create_service_desk_issues(
             selected_articles=selected_articles,
             service_desk_client=service_desk_client,
-
-
+            service_desk_project_key=service_desk_project_key,
+            service_desk_request_id_type=service_desk_request_id_type,
         )

     return selected_articles
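With this change, `lightman()` receives the Service Desk project key and request type explicitly. A hedged calling sketch based only on the signature above; the project key, request type id and prompt text are placeholders, and a real non-dry run would also need the `SERVICE_DESK_*` environment variables consumed by `ServiceDeskIntegration.from_env()`:

```python
from lightman_ai.main import lightman

selected = lightman(
    agent="openai",
    prompt="Analyze cybersecurity news for relevance to our organization.",
    score_threshold=8,
    service_desk_project_key="SEC",        # hypothetical Jira project key
    service_desk_request_id_type="10001",  # hypothetical request type id
    dry_run=True,                          # True skips ticket creation entirely
)
print(len(selected), "articles selected")
```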
lightman_ai/utils.py
ADDED
@@ -0,0 +1,25 @@
+from datetime import date, datetime, time, timedelta
+from zoneinfo import ZoneInfo
+
+from lightman_ai.core.settings import Settings
+from lightman_ai.exceptions import MultipleDateSourcesError
+
+
+def get_start_date(settings: Settings, yesterday: bool, today: bool, start_date: date | None) -> datetime | None:
+    mutually_exclusive_date_fields = [x for x in [start_date, today, yesterday] if x]
+
+    if len(mutually_exclusive_date_fields) > 1:
+        raise MultipleDateSourcesError(
+            "--today, --yesterday and --start-date are mutually exclusive. Set one at a time."
+        )
+
+    if today:
+        now = datetime.now(ZoneInfo(settings.TIME_ZONE))
+        return datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    elif yesterday:
+        yesterday_date = datetime.now(ZoneInfo(settings.TIME_ZONE)) - timedelta(days=1)
+        return datetime.combine(yesterday_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    elif isinstance(start_date, date):
+        return datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    else:
+        return None
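A brief sketch of how the new helper behaves, using only the signatures added in this release; the time zone value is an example:

```python
from lightman_ai.core.settings import Settings
from lightman_ai.exceptions import MultipleDateSourcesError
from lightman_ai.utils import get_start_date

settings = Settings(TIME_ZONE="Europe/Amsterdam")

# --yesterday becomes midnight of the previous day in the configured time zone.
start = get_start_date(settings, yesterday=True, today=False, start_date=None)

# Combining --today/--yesterday/--start-date is rejected.
try:
    get_start_date(settings, yesterday=True, today=True, start_date=None)
except MultipleDateSourcesError as err:
    print(err)
```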

{lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lightman_ai
-Version: 0.
+Version: 1.0.0
 Summary: LLM-powered cybersecurity news aggregator.
 Author-email: sdn4z <imsdn4z@gmail.com>
 License-File: LICENSE

@@ -18,7 +18,7 @@ Description-Content-Type: text/markdown

 # 🔍 Lightman AI

->
+> LLM-Powered Cybersecurity News Intelligence Platform

 ---


@@ -49,7 +49,9 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment

 ## 🚀 Quick Start

-###
+### Installation
+
+#### pip

 1. **Install Lightman AI**:
 ```bash

@@ -78,10 +80,13 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 ```bash
 lightman run
 ```
+#### Docker
+1. **Pull the image**
+```bash
+docker pull elementsinteractive/lightman-ai:latest
+```

-
-
-1. **Create configuration file**:
+2. **Create configuration file**:
 ```bash
 echo '[default]
 agent = "openai"

@@ -92,7 +97,7 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
 ```

-
+3. **Run with Docker**:
 ```bash
 docker run --rm \
 -v $(pwd)/lightman.toml:/app/lightman.toml \

@@ -101,55 +106,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 lightman run --config-file /app/lightman.toml --score 7
 ```

-
-
-## 📥 Installation
-
-### Docker
-Lightman AI has an available Docker image on Docker Hub:
-
-```bash
-# Pull the latest image
-docker pull elementsinteractive/lightman-ai:latest
+## 🔧 Usage

-
-
-echo '[default]
-agent = "openai"
-score_threshold = 8
-prompt = "development"
-
-[prompts]
-development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
-```
+### CLI Options

+| Option | Description | Default |
+|--------|-------------|---------|
+| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
+| `--score` | Minimum relevance score (1-10) | From config file |
+| `--prompt` | Prompt template name | From config file |
+| `--config-file` | Path to configuration file | `lightman.toml` |
+| `--config` | Configuration section to use | `default` |
+| `--env-file` | Path to environment variables file | `.env` |
+| `--dry-run` | Preview results without taking action | `false` |
+| `--prompt-file` | File containing prompt templates | `lightman.toml` |
+| `--start-date` | Start date to retrieve articles | False |
+| `--today` | Retrieve articles from today | False |
+| `--yesterday` | Retrieve articles from yesterday | False |

-
-
-docker run -d \
---name lightman-ai \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="your-api-key" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml
-```
+### Environment Variables:
+lightman-ai uses the following environment variables:

-**Docker Environment Variables:**
 - `OPENAI_API_KEY` - Your OpenAI API key
 - `GOOGLE_API_KEY` - Your Google Gemini API key
 - `SERVICE_DESK_URL` - Service desk instance URL (optional)
 - `SERVICE_DESK_USER` - Service desk username (optional)
 - `SERVICE_DESK_TOKEN` - Service desk API token (optional)
+- `TIME_ZONE` - Your time zone (optional, defaults to UTC. i.e. "Europe/Amsterdam".)

+By default, it will try to load a `.env` file. You can also specify a different path with the `--env-file` option.


-### Development Installation
-```bash
-git clone git@github.com:elementsinteractive/lightman-ai.git
-cd lightman_ai
-just venv # Creates virtual environment and installs dependencies
-```
-
 ## ⚙️ Configuration

 Lightman AI uses TOML configuration files for flexible setup. Create a `lightman.toml` file:

@@ -203,30 +190,8 @@ custom_prompt = """
 Your custom analysis prompt here...
 """
 ```
-### Environment Variables
-
-Set up your AI provider credentials:
-
-```bash
-# For OpenAI
-export OPENAI_API_KEY="your-openai-api-key"
-
-# For Google Gemini
-export GOOGLE_API_KEY="your-google-api-key"
-
-# Optional: Service desk integration
-export SERVICE_DESK_URL="https://your-company.atlassian.net"
-export SERVICE_DESK_USER="your-username"
-export SERVICE_DESK_TOKEN="your-api-token"
-
-```
-You can also specify a different path for your .env file with the `--env-file` option
-
-
-## 🔧 Usage
-
-### Basic Usage

+### Examples
 ```bash
 # Run with default settings
 lightman run

@@ -242,69 +207,30 @@ lightman run --env-file production.env --agent openai --score 8

 # Dry run (preview results without creating service desk tickets)
 lightman run --dry-run --agent openai --score 9
-```

-
+# Retrieve all the news from today
+lightman run --agent openai --score 8 --prompt security_critical --today

-
-
-| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
-| `--score` | Minimum relevance score (1-10) | From config file |
-| `--prompt` | Prompt template name | From config file |
-| `--config-file` | Path to configuration file | `lightman.toml` |
-| `--config` | Configuration section to use | `default` |
-| `--env-file` | Path to environment variables file | `.env` |
-| `--dry-run` | Preview results without taking action | `false` |
-| `--prompt-file` | File containing prompt templates | `lightman.toml` |
-| `--start-date` | Start date to retrieve articles | None |
-| `--today` | Retrieve articles from today | None |
-
-### Example Workflows
-
-**Daily Security Monitoring**:
-```bash
-# Local installation
-lightman run --agent openai --score 8 --prompt security_critical
-
-# With custom environment file
-lightman run --env-file production.env --agent openai --score 8
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="$OPENAI_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --score 8
+# Retrieve all the news from yesterday
+lightman run --agent openai --score 8 --prompt security_critical --yesterday
 ```


-
-
-
-
-
-# With environment-specific settings
-lightman run --env-file weekly.env --agent gemini --score 6
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e GOOGLE_API_KEY="$GOOGLE_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --agent gemini --score 6
-```
+### Development Installation
+In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
+- Python 3.13
+- [just](https://github.com/casey/just)
+- [uv](https://docs.astral.sh/uv/getting-started/installation/)

-
+Then simply:
 ```bash
-
-
-
-#
-
+git clone git@github.com:elementsinteractive/lightman-ai.git
+cd lightman_ai
+just venv # Creates virtual environment and installs dependencies
+just test # Runs the tests
+just eval # Runs the evaluation framework
 ```

-
-
 ## 📊 Evaluation & Testing

 Lightman AI includes a comprehensive evaluation framework to test and optimize AI agent performance:

{lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/RECORD
CHANGED

@@ -1,36 +1,38 @@
 lightman_ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lightman_ai/cli.py,sha256=
+lightman_ai/cli.py,sha256=u9zuj3ksDu4bPKd_Cb409m-la12ctZjFEg8GSER8ykc,4685
 lightman_ai/constants.py,sha256=qfZgcTLK51l--JDhns-uRANjccFEPN6iTFsJKn8T4vs,101
-lightman_ai/
+lightman_ai/exceptions.py,sha256=bEnVe7kW-x3FR70wrx7H4RCoZBGe9o5o6gDwuYv6oio,179
+lightman_ai/main.py,sha256=ZA8XJfbmBaEu8-7sz02_xzb3Do_J_KaP0Tr-1Ve8SWU,3683
 lightman_ai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lightman_ai/utils.py,sha256=esapbm_vZwAEmczskMbQ4xZQhO4RXSVR0F910dl-2D4,1112
 lightman_ai/ai/utils.py,sha256=vFTN8Tto7QHMV4DpTzn8Dz06niHm_bbgPivyc-rD1aE,509
 lightman_ai/ai/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/ai/base/agent.py,sha256=fFjF_nCzoba4hbSfRPmIm9dxW6A45qJWTrSbrq9NFto,985
-lightman_ai/ai/base/exceptions.py,sha256=
+lightman_ai/ai/base/exceptions.py,sha256=7JcU6Q9gTsTE_hOPEyGJrQ8SUhQ5FnDpgY4iU_3aYIY,105
 lightman_ai/ai/gemini/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/ai/gemini/agent.py,sha256=1pDdKrm2wCxTFxlHQHXWENiys1FWar6kxppGYFC3YgI,641
-lightman_ai/ai/gemini/exceptions.py,sha256=
+lightman_ai/ai/gemini/exceptions.py,sha256=8BwiMf3v_LMk-VxHzMgwqornZsk-Q6fcg7dNMUq-lhc,405
 lightman_ai/ai/openai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/ai/openai/agent.py,sha256=bMZS-F58WaXDR6f3j5DhtZ4077Gy80_oGYJczB7nV74,1043
-lightman_ai/ai/openai/exceptions.py,sha256=
+lightman_ai/ai/openai/exceptions.py,sha256=ZGmfc3-msupqABtjTgT_1aymmosx8bHOfT-3iBtXS_0,2412
 lightman_ai/article/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/article/exceptions.py,sha256=dvAN2bhFKPytgbaAOM-2ucNjH3lLhlbi5MMlLtNuO1E,212
 lightman_ai/article/models.py,sha256=JZaBsqzw7MxC3qORUSJqxru9iSHsbLrgNW5nFc2N6Ks,2491
 lightman_ai/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/core/config.py,sha256=ppWNoNivNqxGi4TOZIMaTGo46W7Ic1a8cO8hWVAkkps,3031
-lightman_ai/core/exceptions.py,sha256=
+lightman_ai/core/exceptions.py,sha256=c13ilQ80fGmiZZpVyZN5Kpuz5mKNqtzfRXVczHBS0SM,237
 lightman_ai/core/sentry.py,sha256=kqO2sBX_29v4qpmcYyhOBuG0hv82OkDBWh1rcyiKVWk,1375
-lightman_ai/core/settings.py,sha256=
+lightman_ai/core/settings.py,sha256=DSApQZS8xypWxrK2RMILZFY78GI0Ze2lcfUH2HluM2Q,976
 lightman_ai/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lightman_ai/integrations/service_desk/__init__.py,sha256=Ijs7_ysJVwiT_Y1U8iPOV17E7dtkVzNXL00d8cgyaCc,66
 lightman_ai/integrations/service_desk/constants.py,sha256=ropNDMengLp96pXD6wq2voWPeBESt6f2Wnfk7845mck,306
-lightman_ai/integrations/service_desk/exceptions.py,sha256=
+lightman_ai/integrations/service_desk/exceptions.py,sha256=FFKcp29FcFYgUzsq8H3knINAAiIbdeqMx3ggGjiq4bg,2515
 lightman_ai/integrations/service_desk/integration.py,sha256=5r3bipao_apaDWLRiH4oiBJFb4c1mvo7Sj2AOQUWiFY,2895
 lightman_ai/sources/base.py,sha256=M_n9H9GdcX2tbBcYkiakDxHCnuwP9fV4BQhNAadBubQ,242
 lightman_ai/sources/exceptions.py,sha256=X43BZ6hx-lZAEyM2q5PQXBw3vKPDx5nmY_uRiuITK9s,379
 lightman_ai/sources/the_hacker_news.py,sha256=JTqbAJ1Pf8QKxvwFg6vWmymgj8LtUpIV6hD0KMOwMG8,3034
-lightman_ai-0.
-lightman_ai-0.
-lightman_ai-0.
-lightman_ai-0.
-lightman_ai-0.
+lightman_ai-1.0.0.dist-info/METADATA,sha256=GY1j9yRPL6CweBq4dNArrxAhPdzoDtNH5_yWEE7LjAc,10804
+lightman_ai-1.0.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lightman_ai-1.0.0.dist-info/entry_points.txt,sha256=9H7Ji-zxbCWAaVL5Yg4fG5va0H_5Tr2kMGGMsghaAas,60
+lightman_ai-1.0.0.dist-info/licenses/LICENSE,sha256=NhxDmY3AGgeEsYHIfDGLNkzBVX94pARRDS8H46JZ1zQ,1076
+lightman_ai-1.0.0.dist-info/RECORD,,

{lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/WHEEL
File without changes

{lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/entry_points.txt
File without changes

{lightman_ai-0.21.2.dist-info → lightman_ai-1.0.0.dist-info}/licenses/LICENSE
File without changes