lightman_ai 0.21.1__tar.gz → 0.22.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/PKG-INFO +57 -123
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/README.md +56 -122
- lightman_ai-0.22.0/VERSION +1 -0
- lightman_ai-0.22.0/src/lightman_ai/ai/base/exceptions.py +4 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/exceptions.py +2 -2
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/exceptions.py +2 -2
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/cli.py +19 -21
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/exceptions.py +2 -2
- lightman_ai-0.22.0/src/lightman_ai/core/settings.py +30 -0
- lightman_ai-0.22.0/src/lightman_ai/exceptions.py +5 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/exceptions.py +2 -2
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/main.py +9 -9
- lightman_ai-0.22.0/src/lightman_ai/utils.py +25 -0
- lightman_ai-0.21.1/VERSION +0 -1
- lightman_ai-0.21.1/src/lightman_ai/ai/base/exceptions.py +0 -4
- lightman_ai-0.21.1/src/lightman_ai/core/settings.py +0 -14
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/.gitignore +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/LICENSE +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/pyproject.toml +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/agent.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/agent.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/agent.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/utils.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/exceptions.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/models.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/constants.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/config.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/sentry.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/__init__.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/constants.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/integration.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/py.typed +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/base.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/exceptions.py +0 -0
- {lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/the_hacker_news.py +0 -0
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/PKG-INFO RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lightman_ai
-Version: 0.21.1
+Version: 0.22.0
 Summary: LLM-powered cybersecurity news aggregator.
 Author-email: sdn4z <imsdn4z@gmail.com>
 License-File: LICENSE
@@ -18,7 +18,7 @@ Description-Content-Type: text/markdown
 
 # 🔍 Lightman AI
 
->
+> LLM-Powered Cybersecurity News Intelligence Platform
 
 ---
 
@@ -49,7 +49,9 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 
 ## 🚀 Quick Start
 
-###
+### Installation
+
+#### pip
 
 1. **Install Lightman AI**:
 ```bash
@@ -78,10 +80,13 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 ```bash
 lightman run
 ```
+#### Docker
+1. **Pull the image**
+```bash
+docker pull elementsinteractive/lightman-ai:latest
+```
 
-
-
-1. **Create configuration file**:
+2. **Create configuration file**:
 ```bash
 echo '[default]
 agent = "openai"
@@ -92,7 +97,7 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
 ```
 
-
+3. **Run with Docker**:
 ```bash
 docker run --rm \
 -v $(pwd)/lightman.toml:/app/lightman.toml \
@@ -101,55 +106,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 lightman run --config-file /app/lightman.toml --score 7
 ```
 
-
-
-## 📥 Installation
-
-### Docker
-Lightman AI has an available Docker image on Docker Hub:
-
-```bash
-# Pull the latest image
-docker pull elementsinteractive/lightman-ai:latest
-
-# Create your configuration file
+## 🔧 Usage
 
-
-agent = "openai"
-score_threshold = 8
-prompt = "development"
-
-[prompts]
-development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
-```
+### CLI Options
 
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
+| `--score` | Minimum relevance score (1-10) | From config file |
+| `--prompt` | Prompt template name | From config file |
+| `--config-file` | Path to configuration file | `lightman.toml` |
+| `--config` | Configuration section to use | `default` |
+| `--env-file` | Path to environment variables file | `.env` |
+| `--dry-run` | Preview results without taking action | `false` |
+| `--prompt-file` | File containing prompt templates | `lightman.toml` |
+| `--start-date` | Start date to retrieve articles | False |
+| `--today` | Retrieve articles from today | False |
+| `--yesterday` | Retrieve articles from yesterday | False |
 
-
-
-docker run -d \
---name lightman-ai \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="your-api-key" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml
-```
+### Environment Variables:
+lightman-ai uses the following environment variables:
 
-**Docker Environment Variables:**
 - `OPENAI_API_KEY` - Your OpenAI API key
 - `GOOGLE_API_KEY` - Your Google Gemini API key
 - `SERVICE_DESK_URL` - Service desk instance URL (optional)
 - `SERVICE_DESK_USER` - Service desk username (optional)
 - `SERVICE_DESK_TOKEN` - Service desk API token (optional)
+- `TIME_ZONE` - Your time zone (optional, defaults to UTC. i.e. "Europe/Amsterdam".)
 
+By default, it will try to load a `.env` file. You can also specify a different path with the `--env-file` option.
 
 
-### Development Installation
-```bash
-git clone git@github.com:elementsinteractive/lightman-ai.git
-cd lightman_ai
-just venv # Creates virtual environment and installs dependencies
-```
-
 ## ⚙️ Configuration
 
 Lightman AI uses TOML configuration files for flexible setup. Create a `lightman.toml` file:
@@ -203,30 +190,8 @@ custom_prompt = """
 Your custom analysis prompt here...
 """
 ```
-### Environment Variables
-
-Set up your AI provider credentials:
-
-```bash
-# For OpenAI
-export OPENAI_API_KEY="your-openai-api-key"
-
-# For Google Gemini
-export GOOGLE_API_KEY="your-google-api-key"
-
-# Optional: Service desk integration
-export SERVICE_DESK_URL="https://your-company.atlassian.net"
-export SERVICE_DESK_USER="your-username"
-export SERVICE_DESK_TOKEN="your-api-token"
-
-```
-You can also specify a different path for your .env file with the `--env-file` option
-
-
-## 🔧 Usage
-
-### Basic Usage
 
+### Examples
 ```bash
 # Run with default settings
 lightman run
@@ -242,67 +207,30 @@ lightman run --env-file production.env --agent openai --score 8
 
 # Dry run (preview results without creating service desk tickets)
 lightman run --dry-run --agent openai --score 9
-```
 
-
+# Retrieve all the news from today
+lightman run --agent openai --score 8 --prompt security_critical --today
 
-
-
-| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
-| `--score` | Minimum relevance score (1-10) | From config file |
-| `--prompt` | Prompt template name | From config file |
-| `--config-file` | Path to configuration file | `lightman.toml` |
-| `--config` | Configuration section to use | `default` |
-| `--env-file` | Path to environment variables file | `.env` |
-| `--dry-run` | Preview results without taking action | `false` |
-| `--prompt-file` | File containing prompt templates | `lightman.toml` |
-
-### Example Workflows
-
-**Daily Security Monitoring**:
-```bash
-# Local installation
-lightman run --agent openai --score 8 --prompt security_critical
-
-# With custom environment file
-lightman run --env-file production.env --agent openai --score 8
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="$OPENAI_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --score 8
+# Retrieve all the news from yesterday
+lightman run --agent openai --score 8 --prompt security_critical --yesterday
 ```
 
 
-
-
-
-
-# With environment-specific settings
-lightman run --env-file weekly.env --agent gemini --score 6
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e GOOGLE_API_KEY="$GOOGLE_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --agent gemini --score 6
-```
+### Development Installation
+In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
+- Python 3.13
+- [just](https://github.com/casey/just)
+- [uv](https://docs.astral.sh/uv/getting-started/installation/)
 
-
+Then simply:
 ```bash
-
-
-
-#
-
+git clone git@github.com:elementsinteractive/lightman-ai.git
+cd lightman_ai
+just venv # Creates virtual environment and installs dependencies
+just test # Runs the tests
+just eval # Runs the evaluation framework
 ```
 
-
-
 ## 📊 Evaluation & Testing
 
 Lightman AI includes a comprehensive evaluation framework to test and optimize AI agent performance:
@@ -364,11 +292,17 @@ This approach ensures that performance metrics reflect real-world usage scenario
 **Make sure to fill in the `RELEVANT_ARTICLES` with the ones you classify as relevant, so that you can compare the accuracy after running the `eval` script.***
 
 ## Sentry
+Sentry is **optional**: the application does not require it to function, and all features will work even if Sentry is not configured or fails to start.
+If you install the project via pip and want Sentry installed, run:
+
+```bash
+pip install lightman-ai[sentry]
+```
+Sentry comes by default with the Docker image. If you don't want to use it, simply do not set `SENTRY_DSN` env variable.
 
-
-
-
-- Sentry is **optional**: the application does not require it to function, and all features will work even if Sentry is not configured or fails to start.
+The application will automatically pick up and use environment variables if they are present in your environment or `.env` file.
+To enable Sentry, set the `SENTRY_DSN` environment variable. This is **mandatory** for Sentry to be enabled. If `SENTRY_DSN` is not set, Sentry will be skipped and the application will run normally.
+If Sentry fails to initialize for any reason (e.g., network issues, invalid DSN), the application will log a warning and continue execution without error monitoring, and logging to stdout.
 
 ## 📄 License
 
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/README.md RENAMED

@@ -1,6 +1,6 @@
 # 🔍 Lightman AI
 
->
+> LLM-Powered Cybersecurity News Intelligence Platform
 
 ---
 
@@ -31,7 +31,9 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 
 ## 🚀 Quick Start
 
-###
+### Installation
+
+#### pip
 
 1. **Install Lightman AI**:
 ```bash
@@ -60,10 +62,13 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 ```bash
 lightman run
 ```
+#### Docker
+1. **Pull the image**
+```bash
+docker pull elementsinteractive/lightman-ai:latest
+```
 
-
-
-1. **Create configuration file**:
+2. **Create configuration file**:
 ```bash
 echo '[default]
 agent = "openai"
@@ -74,7 +79,7 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
 ```
 
-
+3. **Run with Docker**:
 ```bash
 docker run --rm \
 -v $(pwd)/lightman.toml:/app/lightman.toml \
@@ -83,55 +88,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 lightman run --config-file /app/lightman.toml --score 7
 ```
 
-
-
-## 📥 Installation
-
-### Docker
-Lightman AI has an available Docker image on Docker Hub:
-
-```bash
-# Pull the latest image
-docker pull elementsinteractive/lightman-ai:latest
-
-# Create your configuration file
+## 🔧 Usage
 
-
-agent = "openai"
-score_threshold = 8
-prompt = "development"
-
-[prompts]
-development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
-```
+### CLI Options
 
+| Option | Description | Default |
+|--------|-------------|---------|
+| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
+| `--score` | Minimum relevance score (1-10) | From config file |
+| `--prompt` | Prompt template name | From config file |
+| `--config-file` | Path to configuration file | `lightman.toml` |
+| `--config` | Configuration section to use | `default` |
+| `--env-file` | Path to environment variables file | `.env` |
+| `--dry-run` | Preview results without taking action | `false` |
+| `--prompt-file` | File containing prompt templates | `lightman.toml` |
+| `--start-date` | Start date to retrieve articles | False |
+| `--today` | Retrieve articles from today | False |
+| `--yesterday` | Retrieve articles from yesterday | False |
 
-
-
-docker run -d \
---name lightman-ai \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="your-api-key" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml
-```
+### Environment Variables:
+lightman-ai uses the following environment variables:
 
-**Docker Environment Variables:**
 - `OPENAI_API_KEY` - Your OpenAI API key
 - `GOOGLE_API_KEY` - Your Google Gemini API key
 - `SERVICE_DESK_URL` - Service desk instance URL (optional)
 - `SERVICE_DESK_USER` - Service desk username (optional)
 - `SERVICE_DESK_TOKEN` - Service desk API token (optional)
+- `TIME_ZONE` - Your time zone (optional, defaults to UTC. i.e. "Europe/Amsterdam".)
 
+By default, it will try to load a `.env` file. You can also specify a different path with the `--env-file` option.
 
 
-### Development Installation
-```bash
-git clone git@github.com:elementsinteractive/lightman-ai.git
-cd lightman_ai
-just venv # Creates virtual environment and installs dependencies
-```
-
 ## ⚙️ Configuration
 
 Lightman AI uses TOML configuration files for flexible setup. Create a `lightman.toml` file:
@@ -185,30 +172,8 @@ custom_prompt = """
 Your custom analysis prompt here...
 """
 ```
-### Environment Variables
-
-Set up your AI provider credentials:
-
-```bash
-# For OpenAI
-export OPENAI_API_KEY="your-openai-api-key"
-
-# For Google Gemini
-export GOOGLE_API_KEY="your-google-api-key"
-
-# Optional: Service desk integration
-export SERVICE_DESK_URL="https://your-company.atlassian.net"
-export SERVICE_DESK_USER="your-username"
-export SERVICE_DESK_TOKEN="your-api-token"
-
-```
-You can also specify a different path for your .env file with the `--env-file` option
-
-
-## 🔧 Usage
-
-### Basic Usage
 
+### Examples
 ```bash
 # Run with default settings
 lightman run
@@ -224,67 +189,30 @@ lightman run --env-file production.env --agent openai --score 8
 
 # Dry run (preview results without creating service desk tickets)
 lightman run --dry-run --agent openai --score 9
-```
 
-
+# Retrieve all the news from today
+lightman run --agent openai --score 8 --prompt security_critical --today
 
-
-
-| `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
-| `--score` | Minimum relevance score (1-10) | From config file |
-| `--prompt` | Prompt template name | From config file |
-| `--config-file` | Path to configuration file | `lightman.toml` |
-| `--config` | Configuration section to use | `default` |
-| `--env-file` | Path to environment variables file | `.env` |
-| `--dry-run` | Preview results without taking action | `false` |
-| `--prompt-file` | File containing prompt templates | `lightman.toml` |
-
-### Example Workflows
-
-**Daily Security Monitoring**:
-```bash
-# Local installation
-lightman run --agent openai --score 8 --prompt security_critical
-
-# With custom environment file
-lightman run --env-file production.env --agent openai --score 8
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e OPENAI_API_KEY="$OPENAI_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --score 8
+# Retrieve all the news from yesterday
+lightman run --agent openai --score 8 --prompt security_critical --yesterday
 ```
 
 
-
-
-
-
-# With environment-specific settings
-lightman run --env-file weekly.env --agent gemini --score 6
-
-# Docker
-docker run --rm \
--v $(pwd)/lightman.toml:/app/lightman.toml \
--e GOOGLE_API_KEY="$GOOGLE_API_KEY" \
-elementsinteractive/lightman-ai:latest \
-lightman run --config-file /app/lightman.toml --agent gemini --score 6
-```
+### Development Installation
+In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
+- Python 3.13
+- [just](https://github.com/casey/just)
+- [uv](https://docs.astral.sh/uv/getting-started/installation/)
 
-
+Then simply:
 ```bash
-
-
-
-#
-
+git clone git@github.com:elementsinteractive/lightman-ai.git
+cd lightman_ai
+just venv # Creates virtual environment and installs dependencies
+just test # Runs the tests
+just eval # Runs the evaluation framework
 ```
 
-
-
 ## 📊 Evaluation & Testing
 
 Lightman AI includes a comprehensive evaluation framework to test and optimize AI agent performance:
@@ -346,11 +274,17 @@ This approach ensures that performance metrics reflect real-world usage scenario
 **Make sure to fill in the `RELEVANT_ARTICLES` with the ones you classify as relevant, so that you can compare the accuracy after running the `eval` script.***
 
 ## Sentry
+Sentry is **optional**: the application does not require it to function, and all features will work even if Sentry is not configured or fails to start.
+If you install the project via pip and want Sentry installed, run:
+
+```bash
+pip install lightman-ai[sentry]
+```
+Sentry comes by default with the Docker image. If you don't want to use it, simply do not set `SENTRY_DSN` env variable.
 
-
-
-
-- Sentry is **optional**: the application does not require it to function, and all features will work even if Sentry is not configured or fails to start.
+The application will automatically pick up and use environment variables if they are present in your environment or `.env` file.
+To enable Sentry, set the `SENTRY_DSN` environment variable. This is **mandatory** for Sentry to be enabled. If `SENTRY_DSN` is not set, Sentry will be skipped and the application will run normally.
+If Sentry fails to initialize for any reason (e.g., network issues, invalid DSN), the application will log a warning and continue execution without error monitoring, and logging to stdout.
 
 ## 📄 License
 
lightman_ai-0.22.0/VERSION ADDED

@@ -0,0 +1 @@
+v0.22.0
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/exceptions.py RENAMED

@@ -2,10 +2,10 @@ from collections.abc import Generator
 from contextlib import contextmanager
 from typing import Any
 
-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError
 
 
-class BaseGeminiError(
+class BaseGeminiError(BaseLightmanError): ...
 
 
 class GeminiError(BaseGeminiError): ...
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/exceptions.py RENAMED

@@ -4,13 +4,13 @@ from collections.abc import Generator
 from contextlib import contextmanager
 from typing import Any, override
 
-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError
 from pydantic_ai.exceptions import ModelHTTPError
 
 from openai import RateLimitError
 
 
-class BaseOpenAIError(
+class BaseOpenAIError(BaseLightmanError): ...
 
 
 class UnknownOpenAIError(BaseOpenAIError): ...
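The hunks above (and the service desk hunk further down) re-root the provider-specific exception bases under the shared `BaseLightmanError`. A minimal sketch of what that enables, assuming lightman_ai 0.22.0 is installed; the `handle` helper here is illustrative and not part of the package:

```python
from lightman_ai.ai.gemini.exceptions import GeminiError
from lightman_ai.ai.openai.exceptions import UnknownOpenAIError
from lightman_ai.core.exceptions import BaseLightmanError


def handle(err: Exception) -> None:
    # Any provider-specific error can now be caught through the shared base class.
    if isinstance(err, BaseLightmanError):
        print(f"lightman error: {err!r}")
    else:
        raise err


handle(GeminiError("quota exceeded"))
handle(UnknownOpenAIError("unexpected response"))
```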
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/cli.py RENAMED

@@ -1,7 +1,6 @@
 import logging
-from datetime import date
+from datetime import date
 from importlib import metadata
-from zoneinfo import ZoneInfo
 
 import click
 from dotenv import load_dotenv
@@ -10,8 +9,10 @@ from lightman_ai.constants import DEFAULT_CONFIG_FILE, DEFAULT_CONFIG_SECTION, D
 from lightman_ai.core.config import FileConfig, FinalConfig, PromptConfig
 from lightman_ai.core.exceptions import ConfigNotFoundError, InvalidConfigError, PromptNotFoundError
 from lightman_ai.core.sentry import configure_sentry
-from lightman_ai.core.settings import
+from lightman_ai.core.settings import Settings
+from lightman_ai.exceptions import MultipleDateSourcesError
 from lightman_ai.main import lightman
+from lightman_ai.utils import get_start_date
 
 logger = logging.getLogger("lightman")
 
@@ -58,7 +59,7 @@ def entry_point() -> None:
 @click.option(
     "--env-file",
     type=str,
-    default=
+    default=None,
     help=(f"Path to the environment file. Defaults to `{DEFAULT_ENV_FILE}`."),
 )
 @click.option(
@@ -70,6 +71,7 @@ def entry_point() -> None:
 )
 @click.option("--start-date", type=click.DateTime(formats=["%Y-%m-%d"]), help="Start date to retrieve articles")
 @click.option("--today", is_flag=True, help="Retrieve articles from today.")
+@click.option("--yesterday", is_flag=True, help="Retrieve articles from yesterday.")
 def run(
     agent: str,
     prompt: str,
@@ -78,41 +80,37 @@ def run(
     score: int | None,
     config_file: str,
     config: str,
-    env_file: str,
+    env_file: str | None,
     dry_run: bool,
     start_date: date | None,
     today: bool,
+    yesterday: bool,
 ) -> int:
     """
     Entrypoint of the application.
 
-    Holds no logic. It calls the main method and returns 0 when succesful .
+    Holds no logic. It loads the configuration, calls the main method and returns 0 when succesful .
     """
-    load_dotenv(env_file)
+    load_dotenv(env_file or DEFAULT_ENV_FILE)  # TODO refs: #112
     configure_sentry()
 
-
-
-
-
-
-    elif isinstance(start_date, date):
-        start_datetime = datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
-    else:
-        start_datetime = None
+    settings = Settings.try_load_from_file(env_file)
+    try:
+        start_datetime = get_start_date(settings, yesterday, today, start_date)
+    except MultipleDateSourcesError as e:
+        raise click.UsageError(e.args[0]) from e
 
     try:
         prompt_config = PromptConfig.get_config_from_file(path=prompt_file)
         config_from_file = FileConfig.get_config_from_file(config_section=config, path=config_file)
         final_config = FinalConfig.init_from_dict(
             data={
-                "agent": agent or config_from_file.agent,
+                "agent": agent or config_from_file.agent or settings.AGENT,
                 "prompt": prompt or config_from_file.prompt,
-                "score_threshold": score or config_from_file.score_threshold,
+                "score_threshold": score or config_from_file.score_threshold or settings.SCORE,
                 "model": model or config_from_file.model,
             }
         )
-
         prompt_text = prompt_config.get_prompt(final_config.prompt)
     except (InvalidConfigError, PromptNotFoundError, ConfigNotFoundError) as err:
         raise click.BadParameter(err.args[0]) from None
@@ -122,8 +120,8 @@ def run(
         prompt=prompt_text,
         score_threshold=final_config.score_threshold,
         dry_run=dry_run,
-
-
+        service_desk_project_key=config_from_file.service_desk_project_key,
+        service_desk_request_id_type=config_from_file.service_desk_request_id_type,
         model=final_config.model,
         start_date=start_datetime,
     )
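The new `run` wiring resolves `agent` and `score_threshold` through a three-level fallback: CLI flag, then the value from the config file, then the `Settings` default. A self-contained sketch of that chain; `FakeFileConfig` and `resolve` are hypothetical stand-ins, only the precedence mirrors the hunk above:

```python
from dataclasses import dataclass


@dataclass
class FakeFileConfig:
    # Stand-in for the real FileConfig section loaded from lightman.toml.
    agent: str | None = None
    score_threshold: int | None = None


AGENT_DEFAULT = "openai"  # Settings.AGENT default in the new core/settings.py
SCORE_DEFAULT = 8         # Settings.SCORE default


def resolve(cli_agent: str | None, cli_score: int | None, cfg: FakeFileConfig) -> tuple[str, int]:
    # CLI flag wins, then the config file value, then the Settings default.
    return (
        cli_agent or cfg.agent or AGENT_DEFAULT,
        cli_score or cfg.score_threshold or SCORE_DEFAULT,
    )


print(resolve(None, None, FakeFileConfig()))                       # ('openai', 8)
print(resolve("gemini", None, FakeFileConfig(score_threshold=6)))  # ('gemini', 6)
```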
lightman_ai-0.22.0/src/lightman_ai/core/settings.py ADDED

@@ -0,0 +1,30 @@
+import logging
+from pathlib import Path
+from typing import Any, Self
+
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+logger = logging.getLogger("lightman")
+
+
+class Settings(BaseSettings):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+    AGENT: str = "openai"
+    SCORE: int = 8
+    TIME_ZONE: str = "UTC"
+    model_config = SettingsConfigDict(extra="ignore")
+
+    @classmethod
+    def try_load_from_file(cls, env_file: str | None = None) -> Self:
+        """
+        Initialize Settings class and returns an instance.
+
+        It tries to load env variables from the env file. Variables set in the environment take precendence.
+
+        If the env file is not present, it continues execution, following pydantic-settings' behaviour.
+        """
+        if env_file and not Path(env_file).exists():
+            logger.warning("env file `%s` not found.", env_file)
+        return cls(_env_file=env_file)
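For reference, a minimal usage sketch of the new `Settings.try_load_from_file` helper, assuming lightman_ai 0.22.0 and pydantic-settings are installed; with pydantic-settings' default source priority, variables already set in the environment override values read from the env file:

```python
from lightman_ai.core.settings import Settings

# A missing env file only logs a warning; the defaults then apply
# (AGENT="openai", SCORE=8, TIME_ZONE="UTC"), unless overridden by the environment.
settings = Settings.try_load_from_file(".env")
print(settings.AGENT, settings.SCORE, settings.TIME_ZONE)
```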
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/exceptions.py RENAMED

@@ -4,12 +4,12 @@ from contextlib import asynccontextmanager
 from typing import Any
 
 import httpx
-from lightman_ai.core.exceptions import
+from lightman_ai.core.exceptions import BaseLightmanError
 
 logger = logging.getLogger("lightman")
 
 
-class BaseServiceDeskError(
+class BaseServiceDeskError(BaseLightmanError):
     """Base exception for all SERVICE_DESK integration errors."""
 
 
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/main.py RENAMED

@@ -24,17 +24,17 @@ def _classify_articles(articles: ArticlesList, agent: BaseAgent) -> SelectedArti
 def _create_service_desk_issues(
     selected_articles: list[SelectedArticle],
     service_desk_client: ServiceDeskIntegration,
-
-
+    service_desk_project_key: str,
+    service_desk_request_id_type: str,
 ) -> None:
     async def schedule_task(article: SelectedArticle) -> None:
         try:
             description = f"*Why is relevant:*\n{article.why_is_relevant}\n\n*Source:* {article.link}\n\n*Score:* {article.relevance_score}/10"
             await service_desk_client.create_request_of_type(
-                project_key=
+                project_key=service_desk_project_key,
                 summary=article.title,
                 description=description,
-                request_id_type=
+                request_id_type=service_desk_request_id_type,
             )
             logger.info("Created issue for article %s", article.link)
         except Exception:
@@ -57,8 +57,8 @@ def lightman(
     agent: str,
     prompt: str,
     score_threshold: int,
-
-
+    service_desk_project_key: str | None = None,
+    service_desk_request_id_type: str | None = None,
     dry_run: bool = False,
     model: str | None = None,
     start_date: datetime | None = None,
@@ -83,15 +83,15 @@ def lightman(
         logger.info("No articles found to be relevant. Total returned articles by AI %s", len(classified_articles))
 
     if not dry_run:
-        if not
+        if not service_desk_project_key or not service_desk_request_id_type:
             raise ValueError("Missing Service Desk's project key or request id type")
 
         service_desk_client = ServiceDeskIntegration.from_env()
         _create_service_desk_issues(
             selected_articles=selected_articles,
             service_desk_client=service_desk_client,
-
-
+            service_desk_project_key=service_desk_project_key,
+            service_desk_request_id_type=service_desk_request_id_type,
         )
 
     return selected_articles
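`lightman()` now receives the Service Desk project key and request type as explicit arguments, and they are only required when issues are actually created. A hedged sketch of a dry-run call based on the signature above; the prompt text and score here are placeholders:

```python
from lightman_ai.main import lightman

# dry_run=True skips ticket creation, so the Service Desk arguments may stay None.
selected = lightman(
    agent="openai",
    prompt="Analyze cybersecurity news for relevance to our organization.",
    score_threshold=8,
    dry_run=True,
)
print(f"{len(selected)} relevant articles")
```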
lightman_ai-0.22.0/src/lightman_ai/utils.py ADDED

@@ -0,0 +1,25 @@
+from datetime import date, datetime, time, timedelta
+from zoneinfo import ZoneInfo
+
+from lightman_ai.core.settings import Settings
+from lightman_ai.exceptions import MultipleDateSourcesError
+
+
+def get_start_date(settings: Settings, yesterday: bool, today: bool, start_date: date | None) -> datetime | None:
+    mutually_exclusive_date_fields = [x for x in [start_date, today, yesterday] if x]
+
+    if len(mutually_exclusive_date_fields) > 1:
+        raise MultipleDateSourcesError(
+            "--today, --yesterday and --start-date are mutually exclusive. Set one at a time."
+        )
+
+    if today:
+        now = datetime.now(ZoneInfo(settings.TIME_ZONE))
+        return datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    elif yesterday:
+        yesterday_date = datetime.now(ZoneInfo(settings.TIME_ZONE)) - timedelta(days=1)
+        return datetime.combine(yesterday_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    elif isinstance(start_date, date):
+        return datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+    else:
+        return None
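A short sketch of how the new `get_start_date` helper behaves, assuming lightman_ai 0.22.0 is installed; the `TIME_ZONE` value and the example date are arbitrary:

```python
from datetime import date

from lightman_ai.core.settings import Settings
from lightman_ai.exceptions import MultipleDateSourcesError
from lightman_ai.utils import get_start_date

settings = Settings(TIME_ZONE="Europe/Amsterdam")

# --start-date becomes midnight of that day in the configured time zone.
print(get_start_date(settings, yesterday=False, today=False, start_date=date(2025, 1, 15)))

# The mutually exclusive flags raise, which cli.py converts into a click.UsageError.
try:
    get_start_date(settings, yesterday=True, today=True, start_date=None)
except MultipleDateSourcesError as err:
    print(err)
```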
lightman_ai-0.21.1/VERSION DELETED

@@ -1 +0,0 @@
-v0.21.1
lightman_ai-0.21.1/src/lightman_ai/core/settings.py DELETED

@@ -1,14 +0,0 @@
-from typing import Any
-
-from pydantic_settings import BaseSettings
-
-
-class Settings(BaseSettings):
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(*args, **kwargs)
-
-    PARALLEL_WORKERS: int = 5
-    TIME_ZONE: str = "UTC"
-
-
-settings = Settings()
{lightman_ai-0.21.1 → lightman_ai-0.22.0}/.gitignore RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/LICENSE RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/pyproject.toml RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/agent.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/agent.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/agent.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/ai/utils.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/exceptions.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/article/models.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/constants.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/config.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/core/sentry.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/__init__.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/constants.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/integration.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/py.typed RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/base.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/exceptions.py RENAMED
File without changes

{lightman_ai-0.21.1 → lightman_ai-0.22.0}/src/lightman_ai/sources/the_hacker_news.py RENAMED
File without changes