lightman_ai 0.21.2__tar.gz → 0.22.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/PKG-INFO +47 -121
  2. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/README.md +46 -120
  3. lightman_ai-0.22.0/VERSION +1 -0
  4. lightman_ai-0.22.0/src/lightman_ai/ai/base/exceptions.py +4 -0
  5. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/exceptions.py +2 -2
  6. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/exceptions.py +2 -2
  7. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/cli.py +19 -21
  8. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/core/exceptions.py +2 -2
  9. lightman_ai-0.22.0/src/lightman_ai/core/settings.py +30 -0
  10. lightman_ai-0.22.0/src/lightman_ai/exceptions.py +5 -0
  11. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/exceptions.py +2 -2
  12. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/main.py +9 -9
  13. lightman_ai-0.22.0/src/lightman_ai/utils.py +25 -0
  14. lightman_ai-0.21.2/VERSION +0 -1
  15. lightman_ai-0.21.2/src/lightman_ai/ai/base/exceptions.py +0 -4
  16. lightman_ai-0.21.2/src/lightman_ai/core/settings.py +0 -14
  17. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/.gitignore +0 -0
  18. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/LICENSE +0 -0
  19. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/pyproject.toml +0 -0
  20. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/__init__.py +0 -0
  21. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/__init__.py +0 -0
  22. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/base/agent.py +0 -0
  23. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/__init__.py +0 -0
  24. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/agent.py +0 -0
  25. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/__init__.py +0 -0
  26. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/agent.py +0 -0
  27. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/utils.py +0 -0
  28. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/article/__init__.py +0 -0
  29. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/article/exceptions.py +0 -0
  30. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/article/models.py +0 -0
  31. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/constants.py +0 -0
  32. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/core/__init__.py +0 -0
  33. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/core/config.py +0 -0
  34. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/core/sentry.py +0 -0
  35. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/__init__.py +0 -0
  36. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/__init__.py +0 -0
  37. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/constants.py +0 -0
  38. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/integration.py +0 -0
  39. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/py.typed +0 -0
  40. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/sources/base.py +0 -0
  41. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/sources/exceptions.py +0 -0
  42. {lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/sources/the_hacker_news.py +0 -0
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lightman_ai
- Version: 0.21.2
+ Version: 0.22.0
  Summary: LLM-powered cybersecurity news aggregator.
  Author-email: sdn4z <imsdn4z@gmail.com>
  License-File: LICENSE
@@ -18,7 +18,7 @@ Description-Content-Type: text/markdown

  # 🔍 Lightman AI

- > **AI-Powered Cybersecurity News Intelligence Platform*
+ > LLM-Powered Cybersecurity News Intelligence Platform

  ---

@@ -49,7 +49,9 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment

  ## 🚀 Quick Start

- ### pip
+ ### Installation
+
+ #### pip

  1. **Install Lightman AI**:
  ```bash
@@ -78,10 +80,13 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  ```bash
  lightman run
  ```
+ #### Docker
+ 1. **Pull the image**
+ ```bash
+ docker pull elementsinteractive/lightman-ai:latest
+ ```

- ### Docker
-
- 1. **Create configuration file**:
+ 2. **Create configuration file**:
  ```bash
  echo '[default]
  agent = "openai"
@@ -92,7 +97,7 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
  ```

- 2. **Run with Docker**:
+ 3. **Run with Docker**:
  ```bash
  docker run --rm \
  -v $(pwd)/lightman.toml:/app/lightman.toml \
@@ -101,55 +106,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  lightman run --config-file /app/lightman.toml --score 7
  ```

- 4. **View results**: Lightman will analyze cybersecurity news and output relevant articles that meet your score threshold.
-
- ## 📥 Installation
-
- ### Docker
- Lightman AI has an available Docker image on Docker Hub:
-
- ```bash
- # Pull the latest image
- docker pull elementsinteractive/lightman-ai:latest
+ ## 🔧 Usage

- # Create your configuration file
-
- echo '[default]
- agent = "openai"
- score_threshold = 8
- prompt = "development"
-
- [prompts]
- development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
- ```
+ ### CLI Options

+ | Option | Description | Default |
+ |--------|-------------|---------|
+ | `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
+ | `--score` | Minimum relevance score (1-10) | From config file |
+ | `--prompt` | Prompt template name | From config file |
+ | `--config-file` | Path to configuration file | `lightman.toml` |
+ | `--config` | Configuration section to use | `default` |
+ | `--env-file` | Path to environment variables file | `.env` |
+ | `--dry-run` | Preview results without taking action | `false` |
+ | `--prompt-file` | File containing prompt templates | `lightman.toml` |
+ | `--start-date` | Start date to retrieve articles | False |
+ | `--today` | Retrieve articles from today | False |
+ | `--yesterday` | Retrieve articles from yesterday | False |

- # Run with mounted configuration
- ```bash
- docker run -d \
- --name lightman-ai \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e OPENAI_API_KEY="your-api-key" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml
- ```
+ ### Environment Variables:
+ lightman-ai uses the following environment variables:

- **Docker Environment Variables:**
  - `OPENAI_API_KEY` - Your OpenAI API key
  - `GOOGLE_API_KEY` - Your Google Gemini API key
  - `SERVICE_DESK_URL` - Service desk instance URL (optional)
  - `SERVICE_DESK_USER` - Service desk username (optional)
  - `SERVICE_DESK_TOKEN` - Service desk API token (optional)
+ - `TIME_ZONE` - Your time zone (optional, defaults to UTC. i.e. "Europe/Amsterdam".)

+ By default, it will try to load a `.env` file. You can also specify a different path with the `--env-file` option.


- ### Development Installation
- ```bash
- git clone git@github.com:elementsinteractive/lightman-ai.git
- cd lightman_ai
- just venv # Creates virtual environment and installs dependencies
- ```
-
  ## ⚙️ Configuration

  Lightman AI uses TOML configuration files for flexible setup. Create a `lightman.toml` file:
@@ -203,30 +190,8 @@ custom_prompt = """
  Your custom analysis prompt here...
  """
  ```
- ### Environment Variables
-
- Set up your AI provider credentials:
-
- ```bash
- # For OpenAI
- export OPENAI_API_KEY="your-openai-api-key"
-
- # For Google Gemini
- export GOOGLE_API_KEY="your-google-api-key"
-
- # Optional: Service desk integration
- export SERVICE_DESK_URL="https://your-company.atlassian.net"
- export SERVICE_DESK_USER="your-username"
- export SERVICE_DESK_TOKEN="your-api-token"
-
- ```
- You can also specify a different path for your .env file with the `--env-file` option
-
-
- ## 🔧 Usage
-
- ### Basic Usage

+ ### Examples
  ```bash
  # Run with default settings
  lightman run
@@ -242,69 +207,30 @@ lightman run --env-file production.env --agent openai --score 8

  # Dry run (preview results without creating service desk tickets)
  lightman run --dry-run --agent openai --score 9
- ```

- ### Command Line Options
+ # Retrieve all the news from today
+ lightman run --agent openai --score 8 --prompt security_critical --today

- | Option | Description | Default |
- |--------|-------------|---------|
- | `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
- | `--score` | Minimum relevance score (1-10) | From config file |
- | `--prompt` | Prompt template name | From config file |
- | `--config-file` | Path to configuration file | `lightman.toml` |
- | `--config` | Configuration section to use | `default` |
- | `--env-file` | Path to environment variables file | `.env` |
- | `--dry-run` | Preview results without taking action | `false` |
- | `--prompt-file` | File containing prompt templates | `lightman.toml` |
- | `--start-date` | Start date to retrieve articles | None |
- | `--today` | Retrieve articles from today | None |
-
- ### Example Workflows
-
- **Daily Security Monitoring**:
- ```bash
- # Local installation
- lightman run --agent openai --score 8 --prompt security_critical
-
- # With custom environment file
- lightman run --env-file production.env --agent openai --score 8
-
- # Docker
- docker run --rm \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e OPENAI_API_KEY="$OPENAI_API_KEY" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml --score 8
+ # Retrieve all the news from yesterday
+ lightman run --agent openai --score 8 --prompt security_critical --yesterday
  ```


- **Weekly Risk Assessment**:
- ```bash
- # Local installation
- lightman run --agent gemini --score 6 --prompt weekly_assessment
-
- # With environment-specific settings
- lightman run --env-file weekly.env --agent gemini --score 6
-
- # Docker
- docker run --rm \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e GOOGLE_API_KEY="$GOOGLE_API_KEY" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml --agent gemini --score 6
- ```
+ ### Development Installation
+ In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
+ - Python 3.13
+ - [just](https://github.com/casey/just)
+ - [uv](https://docs.astral.sh/uv/getting-started/installation/)

- **Integration Testing**:
+ Then simply:
  ```bash
- # Test configuration without creating tickets
- lightman run --dry-run --config testing
-
- # Test with staging environment
- lightman run --env-file staging.env --dry-run --config testing
+ git clone git@github.com:elementsinteractive/lightman-ai.git
+ cd lightman_ai
+ just venv # Creates virtual environment and installs dependencies
+ just test # Runs the tests
+ just eval # Runs the evaluation framework
  ```

-
-
  ## 📊 Evaluation & Testing

  Lightman AI includes a comprehensive evaluation framework to test and optimize AI agent performance:
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/README.md
@@ -1,6 +1,6 @@
  # 🔍 Lightman AI

- > **AI-Powered Cybersecurity News Intelligence Platform*
+ > LLM-Powered Cybersecurity News Intelligence Platform

  ---

@@ -31,7 +31,9 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment

  ## 🚀 Quick Start

- ### pip
+ ### Installation
+
+ #### pip

  1. **Install Lightman AI**:
  ```bash
@@ -60,10 +62,13 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  ```bash
  lightman run
  ```
+ #### Docker
+ 1. **Pull the image**
+ ```bash
+ docker pull elementsinteractive/lightman-ai:latest
+ ```

- ### Docker
-
- 1. **Create configuration file**:
+ 2. **Create configuration file**:
  ```bash
  echo '[default]
  agent = "openai"
@@ -74,7 +79,7 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
  ```

- 2. **Run with Docker**:
+ 3. **Run with Docker**:
  ```bash
  docker run --rm \
  -v $(pwd)/lightman.toml:/app/lightman.toml \
@@ -83,55 +88,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
  lightman run --config-file /app/lightman.toml --score 7
  ```

- 4. **View results**: Lightman will analyze cybersecurity news and output relevant articles that meet your score threshold.
-
- ## 📥 Installation
-
- ### Docker
- Lightman AI has an available Docker image on Docker Hub:
-
- ```bash
- # Pull the latest image
- docker pull elementsinteractive/lightman-ai:latest
+ ## 🔧 Usage

- # Create your configuration file
-
- echo '[default]
- agent = "openai"
- score_threshold = 8
- prompt = "development"
-
- [prompts]
- development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
- ```
+ ### CLI Options

+ | Option | Description | Default |
+ |--------|-------------|---------|
+ | `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
+ | `--score` | Minimum relevance score (1-10) | From config file |
+ | `--prompt` | Prompt template name | From config file |
+ | `--config-file` | Path to configuration file | `lightman.toml` |
+ | `--config` | Configuration section to use | `default` |
+ | `--env-file` | Path to environment variables file | `.env` |
+ | `--dry-run` | Preview results without taking action | `false` |
+ | `--prompt-file` | File containing prompt templates | `lightman.toml` |
+ | `--start-date` | Start date to retrieve articles | False |
+ | `--today` | Retrieve articles from today | False |
+ | `--yesterday` | Retrieve articles from yesterday | False |

- # Run with mounted configuration
- ```bash
- docker run -d \
- --name lightman-ai \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e OPENAI_API_KEY="your-api-key" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml
- ```
+ ### Environment Variables:
+ lightman-ai uses the following environment variables:

- **Docker Environment Variables:**
  - `OPENAI_API_KEY` - Your OpenAI API key
  - `GOOGLE_API_KEY` - Your Google Gemini API key
  - `SERVICE_DESK_URL` - Service desk instance URL (optional)
  - `SERVICE_DESK_USER` - Service desk username (optional)
  - `SERVICE_DESK_TOKEN` - Service desk API token (optional)
+ - `TIME_ZONE` - Your time zone (optional, defaults to UTC. i.e. "Europe/Amsterdam".)

+ By default, it will try to load a `.env` file. You can also specify a different path with the `--env-file` option.


- ### Development Installation
- ```bash
- git clone git@github.com:elementsinteractive/lightman-ai.git
- cd lightman_ai
- just venv # Creates virtual environment and installs dependencies
- ```
-
  ## ⚙️ Configuration

  Lightman AI uses TOML configuration files for flexible setup. Create a `lightman.toml` file:
@@ -185,30 +172,8 @@ custom_prompt = """
  Your custom analysis prompt here...
  """
  ```
- ### Environment Variables
-
- Set up your AI provider credentials:
-
- ```bash
- # For OpenAI
- export OPENAI_API_KEY="your-openai-api-key"
-
- # For Google Gemini
- export GOOGLE_API_KEY="your-google-api-key"
-
- # Optional: Service desk integration
- export SERVICE_DESK_URL="https://your-company.atlassian.net"
- export SERVICE_DESK_USER="your-username"
- export SERVICE_DESK_TOKEN="your-api-token"
-
- ```
- You can also specify a different path for your .env file with the `--env-file` option
-
-
- ## 🔧 Usage
-
- ### Basic Usage

+ ### Examples
  ```bash
  # Run with default settings
  lightman run
@@ -224,69 +189,30 @@ lightman run --env-file production.env --agent openai --score 8

  # Dry run (preview results without creating service desk tickets)
  lightman run --dry-run --agent openai --score 9
- ```

- ### Command Line Options
+ # Retrieve all the news from today
+ lightman run --agent openai --score 8 --prompt security_critical --today

- | Option | Description | Default |
- |--------|-------------|---------|
- | `--agent` | AI agent to use (`openai`, `gemini`) | From config file |
- | `--score` | Minimum relevance score (1-10) | From config file |
- | `--prompt` | Prompt template name | From config file |
- | `--config-file` | Path to configuration file | `lightman.toml` |
- | `--config` | Configuration section to use | `default` |
- | `--env-file` | Path to environment variables file | `.env` |
- | `--dry-run` | Preview results without taking action | `false` |
- | `--prompt-file` | File containing prompt templates | `lightman.toml` |
- | `--start-date` | Start date to retrieve articles | None |
- | `--today` | Retrieve articles from today | None |
-
- ### Example Workflows
-
- **Daily Security Monitoring**:
- ```bash
- # Local installation
- lightman run --agent openai --score 8 --prompt security_critical
-
- # With custom environment file
- lightman run --env-file production.env --agent openai --score 8
-
- # Docker
- docker run --rm \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e OPENAI_API_KEY="$OPENAI_API_KEY" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml --score 8
+ # Retrieve all the news from yesterday
+ lightman run --agent openai --score 8 --prompt security_critical --yesterday
  ```


- **Weekly Risk Assessment**:
- ```bash
- # Local installation
- lightman run --agent gemini --score 6 --prompt weekly_assessment
-
- # With environment-specific settings
- lightman run --env-file weekly.env --agent gemini --score 6
-
- # Docker
- docker run --rm \
- -v $(pwd)/lightman.toml:/app/lightman.toml \
- -e GOOGLE_API_KEY="$GOOGLE_API_KEY" \
- elementsinteractive/lightman-ai:latest \
- lightman run --config-file /app/lightman.toml --agent gemini --score 6
- ```
+ ### Development Installation
+ In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
+ - Python 3.13
+ - [just](https://github.com/casey/just)
+ - [uv](https://docs.astral.sh/uv/getting-started/installation/)

- **Integration Testing**:
+ Then simply:
  ```bash
- # Test configuration without creating tickets
- lightman run --dry-run --config testing
-
- # Test with staging environment
- lightman run --env-file staging.env --dry-run --config testing
+ git clone git@github.com:elementsinteractive/lightman-ai.git
+ cd lightman_ai
+ just venv # Creates virtual environment and installs dependencies
+ just test # Runs the tests
+ just eval # Runs the evaluation framework
  ```

-
-
  ## 📊 Evaluation & Testing

  Lightman AI includes a comprehensive evaluation framework to test and optimize AI agent performance:
lightman_ai-0.22.0/VERSION
@@ -0,0 +1 @@
+ v0.22.0
lightman_ai-0.22.0/src/lightman_ai/ai/base/exceptions.py
@@ -0,0 +1,4 @@
+ from lightman_ai.core.exceptions import BaseLightmanError
+
+
+ class BaseAgentError(BaseLightmanError): ...
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/gemini/exceptions.py
@@ -2,10 +2,10 @@ from collections.abc import Generator
  from contextlib import contextmanager
  from typing import Any

- from lightman_ai.core.exceptions import BaseHackermanError
+ from lightman_ai.core.exceptions import BaseLightmanError


- class BaseGeminiError(BaseHackermanError): ...
+ class BaseGeminiError(BaseLightmanError): ...


  class GeminiError(BaseGeminiError): ...
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/ai/openai/exceptions.py
@@ -4,13 +4,13 @@ from collections.abc import Generator
  from contextlib import contextmanager
  from typing import Any, override

- from lightman_ai.core.exceptions import BaseHackermanError
+ from lightman_ai.core.exceptions import BaseLightmanError
  from pydantic_ai.exceptions import ModelHTTPError

  from openai import RateLimitError


- class BaseOpenAIError(BaseHackermanError): ...
+ class BaseOpenAIError(BaseLightmanError): ...


  class UnknownOpenAIError(BaseOpenAIError): ...
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/cli.py
@@ -1,7 +1,6 @@
  import logging
- from datetime import date, datetime, time
+ from datetime import date
  from importlib import metadata
- from zoneinfo import ZoneInfo

  import click
  from dotenv import load_dotenv
@@ -10,8 +9,10 @@ from lightman_ai.constants import DEFAULT_CONFIG_FILE, DEFAULT_CONFIG_SECTION, D
  from lightman_ai.core.config import FileConfig, FinalConfig, PromptConfig
  from lightman_ai.core.exceptions import ConfigNotFoundError, InvalidConfigError, PromptNotFoundError
  from lightman_ai.core.sentry import configure_sentry
- from lightman_ai.core.settings import settings
+ from lightman_ai.core.settings import Settings
+ from lightman_ai.exceptions import MultipleDateSourcesError
  from lightman_ai.main import lightman
+ from lightman_ai.utils import get_start_date

  logger = logging.getLogger("lightman")

@@ -58,7 +59,7 @@ def entry_point() -> None:
  @click.option(
  "--env-file",
  type=str,
- default=DEFAULT_ENV_FILE,
+ default=None,
  help=(f"Path to the environment file. Defaults to `{DEFAULT_ENV_FILE}`."),
  )
  @click.option(
@@ -70,6 +71,7 @@ def entry_point() -> None:
  )
  @click.option("--start-date", type=click.DateTime(formats=["%Y-%m-%d"]), help="Start date to retrieve articles")
  @click.option("--today", is_flag=True, help="Retrieve articles from today.")
+ @click.option("--yesterday", is_flag=True, help="Retrieve articles from yesterday.")
  def run(
  agent: str,
  prompt: str,
@@ -78,41 +80,37 @@ def run(
  score: int | None,
  config_file: str,
  config: str,
- env_file: str,
+ env_file: str | None,
  dry_run: bool,
  start_date: date | None,
  today: bool,
+ yesterday: bool,
  ) -> int:
  """
  Entrypoint of the application.

- Holds no logic. It calls the main method and returns 0 when succesful .
+ Holds no logic. It loads the configuration, calls the main method and returns 0 when succesful .
  """
- load_dotenv(env_file)
+ load_dotenv(env_file or DEFAULT_ENV_FILE) # TODO refs: #112
  configure_sentry()

- if start_date and today:
- raise click.UsageError("--today and --start-date cannot be set at the same time.")
- elif today:
- now = datetime.now(ZoneInfo(settings.TIME_ZONE))
- start_datetime = datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
- elif isinstance(start_date, date):
- start_datetime = datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
- else:
- start_datetime = None
+ settings = Settings.try_load_from_file(env_file)
+ try:
+ start_datetime = get_start_date(settings, yesterday, today, start_date)
+ except MultipleDateSourcesError as e:
+ raise click.UsageError(e.args[0]) from e

  try:
  prompt_config = PromptConfig.get_config_from_file(path=prompt_file)
  config_from_file = FileConfig.get_config_from_file(config_section=config, path=config_file)
  final_config = FinalConfig.init_from_dict(
  data={
- "agent": agent or config_from_file.agent,
+ "agent": agent or config_from_file.agent or settings.AGENT,
  "prompt": prompt or config_from_file.prompt,
- "score_threshold": score or config_from_file.score_threshold,
+ "score_threshold": score or config_from_file.score_threshold or settings.SCORE,
  "model": model or config_from_file.model,
  }
  )
-
  prompt_text = prompt_config.get_prompt(final_config.prompt)
  except (InvalidConfigError, PromptNotFoundError, ConfigNotFoundError) as err:
  raise click.BadParameter(err.args[0]) from None
@@ -122,8 +120,8 @@ def run(
  prompt=prompt_text,
  score_threshold=final_config.score_threshold,
  dry_run=dry_run,
- project_key=config_from_file.service_desk_project_key,
- request_id_type=config_from_file.service_desk_request_id_type,
+ service_desk_project_key=config_from_file.service_desk_project_key,
+ service_desk_request_id_type=config_from_file.service_desk_request_id_type,
  model=final_config.model,
  start_date=start_datetime,
  )
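
The or-chains added to `cli.py` above resolve each option as CLI value, then config-file value, then `Settings` default. A minimal standalone sketch of that precedence rule (illustrative only, not package code):

```python
# Illustrative sketch of the CLI's option-resolution order: an explicit CLI
# value wins, then the config-file value, then the Settings default.
def resolve(cli_value, file_value, settings_default):
    return cli_value or file_value or settings_default


assert resolve(None, None, "openai") == "openai"          # falls back to Settings.AGENT
assert resolve(None, 6, 8) == 6                           # config file beats the Settings default
assert resolve("gemini", "openai", "openai") == "gemini"  # CLI flag wins over everything
```

As with any `or`-based fallback, a falsy value such as `0` would also trigger the fallback; the scores involved here are all in the 1-10 range, so this does not bite in practice.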
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/core/exceptions.py
@@ -1,7 +1,7 @@
- class BaseHackermanError(Exception): ...
+ class BaseLightmanError(Exception): ...


- class BaseConfigError(BaseHackermanError): ...
+ class BaseConfigError(BaseLightmanError): ...


  class InvalidConfigError(BaseConfigError): ...
lightman_ai-0.22.0/src/lightman_ai/core/settings.py
@@ -0,0 +1,30 @@
+ import logging
+ from pathlib import Path
+ from typing import Any, Self
+
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+
+ logger = logging.getLogger("lightman")
+
+
+ class Settings(BaseSettings):
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+ AGENT: str = "openai"
+ SCORE: int = 8
+ TIME_ZONE: str = "UTC"
+ model_config = SettingsConfigDict(extra="ignore")
+
+ @classmethod
+ def try_load_from_file(cls, env_file: str | None = None) -> Self:
+ """
+ Initialize Settings class and returns an instance.
+
+ It tries to load env variables from the env file. Variables set in the environment take precendence.
+
+ If the env file is not present, it continues execution, following pydantic-settings' behaviour.
+ """
+ if env_file and not Path(env_file).exists():
+ logger.warning("env file `%s` not found.", env_file)
+ return cls(_env_file=env_file)
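
The new `Settings.try_load_from_file` above only warns when the given env file is missing and otherwise defers to pydantic-settings. A minimal usage sketch, assuming the package is installed; the file name `.env.production` and the printed values are illustrative, not part of the package:

```python
# Sketch only: how the Settings class introduced above is expected to behave
# according to its docstring. ".env.production" is a hypothetical path.
import os

from lightman_ai.core.settings import Settings

os.environ["TIME_ZONE"] = "Europe/Amsterdam"  # variables set in the environment take precedence over the file

settings = Settings.try_load_from_file(".env.production")  # only logs a warning if the file does not exist
print(settings.AGENT, settings.SCORE, settings.TIME_ZONE)  # e.g. "openai 8 Europe/Amsterdam"
```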
lightman_ai-0.22.0/src/lightman_ai/exceptions.py
@@ -0,0 +1,5 @@
+ from lightman_ai.core.exceptions import BaseLightmanError
+
+
+ class MultipleDateSourcesError(BaseLightmanError):
+ """Exception for when more than one date source is provided."""
{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/integrations/service_desk/exceptions.py
@@ -4,12 +4,12 @@ from contextlib import asynccontextmanager
  from typing import Any

  import httpx
- from lightman_ai.core.exceptions import BaseHackermanError
+ from lightman_ai.core.exceptions import BaseLightmanError

  logger = logging.getLogger("lightman")


- class BaseServiceDeskError(BaseHackermanError):
+ class BaseServiceDeskError(BaseLightmanError):
  """Base exception for all SERVICE_DESK integration errors."""


{lightman_ai-0.21.2 → lightman_ai-0.22.0}/src/lightman_ai/main.py
@@ -24,17 +24,17 @@ def _classify_articles(articles: ArticlesList, agent: BaseAgent) -> SelectedArti
  def _create_service_desk_issues(
  selected_articles: list[SelectedArticle],
  service_desk_client: ServiceDeskIntegration,
- project_key: str,
- request_id_type: str,
+ service_desk_project_key: str,
+ service_desk_request_id_type: str,
  ) -> None:
  async def schedule_task(article: SelectedArticle) -> None:
  try:
  description = f"*Why is relevant:*\n{article.why_is_relevant}\n\n*Source:* {article.link}\n\n*Score:* {article.relevance_score}/10"
  await service_desk_client.create_request_of_type(
- project_key=project_key,
+ project_key=service_desk_project_key,
  summary=article.title,
  description=description,
- request_id_type=request_id_type,
+ request_id_type=service_desk_request_id_type,
  )
  logger.info("Created issue for article %s", article.link)
  except Exception:
@@ -57,8 +57,8 @@ def lightman(
  agent: str,
  prompt: str,
  score_threshold: int,
- project_key: str | None = None,
- request_id_type: str | None = None,
+ service_desk_project_key: str | None = None,
+ service_desk_request_id_type: str | None = None,
  dry_run: bool = False,
  model: str | None = None,
  start_date: datetime | None = None,
@@ -83,15 +83,15 @@ def lightman(
  logger.info("No articles found to be relevant. Total returned articles by AI %s", len(classified_articles))

  if not dry_run:
- if not project_key or not request_id_type:
+ if not service_desk_project_key or not service_desk_request_id_type:
  raise ValueError("Missing Service Desk's project key or request id type")

  service_desk_client = ServiceDeskIntegration.from_env()
  _create_service_desk_issues(
  selected_articles=selected_articles,
  service_desk_client=service_desk_client,
- project_key=project_key,
- request_id_type=request_id_type,
+ service_desk_project_key=service_desk_project_key,
+ service_desk_request_id_type=service_desk_request_id_type,
  )

  return selected_articles
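
The `main.py` hunks above only rename the Service Desk keyword arguments (`project_key` to `service_desk_project_key`, `request_id_type` to `service_desk_request_id_type`). A hedged sketch of a direct call with the new names; it assumes the package is installed, an `OPENAI_API_KEY` is available, and uses purely illustrative values:

```python
# Sketch only: calling lightman() with the renamed keyword arguments.
# A dry run skips ticket creation, so no Service Desk settings are required.
from lightman_ai.main import lightman

selected = lightman(
    agent="openai",
    prompt="Analyze cybersecurity news for relevance to our organization.",
    score_threshold=8,
    dry_run=True,
    service_desk_project_key=None,        # only needed when dry_run is False
    service_desk_request_id_type=None,
)
```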
lightman_ai-0.22.0/src/lightman_ai/utils.py
@@ -0,0 +1,25 @@
+ from datetime import date, datetime, time, timedelta
+ from zoneinfo import ZoneInfo
+
+ from lightman_ai.core.settings import Settings
+ from lightman_ai.exceptions import MultipleDateSourcesError
+
+
+ def get_start_date(settings: Settings, yesterday: bool, today: bool, start_date: date | None) -> datetime | None:
+ mutually_exclusive_date_fields = [x for x in [start_date, today, yesterday] if x]
+
+ if len(mutually_exclusive_date_fields) > 1:
+ raise MultipleDateSourcesError(
+ "--today, --yesterday and --start-date are mutually exclusive. Set one at a time."
+ )
+
+ if today:
+ now = datetime.now(ZoneInfo(settings.TIME_ZONE))
+ return datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+ elif yesterday:
+ yesterday_date = datetime.now(ZoneInfo(settings.TIME_ZONE)) - timedelta(days=1)
+ return datetime.combine(yesterday_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+ elif isinstance(start_date, date):
+ return datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(settings.TIME_ZONE))
+ else:
+ return None
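
A short sketch exercising the new `get_start_date` helper above (assuming the package is installed; the date used is illustrative):

```python
# Sketch only: the helper returns midnight of the requested day in the
# configured time zone, and rejects more than one date source.
from datetime import date

from lightman_ai.core.settings import Settings
from lightman_ai.exceptions import MultipleDateSourcesError
from lightman_ai.utils import get_start_date

settings = Settings(TIME_ZONE="UTC")

print(get_start_date(settings, yesterday=False, today=False, start_date=date(2024, 1, 2)))
# -> 2024-01-02 00:00:00+00:00

try:
    get_start_date(settings, yesterday=True, today=True, start_date=None)
except MultipleDateSourcesError as err:
    print(err)  # the CLI converts this into a click.UsageError
```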
lightman_ai-0.21.2/VERSION
@@ -1 +0,0 @@
- v0.21.2
lightman_ai-0.21.2/src/lightman_ai/ai/base/exceptions.py
@@ -1,4 +0,0 @@
- from lightman_ai.core.exceptions import BaseHackermanError
-
-
- class BaseAgentError(BaseHackermanError): ...
lightman_ai-0.21.2/src/lightman_ai/core/settings.py
@@ -1,14 +0,0 @@
- from typing import Any
-
- from pydantic_settings import BaseSettings
-
-
- class Settings(BaseSettings):
- def __init__(self, *args: Any, **kwargs: Any) -> None:
- super().__init__(*args, **kwargs)
-
- PARALLEL_WORKERS: int = 5
- TIME_ZONE: str = "UTC"
-
-
- settings = Settings()