lightman_ai 0.22.0__tar.gz → 1.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lightman_ai-0.22.0/README.md → lightman_ai-1.1.0/PKG-INFO +65 -4
- lightman_ai-0.22.0/PKG-INFO → lightman_ai-1.1.0/README.md +47 -22
- lightman_ai-1.1.0/VERSION +1 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/pyproject.toml +2 -1
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/base/agent.py +12 -8
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/gemini/agent.py +4 -3
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/openai/agent.py +5 -4
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/cli.py +41 -9
- lightman_ai-1.1.0/src/lightman_ai/constants.py +12 -0
- lightman_ai-1.1.0/src/lightman_ai/core/sentry.py +32 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/main.py +2 -6
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/sources/the_hacker_news.py +2 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/utils.py +6 -7
- lightman_ai-0.22.0/VERSION +0 -1
- lightman_ai-0.22.0/src/lightman_ai/constants.py +0 -5
- lightman_ai-0.22.0/src/lightman_ai/core/sentry.py +0 -39
- lightman_ai-0.22.0/src/lightman_ai/core/settings.py +0 -30
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/.gitignore +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/LICENSE +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/base/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/base/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/gemini/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/gemini/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/openai/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/openai/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/utils.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/article/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/article/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/article/models.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/core/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/core/config.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/core/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/integrations/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/integrations/service_desk/__init__.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/integrations/service_desk/constants.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/integrations/service_desk/exceptions.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/integrations/service_desk/integration.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/py.typed +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/sources/base.py +0 -0
- {lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/sources/exceptions.py +0 -0

lightman_ai-0.22.0/README.md → lightman_ai-1.1.0/PKG-INFO

@@ -1,4 +1,29 @@
+Metadata-Version: 2.4
+Name: lightman_ai
+Version: 1.1.0
+Summary: LLM-powered cybersecurity news aggregator.
+Author-email: sdn4z <imsdn4z@gmail.com>, scastlara <s.cast.lara@gmail.com>
+License-File: LICENSE
+Requires-Python: <4,>=3.13
+Requires-Dist: click<9.0.0,>=8.1.7
+Requires-Dist: httpx<1.0.0,>=0.28.0
+Requires-Dist: pydantic-ai-slim[google,openai]<2.0.0,>=1.0.0
+Requires-Dist: pydantic-settings<3.0.0,>=2.9.1
+Requires-Dist: python-dotenv<2.0.0,>=1.1.1
+Requires-Dist: stamina<26.0.0,>=25.1.0
+Requires-Dist: tomlkit<1.0.0,>=0.13.3
+Provides-Extra: sentry
+Requires-Dist: sentry-sdk<3.0.0,>=2.21.0; extra == 'sentry'
+Description-Content-Type: text/markdown
+
 # 🔍 Lightman AI
+
+[](https://pypi.org/project/lightman-ai/)
+[](https://hub.docker.com/r/elementsinteractive/lightman-ai)
+[](https://pypi.org/project/lightman-ai/)
+[](https://github.com/astral-sh/ruff)
+[](LICENSE)
+
 
 > LLM-Powered Cybersecurity News Intelligence Platform
 
@@ -74,19 +99,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 agent = "openai"
 score_threshold = 8
 prompt = "development"
+log_level = "INFO"
 
 [prompts]
 development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
 ```
 
 3. **Run with Docker**:
+
 ```bash
 docker run --rm \
   -v $(pwd)/lightman.toml:/app/lightman.toml \
   -e OPENAI_API_KEY="your-api-key" \
   elementsinteractive/lightman-ai:latest \
-
+  run --config-file /app/lightman.toml --score 8 --agent openai
+```
+
+You use a .env file instead of setting the environment variables through the cli
+
+```bash
+cp .env.example .env
 ```
+
+Fill it with your values and run:
+
+```bash
+docker run --rm \
+  -v $(pwd)/lightman.toml:/app/lightman.toml \
+  --env-file .env \
+  elementsinteractive/lightman-ai:latest \
+  run --config-file /app/lightman.toml --score 8 --agent openai
+```
 
 ## 🔧 Usage
 
@@ -105,6 +148,8 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 | `--start-date` | Start date to retrieve articles | False |
 | `--today` | Retrieve articles from today | False |
 | `--yesterday` | Retrieve articles from yesterday | False |
+| `-v` | Be more verbose on output | False |
+
 
 ### Environment Variables:
 lightman-ai uses the following environment variables:
@@ -133,17 +178,33 @@ prompt = 'development' # Prompt template to use
 service_desk_project_key = "SEC"
 service_desk_request_id_type = "incident"
 
+# alternative configuration
+[malware]
+agent = 'openai' # AI agent to use (openai, gemini)
+score_threshold = 8 # Minimum relevance score (1-10)
+prompt = 'malware' # Prompt template to use
+
+# Optional: Service desk integration
+service_desk_project_key = "SEC"
+service_desk_request_id_type = "incident"
+
 [prompts]
 development = """
 Analyze the following cybersecurity news articles and determine their relevance to our organization.
 Rate each article from 1-10 based on potential impact and urgency.
-Focus on
-
+Focus on vulnerabilities."""
+
+malware = """
+Analyze the following cybersecurity news articles and determine their relevance to our organization.
+Rate each article from 1-10 based on potential impact and urgency.
+Focus on malware."""
 
 custom_prompt = """
 Your custom analysis prompt here...
 """
 ```
+Note how it supports different configurations and prompts.
+
 
 It also supports having separate files for your prompts and your configuration settings. Specify the path with `--prompt`.
 
@@ -200,7 +261,7 @@ lightman run --agent openai --score 8 --prompt security_critical --yesterday
 
 ### Development Installation
 In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
-- Python 3.13
+- [Python 3.13](https://www.python.org/downloads/release/python-3130/)
 - [just](https://github.com/casey/just)
 - [uv](https://docs.astral.sh/uv/getting-started/installation/)
 

lightman_ai-0.22.0/PKG-INFO → lightman_ai-1.1.0/README.md

@@ -1,22 +1,11 @@
-Metadata-Version: 2.4
-Name: lightman_ai
-Version: 0.22.0
-Summary: LLM-powered cybersecurity news aggregator.
-Author-email: sdn4z <imsdn4z@gmail.com>
-License-File: LICENSE
-Requires-Python: <4,>=3.13
-Requires-Dist: click<9.0.0,>=8.1.7
-Requires-Dist: httpx<1.0.0,>=0.28.0
-Requires-Dist: pydantic-ai-slim[google,openai]>=0.4.4
-Requires-Dist: pydantic-settings<3.0.0,>=2.9.1
-Requires-Dist: python-dotenv<2.0.0,>=1.1.1
-Requires-Dist: stamina<26.0.0,>=25.1.0
-Requires-Dist: tomlkit<1.0.0,>=0.13.3
-Provides-Extra: sentry
-Requires-Dist: sentry-sdk<3.0.0,>=2.21.0; extra == 'sentry'
-Description-Content-Type: text/markdown
-
 # 🔍 Lightman AI
+
+[](https://pypi.org/project/lightman-ai/)
+[](https://hub.docker.com/r/elementsinteractive/lightman-ai)
+[](https://pypi.org/project/lightman-ai/)
+[](https://github.com/astral-sh/ruff)
+[](LICENSE)
+
 
 
 > LLM-Powered Cybersecurity News Intelligence Platform
@@ -92,19 +81,37 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 agent = "openai"
 score_threshold = 8
 prompt = "development"
+log_level = "INFO"
 
 [prompts]
 development = "Analyze cybersecurity news for relevance to our organization."' > lightman.toml
 ```
 
 3. **Run with Docker**:
+
 ```bash
 docker run --rm \
   -v $(pwd)/lightman.toml:/app/lightman.toml \
   -e OPENAI_API_KEY="your-api-key" \
   elementsinteractive/lightman-ai:latest \
-
+  run --config-file /app/lightman.toml --score 8 --agent openai
+```
+
+You use a .env file instead of setting the environment variables through the cli
+
+```bash
+cp .env.example .env
 ```
+
+Fill it with your values and run:
+
+```bash
+docker run --rm \
+  -v $(pwd)/lightman.toml:/app/lightman.toml \
+  --env-file .env \
+  elementsinteractive/lightman-ai:latest \
+  run --config-file /app/lightman.toml --score 8 --agent openai
+```
 
 ## 🔧 Usage
 
@@ -123,6 +130,8 @@ Lightman AI is an intelligent cybersecurity news aggregation and risk assessment
 | `--start-date` | Start date to retrieve articles | False |
 | `--today` | Retrieve articles from today | False |
 | `--yesterday` | Retrieve articles from yesterday | False |
+| `-v` | Be more verbose on output | False |
+
 
 ### Environment Variables:
 lightman-ai uses the following environment variables:
@@ -151,17 +160,33 @@ prompt = 'development' # Prompt template to use
 service_desk_project_key = "SEC"
 service_desk_request_id_type = "incident"
 
+# alternative configuration
+[malware]
+agent = 'openai' # AI agent to use (openai, gemini)
+score_threshold = 8 # Minimum relevance score (1-10)
+prompt = 'malware' # Prompt template to use
+
+# Optional: Service desk integration
+service_desk_project_key = "SEC"
+service_desk_request_id_type = "incident"
+
 [prompts]
 development = """
 Analyze the following cybersecurity news articles and determine their relevance to our organization.
 Rate each article from 1-10 based on potential impact and urgency.
-Focus on
-
+Focus on vulnerabilities."""
+
+malware = """
+Analyze the following cybersecurity news articles and determine their relevance to our organization.
+Rate each article from 1-10 based on potential impact and urgency.
+Focus on malware."""
 
 custom_prompt = """
 Your custom analysis prompt here...
 """
 ```
+Note how it supports different configurations and prompts.
+
 
 It also supports having separate files for your prompts and your configuration settings. Specify the path with `--prompt`.
 
@@ -218,7 +243,7 @@ lightman run --agent openai --score 8 --prompt security_critical --yesterday
 
 ### Development Installation
 In order to fully use the provided setup for local development and testing, this project requires the following dependencies:
-- Python 3.13
+- [Python 3.13](https://www.python.org/downloads/release/python-3130/)
 - [just](https://github.com/casey/just)
 - [uv](https://docs.astral.sh/uv/getting-started/installation/)
 

lightman_ai-1.1.0/VERSION

@@ -0,0 +1 @@
+v1.1.0

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/pyproject.toml

@@ -2,6 +2,7 @@
 [project]
 authors = [
     {name = "sdn4z", email = "imsdn4z@gmail.com"},
+    {name = "scastlara", email = "s.cast.lara@gmail.com"}
 ]
 requires-python = "<4,>=3.13"
 dependencies = [
@@ -11,7 +12,7 @@ dependencies = [
     "stamina<26.0.0,>=25.1.0",
     "pydantic-settings<3.0.0,>=2.9.1",
     "tomlkit<1.0.0,>=0.13.3",
-    "pydantic-ai-slim[google,openai]>=0.4.4",
+    "pydantic-ai-slim[google,openai]>=1.0.0,<2.0.0",
 ]
 name = "lightman_ai"
 description = "LLM-powered cybersecurity news aggregator."

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/base/agent.py

@@ -1,26 +1,30 @@
 import logging
 from abc import ABC, abstractmethod
-from typing import Never
+from typing import ClassVar, Never, override
 
 from lightman_ai.article.models import SelectedArticlesList
 from pydantic_ai import Agent
 from pydantic_ai.models.google import GoogleModel
-from pydantic_ai.models.openai import
+from pydantic_ai.models.openai import OpenAIChatModel
 
 
 class BaseAgent(ABC):
-
-
+    _AGENT_CLASS: type[OpenAIChatModel] | type[GoogleModel]
+    _DEFAULT_MODEL_NAME: str
+    _AGENT_NAME: ClassVar[str]
 
     def __init__(self, system_prompt: str, model: str | None = None, logger: logging.Logger | None = None) -> None:
-
+        selected_model = model or self._DEFAULT_MODEL_NAME
+        agent_model = self._AGENT_CLASS(selected_model)
         self.agent: Agent[Never, SelectedArticlesList] = Agent(
             model=agent_model, output_type=SelectedArticlesList, system_prompt=system_prompt
         )
         self.logger = logger or logging.getLogger("lightman")
+        self.logger.info("Selected %s's %s model", self, selected_model)
 
-
-
+    @override
+    def __str__(self) -> str:
+        return self._AGENT_NAME
 
     @abstractmethod
-    def
+    def run_prompt(self, prompt: str) -> SelectedArticlesList: ...

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/gemini/agent.py

@@ -9,11 +9,12 @@ from pydantic_ai.models.google import GoogleModel
 class GeminiAgent(BaseAgent):
     """Class that provides an interface to operate with the Gemini model."""
 
-
-
+    _AGENT_CLASS = GoogleModel
+    _DEFAULT_MODEL_NAME = "gemini-2.5-pro"
+    _AGENT_NAME = "Gemini"
 
     @override
-    def
+    def run_prompt(self, prompt: str) -> SelectedArticlesList:
         with map_gemini_exceptions():
             result = self.agent.run_sync(prompt)
             return result.output

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/ai/openai/agent.py

@@ -5,21 +5,22 @@ from lightman_ai.ai.base.agent import BaseAgent
 from lightman_ai.ai.openai.exceptions import LimitTokensExceededError, map_openai_exceptions
 from lightman_ai.article.models import SelectedArticlesList
 from pydantic_ai.agent import AgentRunResult
-from pydantic_ai.models.openai import
+from pydantic_ai.models.openai import OpenAIChatModel
 
 
 class OpenAIAgent(BaseAgent):
     """Class that provides an interface to operate with the OpenAI model."""
 
-
-
+    _AGENT_CLASS = OpenAIChatModel
+    _DEFAULT_MODEL_NAME = "gpt-4.1"
+    _AGENT_NAME = "OpenAI"
 
     def _execute_agent(self, prompt: str) -> AgentRunResult[SelectedArticlesList]:
         with map_openai_exceptions():
             return self.agent.run_sync(prompt)
 
     @override
-    def
+    def run_prompt(self, prompt: str) -> SelectedArticlesList:
         try:
             result = self._execute_agent(prompt)
         except LimitTokensExceededError as err:
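
The 1.1.0 refactor moves provider selection into class attributes on `BaseAgent` (`_AGENT_CLASS`, `_DEFAULT_MODEL_NAME`, `_AGENT_NAME`) and names the abstract entry point `run_prompt`. A minimal usage sketch based on the classes shown above; the prompt strings and the assumption that `OPENAI_API_KEY` is already exported are illustrative, not part of the package:

```python
# Sketch only: exercises the 1.1.0 agent interface shown in the diff above.
# Assumes OPENAI_API_KEY is set in the environment for pydantic-ai's OpenAI model.
import logging

from lightman_ai.ai.openai.agent import OpenAIAgent
from lightman_ai.article.models import SelectedArticlesList

logger = logging.getLogger("lightman")

# model=None falls back to _DEFAULT_MODEL_NAME ("gpt-4.1") and logs
# "Selected OpenAI's gpt-4.1 model" through the new __str__/_AGENT_NAME.
agent = OpenAIAgent(
    system_prompt="Rate each article from 1-10 based on potential impact and urgency.",
    model=None,
    logger=logger,
)

result: SelectedArticlesList = agent.run_prompt("Example Article (https://example.com/post)")
```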

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/cli.py

@@ -1,20 +1,33 @@
 import logging
+import os
 from datetime import date
 from importlib import metadata
 
 import click
 from dotenv import load_dotenv
 from lightman_ai.ai.utils import AGENT_CHOICES
-from lightman_ai.constants import
+from lightman_ai.constants import (
+    DEFAULT_AGENT,
+    DEFAULT_CONFIG_FILE,
+    DEFAULT_CONFIG_SECTION,
+    DEFAULT_ENV_FILE,
+    DEFAULT_LOG_LEVEL,
+    DEFAULT_SCORE,
+    DEFAULT_TIME_ZONE,
+    VERBOSE_LOG_LEVEL,
+)
 from lightman_ai.core.config import FileConfig, FinalConfig, PromptConfig
 from lightman_ai.core.exceptions import ConfigNotFoundError, InvalidConfigError, PromptNotFoundError
 from lightman_ai.core.sentry import configure_sentry
-from lightman_ai.core.settings import Settings
 from lightman_ai.exceptions import MultipleDateSourcesError
 from lightman_ai.main import lightman
 from lightman_ai.utils import get_start_date
 
 logger = logging.getLogger("lightman")
+logging.basicConfig(
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+    datefmt="%Y-%m-%d %H:%M:%S",
+)
 
 
 def get_version() -> str:
@@ -72,6 +85,7 @@ def entry_point() -> None:
 @click.option("--start-date", type=click.DateTime(formats=["%Y-%m-%d"]), help="Start date to retrieve articles")
 @click.option("--today", is_flag=True, help="Retrieve articles from today.")
 @click.option("--yesterday", is_flag=True, help="Retrieve articles from yesterday.")
+@click.option("-v", is_flag=True, help="Be more verbose on output.")
 def run(
     agent: str,
     prompt: str,
@@ -85,18 +99,30 @@ def run(
     start_date: date | None,
     today: bool,
     yesterday: bool,
+    v: bool,
 ) -> int:
     """
     Entrypoint of the application.
 
     Holds no logic. It loads the configuration, calls the main method and returns 0 when succesful .
     """
-    load_dotenv(env_file or DEFAULT_ENV_FILE)
-
+    load_dotenv(env_file or DEFAULT_ENV_FILE)
+
+    if v:
+        logger.setLevel(VERBOSE_LOG_LEVEL)
+    else:
+        try:
+            env_log_level = os.getenv("LOG_LEVEL")
+            log_level = env_log_level.upper() if env_log_level else DEFAULT_LOG_LEVEL
+            logger.setLevel(log_level)
+        except ValueError:
+            logger.setLevel(DEFAULT_LOG_LEVEL)
+            logger.warning("Invalid logging level. Using default value.")
+
+    configure_sentry(logger.level)
 
-    settings = Settings.try_load_from_file(env_file)
     try:
-        start_datetime = get_start_date(
+        start_datetime = get_start_date(os.getenv("TIME_ZONE", DEFAULT_TIME_ZONE), yesterday, today, start_date)
     except MultipleDateSourcesError as e:
         raise click.UsageError(e.args[0]) from e
 
@@ -105,9 +131,9 @@ def run(
     config_from_file = FileConfig.get_config_from_file(config_section=config, path=config_file)
     final_config = FinalConfig.init_from_dict(
         data={
-            "agent": agent or config_from_file.agent or
+            "agent": agent or config_from_file.agent or DEFAULT_AGENT,
             "prompt": prompt or config_from_file.prompt,
-            "score_threshold": score or config_from_file.score_threshold or
+            "score_threshold": score or config_from_file.score_threshold or DEFAULT_SCORE,
             "model": model or config_from_file.model,
         }
     )
@@ -126,5 +152,11 @@ def run(
         start_date=start_datetime,
     )
     relevant_articles_metadata = [f"{article.title} ({article.link})" for article in relevant_articles]
-
+
+    if relevant_articles_metadata:
+        articles = f"Found these articles:\n* {'\n* '.join(relevant_articles_metadata)} "
+        click.echo(click.style(articles))
+    else:
+        click.echo(click.style("No relevant articles found."))
+
     return 0

lightman_ai-1.1.0/src/lightman_ai/core/sentry.py

@@ -0,0 +1,32 @@
+import logging
+import os
+from importlib import metadata
+
+logger = logging.getLogger("lightman")
+
+
+def configure_sentry(log_level: int) -> None:
+    """Configure Sentry for error tracking."""
+    try:
+        import sentry_sdk  # noqa: PLC0415
+        from sentry_sdk.integrations.logging import LoggingIntegration  # noqa: PLC0415
+    except ImportError:
+        if os.getenv("SENTRY_DSN"):
+            logger.warning(
+                "Could not initialize sentry, it is not installed! Add it by installing the project with `lightman-ai[sentry]`."
+            )
+        return
+
+    if not os.getenv("SENTRY_DSN"):
+        logger.warning("SENTRY_DSN not configured, skipping Sentry initialization")
+        return
+
+    try:
+        sentry_logging = LoggingIntegration(level=logging.INFO, event_level=log_level)
+
+        sentry_sdk.init(
+            release=metadata.version("lightman-ai"),
+            integrations=[sentry_logging],
+        )
+    except Exception as e:
+        logger.warning("Could not instantiate Sentry! %s.\nContinuing with the execution.", e)
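
`configure_sentry` now receives the log level resolved by the CLI instead of reading a separate `LOGGING_LEVEL` variable, and only warns about a missing SDK when `SENTRY_DSN` is actually set. A minimal sketch of calling it directly, assuming the `sentry` extra is installed; the DSN below is a placeholder, not a real project key:

```python
# Sketch only: drives the new configure_sentry(log_level) signature directly.
# Requires `pip install "lightman-ai[sentry]"`; the SENTRY_DSN value is a placeholder.
import logging
import os

from lightman_ai.core.sentry import configure_sentry

os.environ.setdefault("SENTRY_DSN", "https://examplePublicKey@o0.ingest.sentry.io/0")

# Breadcrumbs are captured from INFO upwards; events are sent at the level passed in.
configure_sentry(logging.WARNING)
```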

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/main.py

@@ -11,6 +11,7 @@ from lightman_ai.integrations.service_desk.integration import (
 from lightman_ai.sources.the_hacker_news import TheHackerNewsSource
 
 logger = logging.getLogger("lightman")
+logger.addHandler(logging.NullHandler())
 
 
 def _get_articles_from_source(start_date: datetime | None = None) -> ArticlesList:
@@ -18,7 +19,7 @@ def _get_articles_from_source(start_date: datetime | None = None) -> ArticlesLis
 
 
 def _classify_articles(articles: ArticlesList, agent: BaseAgent) -> SelectedArticlesList:
-    return agent.
+    return agent.run_prompt(prompt=str(articles))
 
 
 def _create_service_desk_issues(
@@ -67,7 +68,6 @@ def lightman(
 
     agent_class = get_agent_class_from_agent_name(agent)
     agent_instance = agent_class(prompt, model, logger=logger)
-    logger.info("Selected %s.", agent_instance)
 
     classified_articles = _classify_articles(
         articles=articles,
@@ -77,10 +77,6 @@ def lightman(
     selected_articles: list[SelectedArticle] = classified_articles.get_articles_with_score_gte_threshold(
         score_threshold
     )
-    if selected_articles:
-        logger.info("Found these articles: %s", selected_articles)
-    else:
-        logger.info("No articles found to be relevant. Total returned articles by AI %s", len(classified_articles))
 
     if not dry_run:
         if not service_desk_project_key or not service_desk_request_id_type:

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/sources/the_hacker_news.py

@@ -25,8 +25,10 @@ class TheHackerNewsSource(BaseSource):
     @override
     def get_articles(self, date: datetime | None = None) -> ArticlesList:
         """Return the articles that are present in THN feed."""
+        logger.info("Downloading articles from %s", THN_URL)
         feed = self.get_feed()
         articles = self._xml_to_list_of_articles(feed)
+        logger.info("Articles properly downloaded and parsed.")
         if date:
             return ArticlesList.get_articles_from_date_onwards(articles=articles, start_date=date)
         else:

{lightman_ai-0.22.0 → lightman_ai-1.1.0}/src/lightman_ai/utils.py

@@ -1,11 +1,10 @@
 from datetime import date, datetime, time, timedelta
 from zoneinfo import ZoneInfo
 
-from lightman_ai.core.settings import Settings
 from lightman_ai.exceptions import MultipleDateSourcesError
 
 
-def get_start_date(
+def get_start_date(time_zone: str, yesterday: bool, today: bool, start_date: date | None) -> datetime | None:
     mutually_exclusive_date_fields = [x for x in [start_date, today, yesterday] if x]
 
     if len(mutually_exclusive_date_fields) > 1:
@@ -14,12 +13,12 @@ def get_start_date(settings: Settings, yesterday: bool, today: bool, start_date:
     )
 
     if today:
-        now = datetime.now(ZoneInfo(
-        return datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(
+        now = datetime.now(ZoneInfo(time_zone))
+        return datetime.combine(now, time(0, 0), tzinfo=ZoneInfo(time_zone))
     elif yesterday:
-        yesterday_date = datetime.now(ZoneInfo(
-        return datetime.combine(yesterday_date, time(0, 0), tzinfo=ZoneInfo(
+        yesterday_date = datetime.now(ZoneInfo(time_zone)) - timedelta(days=1)
+        return datetime.combine(yesterday_date, time(0, 0), tzinfo=ZoneInfo(time_zone))
     elif isinstance(start_date, date):
-        return datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(
+        return datetime.combine(start_date, time(0, 0), tzinfo=ZoneInfo(time_zone))
     else:
         return None
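
`get_start_date` now takes the time zone as a plain string (the CLI passes `os.getenv("TIME_ZONE", DEFAULT_TIME_ZONE)`) instead of the removed `Settings` object. A small sketch of the new call; the zone names here are example values only:

```python
# Sketch only: the 1.1.0 signature takes the time zone string as the first argument.
from datetime import date

from lightman_ai.utils import get_start_date

# Midnight of yesterday in the given zone.
start = get_start_date("Europe/Amsterdam", yesterday=True, today=False, start_date=None)

# An explicit start date; passing more than one of start_date/today/yesterday
# raises MultipleDateSourcesError via the mutual-exclusion check above.
start = get_start_date("UTC", yesterday=False, today=False, start_date=date(2025, 1, 1))
```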

lightman_ai-0.22.0/VERSION
DELETED

@@ -1 +0,0 @@
-v0.22.0

lightman_ai-0.22.0/src/lightman_ai/core/sentry.py
DELETED

@@ -1,39 +0,0 @@
-import logging
-import os
-from importlib import metadata
-
-logger = logging.getLogger("lightman")
-
-
-def configure_sentry() -> None:
-    """Configure Sentry for error tracking."""
-    try:
-        import sentry_sdk
-        from sentry_sdk.integrations.logging import LoggingIntegration
-    except ImportError:
-        logger.warning(
-            "Could not initialize sentry, it is not installed! Add it by installing the project with `lightman-ai[sentry]`."
-        )
-        return
-
-    try:
-        if not os.getenv("SENTRY_DSN"):
-            logger.info("SENTRY_DSN not configured, skipping Sentry initialization")
-            return
-
-        logging_level_str = os.getenv("LOGGING_LEVEL", "ERROR").upper()
-        try:
-            logging_level = getattr(logging, logging_level_str, logging.ERROR)
-        except AttributeError:
-            logger.warning("The specified logging level `%s` does not exist. Defaulting to ERROR.", logging_level_str)
-            logging_level = logging.ERROR
-
-        # Set up logging integration
-        sentry_logging = LoggingIntegration(level=logging.INFO, event_level=logging_level)
-
-        sentry_sdk.init(
-            release=metadata.version("lightman-ai"),
-            integrations=[sentry_logging],
-        )
-    except Exception as e:
-        logger.warning("Could not instantiate Sentry! %s.\nContinuing with the execution.", e)

lightman_ai-0.22.0/src/lightman_ai/core/settings.py
DELETED

@@ -1,30 +0,0 @@
-import logging
-from pathlib import Path
-from typing import Any, Self
-
-from pydantic_settings import BaseSettings, SettingsConfigDict
-
-logger = logging.getLogger("lightman")
-
-
-class Settings(BaseSettings):
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(*args, **kwargs)
-
-    AGENT: str = "openai"
-    SCORE: int = 8
-    TIME_ZONE: str = "UTC"
-    model_config = SettingsConfigDict(extra="ignore")
-
-    @classmethod
-    def try_load_from_file(cls, env_file: str | None = None) -> Self:
-        """
-        Initialize Settings class and returns an instance.
-
-        It tries to load env variables from the env file. Variables set in the environment take precendence.
-
-        If the env file is not present, it continues execution, following pydantic-settings' behaviour.
-        """
-        if env_file and not Path(env_file).exists():
-            logger.warning("env file `%s` not found.", env_file)
-        return cls(_env_file=env_file)

All remaining files (the entries listed above with +0 -0, including the service_desk integration modules) were renamed with the new package version directory and contain no changes.