logdetective 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- logdetective/constants.py +4 -0
- logdetective/{server/remote_log.py → remote_log.py} +3 -43
- logdetective/server/compressors.py +49 -4
- logdetective/server/{utils.py → config.py} +12 -13
- logdetective/server/database/models/merge_request_jobs.py +79 -7
- logdetective/server/emoji.py +104 -0
- logdetective/server/gitlab.py +413 -0
- logdetective/server/llm.py +284 -0
- logdetective/server/metric.py +9 -8
- logdetective/server/models.py +78 -6
- logdetective/server/server.py +170 -637
- logdetective/utils.py +1 -1
- {logdetective-0.9.0.dist-info → logdetective-0.10.0.dist-info}/METADATA +3 -2
- logdetective-0.10.0.dist-info/RECORD +31 -0
- {logdetective-0.9.0.dist-info → logdetective-0.10.0.dist-info}/WHEEL +1 -1
- logdetective-0.9.0.dist-info/RECORD +0 -28
- {logdetective-0.9.0.dist-info → logdetective-0.10.0.dist-info}/LICENSE +0 -0
- {logdetective-0.9.0.dist-info → logdetective-0.10.0.dist-info}/entry_points.txt +0 -0
logdetective/server/metric.py
CHANGED
@@ -1,6 +1,5 @@
 import io
 import inspect
-import logging
 import datetime
 
 from typing import Union
@@ -9,12 +8,11 @@ from functools import wraps
 import aiohttp
 
 from starlette.responses import StreamingResponse
-from logdetective.server.database.models import EndpointType, AnalyzeRequestMetrics
-from logdetective.server.remote_log import RemoteLog
 from logdetective.server import models
-from logdetective.
-
-
+from logdetective.remote_log import RemoteLog
+from logdetective.server.config import LOG
+from logdetective.server.compressors import LLMResponseCompressor, RemoteLogCompressor
+from logdetective.server.database.models import EndpointType, AnalyzeRequestMetrics
 
 
 async def add_new_metrics(
@@ -31,7 +29,9 @@ async def add_new_metrics(
     and the log (in a zip format) for which analysis is requested.
     """
     remote_log = RemoteLog(url, http_session)
-    compressed_log_content =
+    compressed_log_content = (
+        compressed_log_content or await RemoteLogCompressor(remote_log).zip_content()
+    )
     return AnalyzeRequestMetrics.create(
         endpoint=EndpointType(api_name),
         compressed_log=compressed_log_content,
@@ -58,7 +58,8 @@ def update_metrics(
         compressed_response = None
         LOG.warning(
             "Given response can not be serialized "
-            "and saved in db (probably a StreamingResponse): %s.",
+            "and saved in db (probably a StreamingResponse): %s.",
+            e,
         )
 
     response_sent_at = (
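Note: the add_new_metrics change above lets a caller pass in a log archive it has already compressed, falling back to RemoteLogCompressor(remote_log).zip_content() only when nothing was supplied. A minimal, self-contained sketch of that fallback idiom; fetch_and_zip and record_metrics are made-up stand-ins for illustration, not part of the package:

    import asyncio
    from typing import Optional

    async def fetch_and_zip(url: str) -> bytes:
        """Stand-in for RemoteLogCompressor(remote_log).zip_content()."""
        await asyncio.sleep(0)      # pretend we downloaded and zipped the log
        return b"compressed-log"

    async def record_metrics(url: str, compressed_log: Optional[bytes] = None) -> bytes:
        # Reuse the caller-supplied archive if there is one; compress only on demand.
        compressed_log = compressed_log or await fetch_and_zip(url)
        return compressed_log

    asyncio.run(record_metrics("https://example.com/build.log"))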
logdetective/server/models.py
CHANGED
@@ -9,7 +9,15 @@ from pydantic import (
     NonNegativeFloat,
     HttpUrl,
 )
-
+
+from aiolimiter import AsyncLimiter
+from gitlab import Gitlab
+
+from logdetective.constants import (
+    DEFAULT_TEMPERATURE,
+    LLM_DEFAULT_MAX_QUEUE_SIZE,
+    LLM_DEFAULT_REQUESTS_PER_MINUTE,
+)
 
 
 class BuildLog(BaseModel):
@@ -46,6 +54,33 @@ class JobHook(BaseModel):
     project_id: int
 
 
+class EmojiMergeRequest(BaseModel):
+    """Model of the 'merge_request' subsection of Emoji webhook messages.
+    This model implements only the fields that we care about. The webhook
+    sends many more fields that we will ignore."""
+
+    # The identifier of the target project
+    target_project_id: int
+
+    # The internal identifier (relative to the target project)
+    iid: int
+
+
+class EmojiHook(BaseModel):
+    """Model of Job Hook events sent from GitLab.
+    Full details of the specification are available at
+    https://docs.gitlab.com/user/project/integrations/webhook_events/#job-events
+    This model implements only the fields that we care about. The webhook
+    sends many more fields that we will ignore."""
+
+    # The kind of webhook message. We are only interested in 'emoji' messages
+    # which represents awarding or revoking emoji reactions on notes.
+    object_kind: str = Field(pattern=r"^emoji$")
+
+    # Information about the merge request this emoji applies to, if any.
+    merge_request: EmojiMergeRequest = Field(default=None)
+
+
 class Explanation(BaseModel):
     """Model of snippet or general log explanation from Log Detective"""
 
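Note: the new EmojiMergeRequest / EmojiHook models validate only the handful of webhook fields the server cares about. A minimal sketch of parsing an abbreviated, made-up GitLab award-emoji payload with them (real payloads carry many more fields, which these models ignore):

    from logdetective.server.models import EmojiHook

    # Abbreviated, made-up payload for illustration only.
    payload = {
        "object_kind": "emoji",
        "merge_request": {"target_project_id": 42, "iid": 7},
    }

    hook = EmojiHook(**payload)
    if hook.merge_request is not None:
        print(hook.merge_request.target_project_id, hook.merge_request.iid)  # 42 7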
@@ -92,7 +127,7 @@ class StagedResponse(Response):
     snippets: List[AnalyzedSnippet]
 
 
-class InferenceConfig(BaseModel):
+class InferenceConfig(BaseModel):  # pylint: disable=too-many-instance-attributes
     """Model for inference configuration of logdetective server."""
 
     max_tokens: int = -1
@@ -104,6 +139,9 @@ class InferenceConfig(BaseModel):
     api_token: str = ""
     model: str = ""
     temperature: NonNegativeFloat = DEFAULT_TEMPERATURE
+    max_queue_size: int = LLM_DEFAULT_MAX_QUEUE_SIZE
+    request_period: float = 60.0 / LLM_DEFAULT_REQUESTS_PER_MINUTE
+    _limiter: AsyncLimiter = AsyncLimiter(LLM_DEFAULT_REQUESTS_PER_MINUTE)
 
     def __init__(self, data: Optional[dict] = None):
         super().__init__()
@@ -117,6 +155,16 @@ class InferenceConfig(BaseModel):
         self.api_token = data.get("api_token", "")
         self.model = data.get("model", "default-model")
         self.temperature = data.get("temperature", DEFAULT_TEMPERATURE)
+        self.max_queue_size = data.get("max_queue_size", LLM_DEFAULT_MAX_QUEUE_SIZE)
+
+        self._requests_per_minute = data.get(
+            "requests_per_minute", LLM_DEFAULT_REQUESTS_PER_MINUTE
+        )
+        self._limiter = AsyncLimiter(self._requests_per_minute)
+
+    def get_limiter(self):
+        """Return the limiter object so it can be used as a context manager"""
+        return self._limiter
 
 
 class ExtractorConfig(BaseModel):
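Note: the new max_queue_size / requests_per_minute settings wire an aiolimiter.AsyncLimiter into InferenceConfig, exposed through get_limiter(). A rough sketch of how a caller might throttle inference requests with it; call_llm is a made-up stand-in, the default-constructed config keeps the LLM_DEFAULT_REQUESTS_PER_MINUTE budget, and it is assumed (as with the other config classes in this file) that the constructor tolerates being called without a data dict:

    import asyncio
    from logdetective.server.models import InferenceConfig

    async def call_llm(prompt: str) -> str:
        """Made-up stand-in for the real inference call."""
        await asyncio.sleep(0)
        return f"analysis of: {prompt}"

    config = InferenceConfig()  # defaults; real deployments pass the parsed config dict

    async def analyze(prompt: str) -> str:
        # AsyncLimiter is an async context manager; each entry consumes one
        # request from the configured per-minute budget.
        async with config.get_limiter():
            return await call_llm(prompt)

    print(asyncio.run(analyze("error: linker command failed")))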
@@ -136,26 +184,50 @@ class ExtractorConfig(BaseModel):
         self.verbose = data.get("verbose", False)
 
 
-class
+class GitLabInstanceConfig(BaseModel):
     """Model for GitLab configuration of logdetective server."""
 
+    name: str = None
     url: str = None
     api_url: str = None
     api_token: str = None
+    _conn: Gitlab = None
 
     # Maximum size of artifacts.zip in MiB. (default: 300 MiB)
     max_artifact_size: int = 300
 
-    def __init__(self, data: Optional[dict] = None):
+    def __init__(self, name: str, data: Optional[dict] = None):
         super().__init__()
         if data is None:
             return
 
+        self.name = name
         self.url = data.get("url", "https://gitlab.com")
         self.api_url = f"{self.url}/api/v4"
         self.api_token = data.get("api_token", None)
         self.max_artifact_size = int(data.get("max_artifact_size")) * 1024 * 1024
 
+        self._conn = Gitlab(url=self.url, private_token=self.api_token)
+
+    def get_connection(self):
+        """Get the Gitlab connection object"""
+        return self._conn
+
+
+class GitLabConfig(BaseModel):
+    """Model for GitLab configuration of logdetective server."""
+
+    instances: Dict[str, GitLabInstanceConfig] = {}
+
+    def __init__(self, data: Optional[dict] = None):
+        super().__init__()
+        if data is None:
+            return
+
+        for instance_name, instance_data in data.items():
+            instance = GitLabInstanceConfig(instance_name, instance_data)
+            self.instances[instance.url] = instance
+
 
 class LogConfig(BaseModel):
     """Logging configuration"""
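Note: GitLab settings are now grouped per instance: GitLabConfig builds one GitLabInstanceConfig (and one python-gitlab Gitlab client) per entry of the parsed config and indexes them by URL. A small sketch with placeholder values; the section layout is inferred from the constructors above and the token is fake:

    from logdetective.server.models import GitLabConfig

    gitlab_section = {
        "gitlab.com": {
            "url": "https://gitlab.com",
            "api_token": "glpat-placeholder",   # fake token, for illustration only
            "max_artifact_size": 300,           # MiB; converted to bytes by the model
        },
    }

    config = GitLabConfig(gitlab_section)
    instance = config.instances["https://gitlab.com"]  # instances are keyed by URL
    print(instance.name, instance.api_url)             # gitlab.com https://gitlab.com/api/v4
    gl = instance.get_connection()                     # python-gitlab client; no API call yet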
@@ -232,7 +304,7 @@ class TimePeriod(BaseModel):
     @model_validator(mode="before")
     @classmethod
     def check_exclusive_fields(cls, data):
-        """
+        """Check that only one key between weeks, days and hours is defined"""
         if isinstance(data, dict):
             how_many_fields = sum(
                 1
@@ -284,6 +356,6 @@ class TimePeriod(BaseModel):
            datetime.datetime: The start time of the period.
        """
        time = end_time or datetime.datetime.now(datetime.timezone.utc)
-        if
+        if time.tzinfo is None:
            end_time = end_time.replace(tzinfo=datetime.timezone.utc)
        return time - self.get_time_period()