logdetective 0.5.11__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- logdetective/logdetective.py +17 -8
- logdetective/prompts.yml +4 -4
- logdetective/server/compressors.py +144 -0
- logdetective/server/database/base.py +3 -0
- logdetective/server/database/models/__init__.py +21 -0
- logdetective/server/database/models/merge_request_jobs.py +515 -0
- logdetective/server/database/{models.py → models/metrics.py} +105 -100
- logdetective/server/metric.py +40 -16
- logdetective/server/models.py +12 -3
- logdetective/server/remote_log.py +109 -0
- logdetective/server/server.py +370 -176
- logdetective/utils.py +12 -22
- {logdetective-0.5.11.dist-info → logdetective-0.9.0.dist-info}/METADATA +12 -7
- logdetective-0.9.0.dist-info/RECORD +28 -0
- logdetective-0.5.11.dist-info/RECORD +0 -24
- {logdetective-0.5.11.dist-info → logdetective-0.9.0.dist-info}/LICENSE +0 -0
- {logdetective-0.5.11.dist-info → logdetective-0.9.0.dist-info}/WHEEL +0 -0
- {logdetective-0.5.11.dist-info → logdetective-0.9.0.dist-info}/entry_points.txt +0 -0
logdetective/logdetective.py
CHANGED
@@ -1,8 +1,11 @@
 import argparse
+import asyncio
 import logging
 import sys
 import os
 
+import aiohttp
+
 from logdetective.constants import DEFAULT_ADVISOR, DEFAULT_TEMPERATURE
 from logdetective.utils import (
     process_log,
@@ -82,7 +85,7 @@ def setup_args():
     return parser.parse_args()
 
 
-def main(): # pylint: disable=too-many-statements,too-many-locals
+async def run(): # pylint: disable=too-many-statements,too-many-locals
     """Main execution function."""
     args = setup_args()
 
@@ -128,13 +131,14 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
 
     LOG.info("Getting summary")
 
-
-
-
-
-
-
-
+    async with aiohttp.ClientSession() as http:
+        try:
+            log = await retrieve_log_content(http, args.file)
+        except ValueError as e:
+            # file does not exist
+            LOG.error(e)
+            sys.exit(4)
+    log_summary = extractor(log)
 
     ratio = len(log_summary) / len(log.split("\n"))
 
@@ -182,5 +186,10 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
     print(f"\nResponse certainty: {certainty:.2f}%\n")
 
 
+def main():
+    """ Evaluate logdetective program and wait for it to finish """
+    asyncio.run(run())
+
+
 if __name__ == "__main__":
     main()
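Note on the change above: the CLI body moves from a synchronous main() into async def run(), and the new main() only bridges the two with asyncio.run(), so the console-script entry point (unchanged, see entry_points.txt) keeps calling main(). A minimal sketch of that pattern follows; the HTTP-fetch body and URL are stand-ins, while the real run() uses retrieve_log_content() and the extractor as shown in the diff:

import asyncio
import aiohttp

async def run():
    # one shared HTTP session for the whole invocation
    async with aiohttp.ClientSession() as http:
        async with http.get("https://example.org/build.log") as resp:
            print((await resp.text())[:80])

def main():
    # synchronous wrapper so a console_scripts entry point can still call main()
    asyncio.run(run())

if __name__ == "__main__":
    main()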
logdetective/prompts.yml
CHANGED
@@ -4,7 +4,7 @@
 # The defaults are stored in constants.py
 
 prompt_template: |
-  Given following log snippets, and nothing else, explain what failure, if any,
+  Given following log snippets, and nothing else, explain what failure, if any, occurred during build of this package.
 
   Analysis of the snippets must be in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
   Snippets themselves must not be altered in any way whatsoever.
@@ -44,15 +44,15 @@ snippet_prompt_template: |
   Analysis:
 
 prompt_template_staged: |
-  Given following log snippets, their explanation, and nothing else, explain what failure, if any,
+  Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.
 
   Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
 
   Snippets are delimited with '================'.
 
-  Drawing on information from all snippets, provide
+  Drawing on information from all snippets, provide a concise explanation of the issue and recommend a solution.
 
-  Explanation of the issue, and recommended solution, should take handful of sentences.
+  Explanation of the issue, and recommended solution, should take a handful of sentences.
 
   Snippets:
 
logdetective/server/compressors.py
ADDED
@@ -0,0 +1,144 @@
+import io
+import logging
+import zipfile
+
+from typing import Union, Dict
+from logdetective.server.models import (
+    StagedResponse,
+    Response,
+    AnalyzedSnippet,
+    Explanation,
+)
+
+
+LOG = logging.getLogger("logdetective")
+
+
+class TextCompressor:
+    """
+    Encapsulates one or more texts in one or more files with the specified names
+    and provides methods to retrieve them later.
+    """
+
+    def zip(self, items: Dict[str, str]) -> bytes:
+        """
+        Compress multiple texts into different files within a zip archive.
+
+        Args:
+            items: Dictionary where keys are file names and values are text content
+                to be compressed
+
+        Returns:
+            bytes: The compressed zip archive as bytes
+        """
+        zip_buffer = io.BytesIO()
+        with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+            for key, value in items.items():
+                zip_file.writestr(key, value)
+
+        zip_buffer.seek(0)
+        return zip_buffer.getvalue()
+
+    def unzip(self, zip_data: Union[bytes, io.BytesIO]) -> str:
+        """
+        Uncompress data created by TextCompressor.zip().
+
+        Args:
+            zip_data: A zipped stream of bytes or BytesIO object
+
+        Returns:
+            {file_name: str}: The decompressed content as a dict of file names and UTF-8 strings
+        """
+        if isinstance(zip_data, bytes):
+            zip_buffer = io.BytesIO(zip_data)
+        else:
+            zip_buffer = zip_data
+
+        content = {}
+        with zipfile.ZipFile(zip_buffer, "r") as zip_file:
+            file_list = zip_file.namelist()
+            for file_name in file_list:
+                content[file_name] = zip_file.read(file_name).decode("utf-8")
+
+        return content
+
+
+class LLMResponseCompressor:
+    """
+    Handles compression and decompression of LLM responses.
+    """
+
+    EXPLANATION_FILE_NAME = "explanation.txt"
+    SNIPPET_FILE_NAME = "snippet_{number}.txt"
+    COMPRESSOR = TextCompressor()
+
+    def __init__(self, response: Union[StagedResponse, Response]):
+        """
+        Initialize with an LLM response.
+
+        Args:
+            response: Either a StagedResponse or Response object
+        """
+        self._response = response
+
+    def zip_response(self) -> bytes:
+        """
+        Compress the content of the LLM response.
+
+        Returns:
+            bytes: Compressed response as bytes
+        """
+        items = {
+            self.EXPLANATION_FILE_NAME: self._response.explanation.model_dump_json()
+        }
+
+        if isinstance(self._response, StagedResponse):
+            for i, snippet in enumerate(self._response.snippets):
+                items[self.SNIPPET_FILE_NAME.format(number=i)] = (
+                    snippet.model_dump_json()
+                )
+
+        return self.COMPRESSOR.zip(items)
+
+    @classmethod
+    def unzip(
+        cls, zip_data: Union[bytes, io.BytesIO]
+    ) -> Union[StagedResponse, Response]:
+        """
+        Uncompress the zipped content of the LLM response.
+
+        Args:
+            zip_data: Compressed data as bytes or BytesIO
+
+        Returns:
+            Union[StagedResponse, Response]: The decompressed (partial) response object,
+            missing response_certainty.
+        """
+        items = cls.COMPRESSOR.unzip(zip_data)
+        if cls.EXPLANATION_FILE_NAME not in items:
+            raise KeyError(
+                f"Required file {cls.EXPLANATION_FILE_NAME} not found in zip archive"
+            )
+        explanation = Explanation.model_validate_json(items[cls.EXPLANATION_FILE_NAME])
+
+        snippets = []
+        snippet_files = {
+            k: v
+            for k, v in items.items()
+            if cls.SNIPPET_FILE_NAME.replace("{number}.txt", "") in k
+        }
+        for i in range(len(snippet_files)):
+            snippets.append(
+                AnalyzedSnippet.model_validate_json(
+                    items[cls.SNIPPET_FILE_NAME.format(number=i)]
+                )
+            )
+
+        if snippets:
+            response = StagedResponse(
+                explanation=explanation, snippets=snippets, response_certainty=0
+            )
+        else:
+            response = Response(explanation=explanation, response_certainty=0)
+
+        return response
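For orientation, a small usage sketch of the classes added above (based only on the code in this diff; the file name and log text are made up, and constructing a real Response/StagedResponse is left commented out because its full field set lives in logdetective.server.models):

from logdetective.server.compressors import TextCompressor, LLMResponseCompressor

# Plain-text round trip: dictionary keys become file names inside the zip archive.
compressor = TextCompressor()
archive = compressor.zip({"build.log": "error: nothing provides python3-foo"})
restored = compressor.unzip(archive)  # -> {"build.log": "error: nothing provides python3-foo"}
assert restored["build.log"].startswith("error:")

# LLM responses: zip_response() writes explanation.txt plus snippet_<n>.txt entries,
# and the classmethod unzip() rebuilds a Response or StagedResponse with
# response_certainty reset to 0.
# packed = LLMResponseCompressor(response).zip_response()
# restored_response = LLMResponseCompressor.unzip(packed)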
logdetective/server/database/models/__init__.py
ADDED
@@ -0,0 +1,21 @@
+from logdetective.server.database.base import Base
+from logdetective.server.database.models.merge_request_jobs import (
+    Forge,
+    GitlabMergeRequestJobs,
+    Comments,
+    Reactions,
+)
+from logdetective.server.database.models.metrics import (
+    AnalyzeRequestMetrics,
+    EndpointType,
+)
+
+__all__ = [
+    Base.__name__,
+    GitlabMergeRequestJobs.__name__,
+    Comments.__name__,
+    Reactions.__name__,
+    AnalyzeRequestMetrics.__name__,
+    EndpointType.__name__,
+    Forge.__name__,
+]