logdetective 0.6.0__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/prompts.yml CHANGED
@@ -4,7 +4,7 @@
 # The defaults are stored in constants.py
 
 prompt_template: |
-  Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
+  Given following log snippets, and nothing else, explain what failure, if any, occurred during build of this package.
 
   Analysis of the snippets must be in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
   Snippets themselves must not be altered in any way whatsoever.
@@ -44,15 +44,15 @@ snippet_prompt_template: |
   Analysis:
 
 prompt_template_staged: |
-  Given following log snippets, their explanation, and nothing else, explain what failure, if any, occured during build of this package.
+  Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.
 
   Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
 
   Snippets are delimited with '================'.
 
-  Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
+  Drawing on information from all snippets, provide a concise explanation of the issue and recommend a solution.
 
-  Explanation of the issue, and recommended solution, should take handful of sentences.
+  Explanation of the issue, and recommended solution, should take a handful of sentences.
 
   Snippets:
 
@@ -0,0 +1,144 @@
+import io
+import logging
+import zipfile
+
+from typing import Union, Dict
+from logdetective.server.models import (
+    StagedResponse,
+    Response,
+    AnalyzedSnippet,
+    Explanation,
+)
+
+
+LOG = logging.getLogger("logdetective")
+
+
+class TextCompressor:
+    """
+    Encapsulates one or more texts in one or more files with the specified names
+    and provides methods to retrieve them later.
+    """
+
+    def zip(self, items: Dict[str, str]) -> bytes:
+        """
+        Compress multiple texts into different files within a zip archive.
+
+        Args:
+            items: Dictionary where keys are file names and values are text content
+                to be compressed
+
+        Returns:
+            bytes: The compressed zip archive as bytes
+        """
+        zip_buffer = io.BytesIO()
+        with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+            for key, value in items.items():
+                zip_file.writestr(key, value)
+
+        zip_buffer.seek(0)
+        return zip_buffer.getvalue()
+
+    def unzip(self, zip_data: Union[bytes, io.BytesIO]) -> str:
+        """
+        Uncompress data created by TextCompressor.zip().
+
+        Args:
+            zip_data: A zipped stream of bytes or BytesIO object
+
+        Returns:
+            {file_name: str}: The decompressed content as a dict of file names and UTF-8 strings
+        """
+        if isinstance(zip_data, bytes):
+            zip_buffer = io.BytesIO(zip_data)
+        else:
+            zip_buffer = zip_data
+
+        content = {}
+        with zipfile.ZipFile(zip_buffer, "r") as zip_file:
+            file_list = zip_file.namelist()
+            for file_name in file_list:
+                content[file_name] = zip_file.read(file_name).decode("utf-8")
+
+        return content
+
+
+class LLMResponseCompressor:
+    """
+    Handles compression and decompression of LLM responses.
+    """
+
+    EXPLANATION_FILE_NAME = "explanation.txt"
+    SNIPPET_FILE_NAME = "snippet_{number}.txt"
+    COMPRESSOR = TextCompressor()
+
+    def __init__(self, response: Union[StagedResponse, Response]):
+        """
+        Initialize with an LLM response.
+
+        Args:
+            response: Either a StagedResponse or Response object
+        """
+        self._response = response
+
+    def zip_response(self) -> bytes:
+        """
+        Compress the content of the LLM response.
+
+        Returns:
+            bytes: Compressed response as bytes
+        """
+        items = {
+            self.EXPLANATION_FILE_NAME: self._response.explanation.model_dump_json()
+        }
+
+        if isinstance(self._response, StagedResponse):
+            for i, snippet in enumerate(self._response.snippets):
+                items[self.SNIPPET_FILE_NAME.format(number=i)] = (
+                    snippet.model_dump_json()
+                )
+
+        return self.COMPRESSOR.zip(items)
+
+    @classmethod
+    def unzip(
+        cls, zip_data: Union[bytes, io.BytesIO]
+    ) -> Union[StagedResponse, Response]:
+        """
+        Uncompress the zipped content of the LLM response.
+
+        Args:
+            zip_data: Compressed data as bytes or BytesIO
+
+        Returns:
+            Union[StagedResponse, Response]: The decompressed (partial) response object,
+            missing response_certainty.
+        """
+        items = cls.COMPRESSOR.unzip(zip_data)
+        if cls.EXPLANATION_FILE_NAME not in items:
+            raise KeyError(
+                f"Required file {cls.EXPLANATION_FILE_NAME} not found in zip archive"
+            )
+        explanation = Explanation.model_validate_json(items[cls.EXPLANATION_FILE_NAME])
+
+        snippets = []
+        snippet_files = {
+            k: v
+            for k, v in items.items()
+            if cls.SNIPPET_FILE_NAME.replace("{number}.txt", "") in k
+        }
+        for i in range(len(snippet_files)):
+            snippets.append(
+                AnalyzedSnippet.model_validate_json(
+                    items[cls.SNIPPET_FILE_NAME.format(number=i)]
+                )
+            )
+
+        if snippets:
+            response = StagedResponse(
+                explanation=explanation, snippets=snippets, response_certainty=0
+            )
+        else:
+            response = Response(explanation=explanation, response_certainty=0)
+
+        return response
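For context, the round-trip behaviour of TextCompressor can be illustrated with a minimal sketch; the file names and strings below are made up for illustration, and note that unzip() actually returns a dict of file name to decoded text, despite the -> str annotation in the code above:

compressor = TextCompressor()

# Two made-up text payloads keyed by file name.
archive = compressor.zip({
    "build.log": "error: nothing provides gcc >= 13",
    "notes.txt": "retried twice",
})

restored = compressor.unzip(archive)  # {'build.log': '...', 'notes.txt': '...'}
assert restored["build.log"].startswith("error:")

LLMResponseCompressor builds on the same mechanism: zip_response() serializes the pydantic models to JSON files inside the archive, and the classmethod unzip() reassembles a StagedResponse when snippet files are present, otherwise a plain Response.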
@@ -61,3 +61,6 @@ def destroy():
     """Destroy db"""
     Base.metadata.drop_all(engine)
     logger.warning("Database cleaned")
+
+
+DB_MAX_RETRIES = 3  # How many times retry a db operation
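The diff does not show where DB_MAX_RETRIES is consumed. A hypothetical retry wrapper built around such a constant (assuming SQLAlchemy, which the surrounding module appears to use, and a made-up function name) might look like:

import time

from sqlalchemy.exc import OperationalError  # assumption: SQLAlchemy backs these models


def run_with_retries(operation, max_retries=DB_MAX_RETRIES, delay=1.0):
    """Call operation(), retrying transient database errors up to max_retries times."""
    for attempt in range(1, max_retries + 1):
        try:
            return operation()
        except OperationalError:
            # Re-raise on the final attempt; otherwise back off and try again.
            if attempt == max_retries:
                raise
            time.sleep(delay)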
@@ -0,0 +1,21 @@
+from logdetective.server.database.base import Base
+from logdetective.server.database.models.merge_request_jobs import (
+    Forge,
+    GitlabMergeRequestJobs,
+    Comments,
+    Reactions,
+)
+from logdetective.server.database.models.metrics import (
+    AnalyzeRequestMetrics,
+    EndpointType,
+)
+
+__all__ = [
+    Base.__name__,
+    GitlabMergeRequestJobs.__name__,
+    Comments.__name__,
+    Reactions.__name__,
+    AnalyzeRequestMetrics.__name__,
+    EndpointType.__name__,
+    Forge.__name__,
+]
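Because __all__ is assembled from the classes' own __name__ attributes, renaming a model keeps the export list in sync automatically. Assuming this __init__ lives at logdetective/server/database/models/ (the file path is not shown in this diff), callers can then import the ORM models from one place:

# Path is an assumption inferred from the import statements above.
from logdetective.server.database.models import AnalyzeRequestMetrics, Forge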