logdetective 0.2.5__tar.gz → 0.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {logdetective-0.2.5 → logdetective-0.2.6}/PKG-INFO +17 -2
- {logdetective-0.2.5 → logdetective-0.2.6}/README.md +16 -1
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/constants.py +1 -1
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/logdetective.py +22 -5
- logdetective-0.2.6/logdetective/server.py +87 -0
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/utils.py +26 -3
- {logdetective-0.2.5 → logdetective-0.2.6}/pyproject.toml +1 -1
- logdetective-0.2.5/logdetective/server.py +0 -56
- {logdetective-0.2.5 → logdetective-0.2.6}/LICENSE +0 -0
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/__init__.py +0 -0
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/drain3.ini +0 -0
- {logdetective-0.2.5 → logdetective-0.2.6}/logdetective/extractors.py +0 -0

{logdetective-0.2.5 → logdetective-0.2.6}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: logdetective
-Version: 0.2.5
+Version: 0.2.6
 Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
 License: Apache-2.0
 Author: Jiri Podivin
```

```diff
@@ -83,6 +83,10 @@ Example you want to use a different model:
 logdetective https://example.com/logs.txt --model https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_S.gguf?download=true
 logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF

+Note that streaming with some models (notably Meta-Llama-3 is broken) is broken and can be workarounded by `no-stream` option:
+
+logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --no-stream
+

 Real Example
 ------------
```

```diff
@@ -181,7 +185,7 @@ or
 Server
 ------

-FastApi based server is implemented in `logdetective/server.py`. In order to run in a development mode,
+FastApi based server is implemented in `logdetective/server.py`. In order to run it in a development mode,
 simply start llama-cpp-python server with your chosen model as described in llama-cpp-python [docs](https://llama-cpp-python.readthedocs.io/en/latest/server/#running-the-server).

 Afterwards, start the logdetective server with `fastapi dev logdetective/server.py --port 8080`.
```

```diff
@@ -189,6 +193,17 @@ Requests can then be made with post requests, for example:

 curl --header "Content-Type: application/json" --request POST --data '{"url":"<YOUR_URL_HERE>"}' http://localhost:8080/analyze

+We also have a Containerfile and composefile to run the logdetective server and llama server in containers.
+
+Before doing `podman-compose up`, make sure to set `MODELS_PATH` environment variable and point to a directory with your local model files:
+```
+$ export MODELS_PATH=/path/to/models/
+$ ll $MODELS_PATH
+-rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
+```
+
+If the variable is not set, `./models` is mounted inside by default.
+

 License
 -------
```

{logdetective-0.2.5 → logdetective-0.2.6}/README.md

```diff
@@ -55,6 +55,10 @@ Example you want to use a different model:
 logdetective https://example.com/logs.txt --model https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_S.gguf?download=true
 logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF

+Note that streaming with some models (notably Meta-Llama-3 is broken) is broken and can be workarounded by `no-stream` option:
+
+logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --no-stream
+

 Real Example
 ------------
```

```diff
@@ -153,7 +157,7 @@ or
 Server
 ------

-FastApi based server is implemented in `logdetective/server.py`. In order to run in a development mode,
+FastApi based server is implemented in `logdetective/server.py`. In order to run it in a development mode,
 simply start llama-cpp-python server with your chosen model as described in llama-cpp-python [docs](https://llama-cpp-python.readthedocs.io/en/latest/server/#running-the-server).

 Afterwards, start the logdetective server with `fastapi dev logdetective/server.py --port 8080`.
```

```diff
@@ -161,6 +165,17 @@ Requests can then be made with post requests, for example:

 curl --header "Content-Type: application/json" --request POST --data '{"url":"<YOUR_URL_HERE>"}' http://localhost:8080/analyze

+We also have a Containerfile and composefile to run the logdetective server and llama server in containers.
+
+Before doing `podman-compose up`, make sure to set `MODELS_PATH` environment variable and point to a directory with your local model files:
+```
+$ export MODELS_PATH=/path/to/models/
+$ ll $MODELS_PATH
+-rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
+```
+
+If the variable is not set, `./models` is mounted inside by default.
+

 License
 -------
```

{logdetective-0.2.5 → logdetective-0.2.6}/logdetective/constants.py

```diff
@@ -1,6 +1,6 @@

 # pylint: disable=line-too-long
-DEFAULT_ADVISOR = "
+DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.2-GGUF"

 PROMPT_TEMPLATE = """
 Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
```
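
The new default advisor is a Hugging Face repository id rather than a direct URL. As a rough illustration of how such an id, combined with the CLI's default `Q4_K_S.gguf` file suffix, can be resolved with llama-cpp-python's Hugging Face helper (the actual download path used by logdetective's `initialize_model` is not shown in this diff):

```python
# Illustrative sketch only: fetching the new DEFAULT_ADVISOR repo with the
# default Q4_K_S.gguf quantization via llama-cpp-python; not logdetective code.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="fedora-copr/Mistral-7B-Instruct-v0.2-GGUF",  # new DEFAULT_ADVISOR
    filename="*Q4_K_S.gguf",  # glob matching the default file name suffix
    verbose=False,
)
```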

{logdetective-0.2.5 → logdetective-0.2.6}/logdetective/logdetective.py

```diff
@@ -8,9 +8,8 @@ from logdetective.extractors import LLMExtractor, DrainExtractor

 LOG = logging.getLogger("logdetective")

-
-def main():
-    """Main execution function."""
+def setup_args():
+    """ Setup argument parser and return arguments. """
     parser = argparse.ArgumentParser("logdetective")
     parser.add_argument("file", type=str,
                         default="", help="The URL or path to the log file to be analyzed.")
```

```diff
@@ -21,6 +20,7 @@ def main():
                         help="Suffix of the model file name to be retrieved from Hugging Face.\
                         Makes sense only if the model is specified with Hugging Face name.",
                         default="Q4_K_S.gguf")
+    parser.add_argument("-n", "--no-stream", action='store_true')
     parser.add_argument("-S", "--summarizer", type=str, default="drain",
                         help="Choose between LLM and Drain template miner as the log summarizer.\
                         LLM must be specified as path to a model, URL or local file.")
```

```diff
@@ -32,7 +32,12 @@ def main():
                         This only makes sense when you are summarizing with Drain")
     parser.add_argument("-v", "--verbose", action='count', default=0)
     parser.add_argument("-q", "--quiet", action='store_true')
-    args = parser.parse_args()
+    return parser.parse_args()
+
+
+def main():
+    """Main execution function."""
+    args = setup_args()

     if args.verbose and args.quiet:
         sys.stderr.write("Error: --quiet and --verbose is mutually exclusive.\n")
```

```diff
@@ -83,7 +88,19 @@ def main():
     log_summary = format_snippets(log_summary)
     LOG.info("Log summary: \n %s", log_summary)

-
+    stream = True
+    if args.no_stream:
+        stream = False
+    response = process_log(log_summary, model, stream)
+    print("Explanation:")
+    if args.no_stream:
+        print(response["choices"][0]["text"])
+    else:
+        # Stream the output
+        for chunk in response:
+            delta = chunk['choices'][0]['text']
+            print(delta, end='', flush=True)
+    print()


 if __name__ == "__main__":
```
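
The two branches added to `main()` above depend on the two return shapes of a llama-cpp-python completion call: a single dict when `stream=False`, an iterator of chunks when `stream=True`. A minimal sketch of that difference, assuming llama-cpp-python's `Llama.__call__` API; the model path and prompt below are placeholders, not values from this diff:

```python
# Sketch only: the streamed vs. non-streamed return shapes that the new
# --no-stream handling in main() branches on. Model path is hypothetical.
from llama_cpp import Llama

model = Llama(model_path="./models/mistral-7b-instruct-v0.2.Q4_K_S.gguf")

# --no-stream path: a single completion dict
result = model(prompt="Explain this build failure:", stream=False, max_tokens=0)
print(result["choices"][0]["text"])

# default path: an iterator of chunks, printed as they arrive
for chunk in model(prompt="Explain this build failure:", stream=True, max_tokens=0):
    print(chunk["choices"][0]["text"], end="", flush=True)
print()
```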

logdetective-0.2.6/logdetective/server.py (new file)

```diff
@@ -0,0 +1,87 @@
+import logging
+import os
+import json
+
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+
+import requests
+
+from logdetective.constants import PROMPT_TEMPLATE
+from logdetective.extractors import DrainExtractor
+from logdetective.utils import validate_url
+
+class BuildLog(BaseModel):
+    """Model of data submitted to API.
+    """
+    url: str
+
+LOG = logging.getLogger("logdetective")
+
+app = FastAPI()
+
+LLM_CPP_HOST = os.environ.get("LLAMA_CPP_HOST", "localhost")
+LLM_CPP_SERVER_ADDRESS = f"http://{LLM_CPP_HOST}"
+LLM_CPP_SERVER_PORT = os.environ.get("LLAMA_CPP_SERVER_PORT", 8000)
+LLM_CPP_SERVER_TIMEOUT = os.environ.get("LLAMA_CPP_SERVER_TIMEOUT", 600)
+LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
+
+@app.post("/analyze", )
+async def analyze_log(build_log: BuildLog):
+    """Provide endpoint for log file submission and analysis.
+    Request must be in form {"url":"<YOUR_URL_HERE>"}.
+    URL must be valid for the request to be passed to the LLM server.
+    Meaning that it must contain appropriate scheme, path and netloc,
+    while lacking result, params or query fields.
+    """
+    extractor = DrainExtractor(verbose=True, context=True, max_clusters=8)
+
+    LOG.info("Getting summary")
+    # Perform basic validation of the URL
+    if validate_url(url=build_log.url):
+        try:
+            log_request = requests.get(build_log.url, timeout=int(LOG_SOURCE_REQUEST_TIMEOUT))
+        except requests.RequestException as ex:
+            raise HTTPException(
+                status_code=400,
+                detail=f"We couldn't obtain the logs: {ex}") from ex
+
+        if not log_request.ok:
+            raise HTTPException(status_code=400,
+                                detail="Something went wrong while getting the logs: "
+                                       f"[{log_request.status_code}] {log_request.text}")
+    else:
+        LOG.error("Invalid URL received ")
+        raise HTTPException(status_code=400,
+                            detail=f"Invalid log URL: {build_log.url}")
+
+    log = log_request.text
+    log_summary = extractor(log)
+
+    ratio = len(log_summary) / len(log.split('\n'))
+    LOG.debug("Log summary: \n %s", log_summary)
+    LOG.info("Compression ratio: %s", ratio)
+
+    LOG.info("Analyzing the text")
+    data = {
+        "prompt": PROMPT_TEMPLATE.format(log_summary),
+        "max_tokens": "0"}
+
+    try:
+        # Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
+        response = requests.post(
+            f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
+            headers={"Content-Type":"application/json"},
+            data=json.dumps(data),
+            timeout=int(LLM_CPP_SERVER_TIMEOUT))
+    except requests.RequestException as ex:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Llama-cpp query failed: {ex}") from ex
+
+    if not log_request.ok:
+        raise HTTPException(
+            status_code=400,
+            detail="Something went wrong while getting a response from the llama server: "
+                   f"[{log_request.status_code}] {log_request.text}")
+    return response.text
```
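
For completeness, the README's curl example translated to Python, assuming the server is running locally on port 8080 (`fastapi dev logdetective/server.py --port 8080`); the log URL is a placeholder:

```python
# Sketch: submit a log URL to the /analyze endpoint added in server.py above.
import requests

resp = requests.post(
    "http://localhost:8080/analyze",
    json={"url": "https://example.com/logs.txt"},  # placeholder log URL
    timeout=600,  # the backing LLM call can take a while
)
resp.raise_for_status()
# The endpoint returns the llama-cpp /v1/completions response body as text.
print(resp.text)
```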

{logdetective-0.2.5 → logdetective-0.2.6}/logdetective/utils.py

```diff
@@ -53,6 +53,9 @@ def initialize_model(model_pth: str, filename_suffix: str = ".gguf", verbose: bool
         filename_suffix (str): suffix of the model file name to be pulled from Hugging Face
         verbose (bool): level of verbosity for llamacpp
     """
+
+    LOG.info("Loading model from %s", model_pth)
+
     if os.path.isfile(model_pth):
         model = Llama(
             model_path=model_pth,
```

```diff
@@ -68,7 +71,7 @@ def initialize_model(model_pth: str, filename_suffix: str = ".gguf", verbose: bool
     return model


-def process_log(log: str, model: Llama) -> str:
+def process_log(log: str, model: Llama, stream: bool) -> str:
     """
     Processes a given log using the provided language model and returns its summary.

```

```diff
@@ -79,11 +82,16 @@ def process_log(log: str, model: Llama) -> str:
     Returns:
         str: The summary of the given log generated by the language model.
     """
-    return model(
+    return model(
+        prompt=PROMPT_TEMPLATE.format(log),
+        stream=stream,
+        max_tokens=0)


 def retrieve_log_content(log_path: str) -> str:
-    """Get content of the file on the log_path path."""
+    """Get content of the file on the log_path path.
+    Path is assumed to be valid URL if it has a scheme.
+    Otherwise it attempts to pull it from local filesystem."""
     parsed_url = urlparse(log_path)
     log = ""

```

```diff
@@ -113,3 +121,18 @@ def format_snippets(snippets: list[str]) -> str:
         ================
         """
     return summary
+
+
+def validate_url(url: str) -> bool:
+    """Validate incoming URL to be at least somewhat sensible for log files
+    Only http and https protocols permitted. No result, params or query fields allowed.
+    Either netloc or path must have non-zero length.
+    """
+    result = urlparse(url)
+    if result.scheme not in ['http', 'https']:
+        return False
+    if any([result.params, result.query, result.fragment]):
+        return False
+    if not (result.path or result.netloc):
+        return False
+    return True
```
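
A quick sketch of what the rules in the new `validate_url()` accept and reject (http/https scheme only, no params/query/fragment, and a non-empty path or netloc), assuming the package is importable:

```python
# Behaviour implied by validate_url() as added in the hunk above.
from logdetective.utils import validate_url

assert validate_url("https://example.com/logs.txt")            # plain log URL passes
assert not validate_url("ftp://example.com/logs.txt")          # scheme not allowed
assert not validate_url("https://example.com/logs.txt?x=1")    # query field present
assert not validate_url("https://")                            # no netloc or path
```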

logdetective-0.2.5/logdetective/server.py (removed)

```diff
@@ -1,56 +0,0 @@
-import logging
-import os
-import json
-
-from fastapi import FastAPI
-from pydantic import BaseModel
-
-import requests
-
-from logdetective.constants import PROMPT_TEMPLATE
-from logdetective.extractors import DrainExtractor
-
-
-class BuildLog(BaseModel):
-    """Model of data submitted to API.
-    """
-    url: str
-
-LOG = logging.getLogger("logdetective")
-
-app = FastAPI()
-
-LLM_CPP_SERVER_ADDRESS = os.environ.get("LLAMA_CPP_SERVER", " http://localhost")
-LLM_CPP_SERVER_PORT = os.environ.get("LLAMA_CPP_SERVER_PORT", 8000)
-LLM_CPP_SERVER_TIMEOUT = os.environ.get("LLAMA_CPP_SERVER_TIMEOUT", 600)
-LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
-
-@app.post("/analyze", )
-async def analyze_log(build_log: BuildLog):
-    """Provide endpoint for log file submission and analysis.
-    Request must be in form {"url":"<YOUR_URL_HERE>"}.
-    """
-    extractor = DrainExtractor(verbose=True, context=True, max_clusters=8)
-
-    LOG.info("Getting summary")
-
-    log = requests.get(build_log.url, timeout=int(LOG_SOURCE_REQUEST_TIMEOUT)).text
-    log_summary = extractor(log)
-
-    ratio = len(log_summary) / len(log.split('\n'))
-    LOG.debug("Log summary: \n %s", log_summary)
-    LOG.info("Compression ratio: %s", ratio)
-
-    LOG.info("Analyzing the text")
-    data = {
-        "prompt": PROMPT_TEMPLATE.format(log_summary),
-        "max_tokens": "0"}
-
-    # Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
-    response = requests.post(
-        f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
-        headers={"Content-Type":"application/json"},
-        data=json.dumps(data),
-        timeout=int(LLM_CPP_SERVER_TIMEOUT))
-
-    return response.text
```

Files without changes: {logdetective-0.2.5 → logdetective-0.2.6}/LICENSE, logdetective/__init__.py, logdetective/drain3.ini, logdetective/extractors.py.