logdetective 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/constants.py CHANGED
@@ -1,6 +1,6 @@
 
  # pylint: disable=line-too-long
- DEFAULT_ADVISOR = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
+ DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.2-GGUF"
 
  PROMPT_TEMPLATE = """
  Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
@@ -30,3 +30,16 @@ Log:
  Answer:
 
  """
+
+ SNIPPET_PROMPT_TEMPLATE = """
+ Analyse following RPM build log snippet.
+ Analysis of the snippets must be in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
+ Snippets themselves must not be altered in any way whatsoever.
+
+ Snippet:
+
+ {}
+
+ Analysis:
+
+ """
logdetective/logdetective.py CHANGED
@@ -3,14 +3,14 @@ import logging
  import sys
 
  from logdetective.constants import DEFAULT_ADVISOR
- from logdetective.utils import process_log, initialize_model, retrieve_log_content, format_snippets
+ from logdetective.utils import (
+ process_log, initialize_model, retrieve_log_content, format_snippets, compute_certainty)
  from logdetective.extractors import LLMExtractor, DrainExtractor
 
  LOG = logging.getLogger("logdetective")
 
-
- def main():
- """Main execution function."""
+ def setup_args():
+ """ Setup argument parser and return arguments. """
  parser = argparse.ArgumentParser("logdetective")
  parser.add_argument("file", type=str,
  default="", help="The URL or path to the log file to be analyzed.")
@@ -21,6 +21,7 @@ def main():
  help="Suffix of the model file name to be retrieved from Hugging Face.\
  Makes sense only if the model is specified with Hugging Face name.",
  default="Q4_K_S.gguf")
+ parser.add_argument("-n", "--no-stream", action='store_true')
  parser.add_argument("-S", "--summarizer", type=str, default="drain",
  help="Choose between LLM and Drain template miner as the log summarizer.\
  LLM must be specified as path to a model, URL or local file.")
@@ -32,7 +33,12 @@ def main():
  This only makes sense when you are summarizing with Drain")
  parser.add_argument("-v", "--verbose", action='count', default=0)
  parser.add_argument("-q", "--quiet", action='store_true')
- args = parser.parse_args()
+ return parser.parse_args()
+
+
+ def main():
+ """Main execution function."""
+ args = setup_args()
 
  if args.verbose and args.quiet:
  sys.stderr.write("Error: --quiet and --verbose is mutually exclusive.\n")
@@ -83,7 +89,25 @@ def main():
  log_summary = format_snippets(log_summary)
  LOG.info("Log summary: \n %s", log_summary)
 
- print(f"Explanation: \n{process_log(log_summary, model)}")
+ stream = True
+ if args.no_stream:
+ stream = False
+ response = process_log(log_summary, model, stream)
+ probs = []
+ print("Explanation:")
+ if args.no_stream:
+ print(response["choices"][0]["text"])
+ probs = response["choices"][0]["logprobs"]["top_logprobs"]
+ else:
+ # Stream the output
+ for chunk in response:
+ if isinstance(chunk["choices"][0]["logprobs"], dict):
+ probs.extend(chunk["choices"][0]["logprobs"]["top_logprobs"])
+ delta = chunk['choices'][0]['text']
+ print(delta, end='', flush=True)
+ certainty = compute_certainty(probs)
+
+ print(f"\nResponse certainty: {certainty:.2f}%\n")
 
 
  if __name__ == "__main__":
logdetective/server.py CHANGED
@@ -1,56 +1,192 @@
+ import json
  import logging
  import os
- import json
+ from typing import List
 
- from fastapi import FastAPI
+ from llama_cpp import CreateCompletionResponse
+ from fastapi import FastAPI, HTTPException
  from pydantic import BaseModel
 
  import requests
 
- from logdetective.constants import PROMPT_TEMPLATE
+ from logdetective.constants import PROMPT_TEMPLATE, SNIPPET_PROMPT_TEMPLATE
  from logdetective.extractors import DrainExtractor
-
+ from logdetective.utils import validate_url, compute_certainty
 
  class BuildLog(BaseModel):
  """Model of data submitted to API.
  """
  url: str
 
+
+ class Response(BaseModel):
+ """Model of data returned by Log Detective API
+
+ explanation: CreateCompletionResponse
+ https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.llama_types.CreateCompletionResponse
+ response_certainty: float
+ """
+ explanation: CreateCompletionResponse
+ response_certainty: float
+
+
+ class StagedResponse(Response):
+ """Model of data returned by Log Detective API when called when staged response
+ is requested. Contains list of reponses to prompts for individual snippets.
+
+ explanation: CreateCompletionResponse
+ https://llama-cpp-python.readthedocs.io/en/latest/api-reference/#llama_cpp.llama_types.CreateCompletionResponse
+ response_certainty: float
+ snippets: list of CreateCompletionResponse
+ """
+ snippets: List[CreateCompletionResponse]
+
+
  LOG = logging.getLogger("logdetective")
 
  app = FastAPI()
 
- LLM_CPP_SERVER_ADDRESS = os.environ.get("LLAMA_CPP_SERVER", " http://localhost")
+ LLM_CPP_HOST = os.environ.get("LLAMA_CPP_HOST", "localhost")
+ LLM_CPP_SERVER_ADDRESS = f"http://{LLM_CPP_HOST}"
  LLM_CPP_SERVER_PORT = os.environ.get("LLAMA_CPP_SERVER_PORT", 8000)
  LLM_CPP_SERVER_TIMEOUT = os.environ.get("LLAMA_CPP_SERVER_TIMEOUT", 600)
  LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
 
- @app.post("/analyze", )
- async def analyze_log(build_log: BuildLog):
- """Provide endpoint for log file submission and analysis.
- Request must be in form {"url":"<YOUR_URL_HERE>"}.
+
+ def process_url(url: str) -> str:
+ """Validate log URL and return log text.
+ """
+ if validate_url(url=url):
+ try:
+ log_request = requests.get(url, timeout=int(LOG_SOURCE_REQUEST_TIMEOUT))
+ except requests.RequestException as ex:
+ raise HTTPException(
+ status_code=400,
+ detail=f"We couldn't obtain the logs: {ex}") from ex
+
+ if not log_request.ok:
+ raise HTTPException(status_code=400,
+ detail="Something went wrong while getting the logs: "
+ f"[{log_request.status_code}] {log_request.text}")
+ else:
+ LOG.error("Invalid URL received ")
+ raise HTTPException(status_code=400,
+ detail=f"Invalid log URL: {url}")
+
+ return log_request.text
+
+
+ def mine_logs(log: str) -> List[str]:
+ """Extract snippets from log text
  """
  extractor = DrainExtractor(verbose=True, context=True, max_clusters=8)
 
  LOG.info("Getting summary")
-
- log = requests.get(build_log.url, timeout=int(LOG_SOURCE_REQUEST_TIMEOUT)).text
  log_summary = extractor(log)
 
  ratio = len(log_summary) / len(log.split('\n'))
  LOG.debug("Log summary: \n %s", log_summary)
  LOG.info("Compression ratio: %s", ratio)
 
+
+ return log_summary
+
+ def submit_text(text: str, max_tokens: int = 0, log_probs: int = 1):
+ """Submit prompt to LLM.
+ max_tokens: number of tokens to be produces, 0 indicates run until encountering EOS
+ log_probs: number of token choices to produce log probs for
+ """
  LOG.info("Analyzing the text")
  data = {
- "prompt": PROMPT_TEMPLATE.format(log_summary),
- "max_tokens": "0"}
+ "prompt": text,
+ "max_tokens": str(max_tokens),
+ "logprobs": str(log_probs)}
+
+ try:
+ # Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
+ response = requests.post(
+ f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
+ headers={"Content-Type":"application/json"},
+ data=json.dumps(data),
+ timeout=int(LLM_CPP_SERVER_TIMEOUT))
+ except requests.RequestException as ex:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Llama-cpp query failed: {ex}") from ex
+
+ if not response.ok:
+ raise HTTPException(
+ status_code=400,
+ detail="Something went wrong while getting a response from the llama server: "
+ f"[{response.status_code}] {response.text}")
+ try:
+ response = json.loads(response.text)
+ except UnicodeDecodeError as ex:
+ LOG.error("Error encountered while parsing llama server response: %s", ex)
+ raise HTTPException(
+ status_code=400,
+ detail=f"Couldn't parse the response.\nError: {ex}\nData: {response.text}") from ex
+
+ return CreateCompletionResponse(response)
+
+
+ @app.post("/analyze", response_model=Response)
+ async def analyze_log(build_log: BuildLog):
+ """Provide endpoint for log file submission and analysis.
+ Request must be in form {"url":"<YOUR_URL_HERE>"}.
+ URL must be valid for the request to be passed to the LLM server.
+ Meaning that it must contain appropriate scheme, path and netloc,
+ while lacking result, params or query fields.
+ """
+ log_text = process_url(build_log.url)
+ log_summary = mine_logs(log_text)
+ response = submit_text(PROMPT_TEMPLATE.format(log_summary))
+
+ if "logprobs" in response["choices"][0]:
+ try:
+ certainty = compute_certainty(
+ response["choices"][0]["logprobs"]["top_logprobs"])
+ except ValueError as ex:
+ LOG.error("Error encountered while computing certainty: %s", ex)
+ raise HTTPException(
+ status_code=400,
+ detail=f"Couldn't compute certainty with data:\n"
+ f"{response["choices"][0]["logprobs"]["top_logprobs"]}") from ex
+
+ return Response(explanation=response, response_certainty=certainty)
+
+
+ @app.post("/analyze/staged", response_model=StagedResponse)
+ async def analyze_log_staged(build_log: BuildLog):
+ """Provide endpoint for log file submission and analysis.
+ Request must be in form {"url":"<YOUR_URL_HERE>"}.
+ URL must be valid for the request to be passed to the LLM server.
+ Meaning that it must contain appropriate scheme, path and netloc,
+ while lacking result, params or query fields.
+ """
+ log_text = process_url(build_log.url)
+ log_summary = mine_logs(log_text)
+
+ analyzed_snippets = []
+
+ for snippet in log_summary:
+ response = submit_text(SNIPPET_PROMPT_TEMPLATE.format(snippet))
+ analyzed_snippets.append(response)
+
+ final_analysis = submit_text(
+ PROMPT_TEMPLATE.format([e["choices"][0]["text"] for e in analyzed_snippets]))
 
- # Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
- response = requests.post(
- f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
- headers={"Content-Type":"application/json"},
- data=json.dumps(data),
- timeout=int(LLM_CPP_SERVER_TIMEOUT))
+ certainty = 0
+ if "logprobs" in final_analysis["choices"][0]:
+ try:
+ certainty = compute_certainty(
+ final_analysis["choices"][0]["logprobs"]["top_logprobs"])
+ except ValueError as ex:
+ LOG.error("Error encountered while computing certainty: %s", ex)
+ raise HTTPException(
+ status_code=400,
+ detail=f"Couldn't compute certainty with data:\n"
+ f"{final_analysis["choices"][0]["logprobs"]["top_logprobs"]}") from ex
 
- return response.text
+ return StagedResponse(
+ explanation=final_analysis, snippets=analyzed_snippets, response_certainty=certainty)
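
With the `Response` and `StagedResponse` models, clients now receive structured JSON rather than raw llama-cpp text. A hedged sketch of calling the new staged endpoint with `requests`, assuming the development setup from the README (logdetective server on localhost:8080) and a placeholder log URL:

```python
import requests

# Assumed local dev server, as described in the README (fastapi dev ... --port 8080).
BASE = "http://localhost:8080"
payload = {"url": "https://example.com/logs.txt"}  # placeholder log URL

resp = requests.post(f"{BASE}/analyze/staged", json=payload, timeout=600)
resp.raise_for_status()
staged = resp.json()

# StagedResponse fields: explanation, response_certainty, snippets.
for snippet_completion in staged["snippets"]:
    print(snippet_completion["choices"][0]["text"])
print(staged["explanation"]["choices"][0]["text"])
print(f'certainty: {staged["response_certainty"]:.2f}%')
```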
logdetective/utils.py CHANGED
@@ -1,10 +1,11 @@
  import logging
  import os
+ from typing import Iterator, List, Dict
  from urllib.parse import urlparse
-
+ import numpy as np
  import requests
 
- from llama_cpp import Llama
+ from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
  from logdetective.constants import PROMPT_TEMPLATE
 
 
@@ -53,24 +54,45 @@ def initialize_model(model_pth: str, filename_suffix: str = ".gguf", verbose: bo
  filename_suffix (str): suffix of the model file name to be pulled from Hugging Face
  verbose (bool): level of verbosity for llamacpp
  """
+
+ LOG.info("Loading model from %s", model_pth)
+
  if os.path.isfile(model_pth):
  model = Llama(
  model_path=model_pth,
  n_ctx=0, # Maximum context for the model
- verbose=verbose)
+ verbose=verbose,
+ logits_all=True)
  else:
  model = Llama.from_pretrained(
  model_pth,
  f"*{filename_suffix}",
  n_ctx=0, # Maximum context for the model
- verbose=verbose)
+ verbose=verbose,
+ logits_all=True)
 
  return model
 
 
- def process_log(log: str, model: Llama) -> str:
+ def compute_certainty(probs: List[Dict[str, float] | None]) -> float:
+ """Compute certainty of repsponse based on average logit probability.
+ Log probability is log(p), isn't really readable for most people, especially in compound.
+ In this case it's just a matter of applying inverse operation exp.
+ Of course that leaves you with a value in range <0, 1> so it needs to be multiplied by 100.
+ Simply put, this is the most straightforward way to get the numbers out.
  """
- Processes a given log using the provided language model and returns its summary.
+
+ top_logprobs = [
+ np.exp(x) * 100 for e in probs if isinstance(e, dict) for x in e.values()]
+ certainty = np.median(top_logprobs, axis=0)
+ if np.isnan(certainty):
+ raise ValueError("NaN certainty of answer")
+ return certainty
+
+
+ def process_log(log: str, model: Llama, stream: bool) -> (
+ CreateCompletionResponse | Iterator[CreateCompletionStreamResponse]):
+ """Processes a given log using the provided language model and returns its summary.
 
  Args:
  log (str): The input log to be processed.
@@ -79,11 +101,19 @@ def process_log(log: str, model: Llama) -> str:
  Returns:
  str: The summary of the given log generated by the language model.
  """
- return model(PROMPT_TEMPLATE.format(log), max_tokens=0)["choices"][0]["text"]
+ response = model(
+ prompt=PROMPT_TEMPLATE.format(log),
+ stream=stream,
+ max_tokens=0,
+ logprobs=1)
+
+ return response
 
 
  def retrieve_log_content(log_path: str) -> str:
- """Get content of the file on the log_path path."""
+ """Get content of the file on the log_path path.
+ Path is assumed to be valid URL if it has a scheme.
+ Otherwise it attempts to pull it from local filesystem."""
  parsed_url = urlparse(log_path)
  log = ""
 
@@ -113,3 +143,18 @@ def format_snippets(snippets: list[str]) -> str:
  ================
  """
  return summary
+
+
+ def validate_url(url: str) -> bool:
+ """Validate incoming URL to be at least somewhat sensible for log files
+ Only http and https protocols permitted. No result, params or query fields allowed.
+ Either netloc or path must have non-zero length.
+ """
+ result = urlparse(url)
+ if result.scheme not in ['http', 'https']:
+ return False
+ if any([result.params, result.query, result.fragment]):
+ return False
+ if not (result.path or result.netloc):
+ return False
+ return True
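
`compute_certainty` converts each top log-probability back to a percentage with `exp(logprob) * 100` and then takes the median, so the result is easy to verify by hand. A small worked example with made-up log-probabilities:

```python
import numpy as np

# Hypothetical per-token top_logprobs, shaped like the llama.cpp output:
# a list of {token: logprob} dicts, with None entries skipped.
probs = [{"foo": -0.1}, None, {"bar": -0.7}, {"baz": -2.3}]

top_logprobs = [np.exp(x) * 100 for e in probs if isinstance(e, dict) for x in e.values()]
# exp(-0.1)*100 ~ 90.5, exp(-0.7)*100 ~ 49.7, exp(-2.3)*100 ~ 10.0
certainty = np.median(top_logprobs)
print(f"{certainty:.1f}%")  # ~ 49.7%, the median per-token confidence
```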
logdetective-0.2.5.dist-info/METADATA → logdetective-0.2.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: logdetective
- Version: 0.2.5
+ Version: 0.2.7
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
  License: Apache-2.0
  Author: Jiri Podivin
@@ -21,6 +21,7 @@ Provides-Extra: server
  Requires-Dist: drain3 (>=0.9.11,<0.10.0)
  Requires-Dist: huggingface-hub (>0.23.2)
  Requires-Dist: llama-cpp-python (>=0.2.56,<0.3.0,!=0.2.86)
+ Requires-Dist: numpy (>=1.26.0,<2.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
  Project-URL: homepage, https://github.com/fedora-copr/logdetective
  Project-URL: issues, https://github.com/fedora-copr/logdetective/issues
@@ -38,7 +39,7 @@ A Python tool to analyze logs using a Language Model (LLM) and Drain template mi
  Installation
  ------------
 
- ** Fedora 40+ **
+ **Fedora 40+**
 
  dnf install logdetective
 
@@ -83,6 +84,10 @@ Example you want to use a different model:
  logdetective https://example.com/logs.txt --model https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_S.gguf?download=true
  logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF
 
+ Note that streaming with some models (notably Meta-Llama-3 is broken) is broken and can be workarounded by `no-stream` option:
+
+ logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --no-stream
+
 
  Real Example
  ------------
@@ -181,7 +186,7 @@ or
  Server
  ------
 
- FastApi based server is implemented in `logdetective/server.py`. In order to run in a development mode,
+ FastApi based server is implemented in `logdetective/server.py`. In order to run it in a development mode,
  simply start llama-cpp-python server with your chosen model as described in llama-cpp-python [docs](https://llama-cpp-python.readthedocs.io/en/latest/server/#running-the-server).
 
  Afterwards, start the logdetective server with `fastapi dev logdetective/server.py --port 8080`.
@@ -189,6 +194,17 @@ Requests can then be made with post requests, for example:
 
  curl --header "Content-Type: application/json" --request POST --data '{"url":"<YOUR_URL_HERE>"}' http://localhost:8080/analyze
 
+ We also have a Containerfile and composefile to run the logdetective server and llama server in containers.
+
+ Before doing `podman-compose up`, make sure to set `MODELS_PATH` environment variable and point to a directory with your local model files:
+ ```
+ $ export MODELS_PATH=/path/to/models/
+ $ ll $MODELS_PATH
+ -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
+ ```
+
+ If the variable is not set, `./models` is mounted inside by default.
+
 
  License
  -------
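
The curl call documented in the README section above can also be issued from Python; this is a sketch assuming the same local development setup (server on port 8080) and the 0.2.7 response model with `explanation` and `response_certainty` fields:

```python
import requests

# Python equivalent of the documented curl request, against an assumed local server.
resp = requests.post(
    "http://localhost:8080/analyze",
    json={"url": "<YOUR_URL_HERE>"},  # placeholder, as in the README
    timeout=600,
)
resp.raise_for_status()
result = resp.json()
print(result["explanation"]["choices"][0]["text"])
print(f'certainty: {result["response_certainty"]:.2f}%')
```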
logdetective-0.2.7.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+ logdetective/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ logdetective/constants.py,sha256=1Ls2VJXb7NwSgi_HmTOA1c52K16SZIeDYBXlvBJ07zU,991
+ logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
+ logdetective/extractors.py,sha256=eRizRiKhC3MPTHXS5nlRKcEudEaqct7G28V1bZYGkqI,3103
+ logdetective/logdetective.py,sha256=f7ASCJg_Yt6VBFieXBYgQYdenfXjC60ZdLHhzQHideI,4372
+ logdetective/server.py,sha256=m0NPtk9tAUzyu9O8jIAfgEzynZ-WCHqVvCJkHOm08Ks,7073
+ logdetective/utils.py,sha256=nTbaDVEfbHVQPTZe58T04HHZ6JWUJ1PonRRnzGX8hY0,4794
+ logdetective-0.2.7.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ logdetective-0.2.7.dist-info/METADATA,sha256=3iqnKnVJy6aTaAqP77btyqSGqCpjT8_PQqpWaNwLKHg,9100
+ logdetective-0.2.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ logdetective-0.2.7.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+ logdetective-0.2.7.dist-info/RECORD,,
@@ -1,12 +0,0 @@
1
- logdetective/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- logdetective/constants.py,sha256=ObrYDQiPvZwCpokLbLQoSY_w_-wHl7l94EkXae7Xgq0,708
3
- logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
4
- logdetective/extractors.py,sha256=eRizRiKhC3MPTHXS5nlRKcEudEaqct7G28V1bZYGkqI,3103
5
- logdetective/logdetective.py,sha256=8pCnkXJ2qfSmXGHoKmTichLiI9tMAot3OOge2lgQBuI,3545
6
- logdetective/server.py,sha256=GAU6mggoZSf-ER3AHhmd7BKGDLh5ZcsnmkdHTFd_lTU,1715
7
- logdetective/utils.py,sha256=XRqVvPbAQ0ZAHGivHhAA1kTY8Tv6JAeSsA7gMMoPz8E,3034
8
- logdetective-0.2.5.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
9
- logdetective-0.2.5.dist-info/METADATA,sha256=nSobNDJzk6iOUl0oqFRNV9Ha0LLCLDbL8uLsX3aPnWY,8362
10
- logdetective-0.2.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
11
- logdetective-0.2.5.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
12
- logdetective-0.2.5.dist-info/RECORD,,