logdetective 0.5.8__tar.gz → 0.5.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {logdetective-0.5.8 → logdetective-0.5.10}/PKG-INFO +18 -3
  2. {logdetective-0.5.8 → logdetective-0.5.10}/README.md +15 -0
  3. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/constants.py +6 -1
  4. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/extractors.py +9 -3
  5. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/logdetective.py +23 -3
  6. logdetective-0.5.10/logdetective/models.py +33 -0
  7. logdetective-0.5.10/logdetective/prompts.yml +55 -0
  8. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/server.py +11 -10
  9. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/utils.py +13 -1
  10. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/utils.py +17 -15
  11. logdetective-0.5.10/logdetective.1.asciidoc +83 -0
  12. {logdetective-0.5.8 → logdetective-0.5.10}/pyproject.toml +7 -5
  13. {logdetective-0.5.8 → logdetective-0.5.10}/LICENSE +0 -0
  14. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/__init__.py +0 -0
  15. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/drain3.ini +0 -0
  16. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/__init__.py +0 -0
  17. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/database/__init__.py +0 -0
  18. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/database/base.py +0 -0
  19. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/database/models.py +0 -0
  20. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/metric.py +0 -0
  21. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/models.py +0 -0
  22. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/plot.py +0 -0
  23. {logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/templates/gitlab_comment.md.j2 +0 -0

{logdetective-0.5.8 → logdetective-0.5.10}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: logdetective
- Version: 0.5.8
+ Version: 0.5.10
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
  License: Apache-2.0
  Author: Jiri Podivin
@@ -29,9 +29,9 @@ Requires-Dist: matplotlib (>=3.8.4,<4.0.0) ; extra == "server" or extra == "server-testing"
  Requires-Dist: numpy (>=1.26.0)
  Requires-Dist: psycopg2 (>=2.9.9,<3.0.0) ; extra == "server"
  Requires-Dist: psycopg2-binary (>=2.9.9,<3.0.0) ; extra == "server-testing"
- Requires-Dist: pydantic (>=2.8.2,<3.0.0) ; extra == "server" or extra == "server-testing"
+ Requires-Dist: pydantic (>=2.8.2,<3.0.0)
  Requires-Dist: python-gitlab (>=4.4.0)
- Requires-Dist: pyyaml (>=6.0.1,<7.0.0) ; extra == "server" or extra == "server-testing"
+ Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
  Requires-Dist: requests (>0.2.31)
  Requires-Dist: sqlalchemy (>=2.0.36,<3.0.0) ; extra == "server" or extra == "server-testing"
  Project-URL: homepage, https://github.com/fedora-copr/logdetective
@@ -363,6 +363,21 @@ http GET "localhost:8080/metrics/analyze/requests?days=5" > /tmp/plot_days.svg
  http GET "localhost:8080/metrics/analyze/requests?weeks=5" > /tmp/plot_weeks.svg
  ```

+ System Prompts
+ --------------
+
+ Prompt templates used by Log Detective are stored in the `prompts.yml` file.
+ It is possible to modify the file in place, or provide your own.
+ In CLI you can override prompt templates location using `--prompts` option,
+ while in the container service deployment the `LOGDETECTIVE_PROMPTS` environment variable
+ is used instead.
+
+ Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
+ with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.
+
+ Number of replacement fields in new prompts, must be the same as in originals.
+ Although their position may be different.
+
  License
  -------


{logdetective-0.5.8 → logdetective-0.5.10}/README.md

@@ -323,6 +323,21 @@ http GET "localhost:8080/metrics/analyze/requests?days=5" > /tmp/plot_days.svg
  http GET "localhost:8080/metrics/analyze/requests?weeks=5" > /tmp/plot_weeks.svg
  ```

+ System Prompts
+ --------------
+
+ Prompt templates used by Log Detective are stored in the `prompts.yml` file.
+ It is possible to modify the file in place, or provide your own.
+ In CLI you can override prompt templates location using `--prompts` option,
+ while in the container service deployment the `LOGDETECTIVE_PROMPTS` environment variable
+ is used instead.
+
+ Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
+ with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.
+
+ Number of replacement fields in new prompts, must be the same as in originals.
+ Although their position may be different.
+
  License
  -------

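
To make the replacement-field rule above concrete, here is a minimal sketch (template strings shortened; the real defaults live in `constants.py` and `prompts.yml`). A custom file would then be passed on the command line as, for example, `logdetective --prompts ./my_prompts.yml build.log` (file names hypothetical):

```python
# Default-style template with a single replacement field for the snippets.
original = "Snippets:\n\n{}\n\nAnalysis:\n"

# A rearranged custom template is fine: it still has exactly one {} field.
custom = "Explain the failure seen in these snippets:\n\n{}\n"

assert original.count("{}") == custom.count("{}")
print(custom.format("================\n[error] linker failed"))
```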

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/constants.py

@@ -1,3 +1,8 @@
+ """This file contains various constants to be used as a fallback
+ in case other values are not specified. Prompt templates should be modified
+ in prompts.yaml instead.
+ """
+
  # pylint: disable=line-too-long
  DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.2-GGUF"

@@ -19,7 +24,7 @@ Analysis:

  """

- SUMMARIZE_PROMPT_TEMPLATE = """
+ SUMMARIZATION_PROMPT_TEMPLATE = """
  Does following log contain error or issue?

  Log:

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/extractors.py

@@ -6,7 +6,7 @@ import drain3
  from drain3.template_miner_config import TemplateMinerConfig
  from llama_cpp import Llama, LlamaGrammar

- from logdetective.constants import SUMMARIZE_PROMPT_TEMPLATE
+ from logdetective.constants import SUMMARIZATION_PROMPT_TEMPLATE
  from logdetective.utils import get_chunks

  LOG = logging.getLogger("logdetective")
@@ -17,12 +17,18 @@ class LLMExtractor:
      A class that extracts relevant information from logs using a language model.
      """

-     def __init__(self, model: Llama, n_lines: int = 2):
+     def __init__(
+         self,
+         model: Llama,
+         n_lines: int = 2,
+         prompt: str = SUMMARIZATION_PROMPT_TEMPLATE,
+     ):
          self.model = model
          self.n_lines = n_lines
          self.grammar = LlamaGrammar.from_string(
              'root ::= ("Yes" | "No")', verbose=False
          )
+         self.prompt = prompt

      def __call__(
          self, log: str, n_lines: int = 2, neighbors: bool = False
@@ -41,7 +47,7 @@ class LLMExtractor:

          for i in range(0, len(log_lines), self.n_lines):
              block = "\n".join(log_lines[i: i + self.n_lines])
-             prompt = SUMMARIZE_PROMPT_TEMPLATE.format(log)
+             prompt = self.prompt.format(log)
              out = self.model(prompt, max_tokens=7, grammar=self.grammar)
              out = f"{out['choices'][0]['text']}\n"
              results.append((block, out))
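
With the widened `__init__` above, callers can inject a custom summarization prompt into `LLMExtractor`. A minimal sketch, assuming logdetective 0.5.10 and a local GGUF model (paths hypothetical, untested); the template must keep a single `{}` field and stay a yes/no question, since the extractor constrains output with a `("Yes" | "No")` grammar:

```python
from llama_cpp import Llama

from logdetective.extractors import LLMExtractor

# Hypothetical local model file; any llama-cpp-python compatible GGUF works.
model = Llama(model_path="./mistral-7b-instruct.Q4_K_M.gguf", verbose=False)

extractor = LLMExtractor(
    model,
    n_lines=2,
    # One {} field, phrased as a yes/no question to match the grammar.
    prompt="Does the following log chunk describe a failure?\n\nLog:\n\n{}\n\nAnswer:\n",
)

with open("build.log", encoding="utf-8") as f:  # hypothetical log file
    results = extractor(f.read())  # summarize the log with the custom prompt
```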

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/logdetective.py

@@ -1,6 +1,7 @@
  import argparse
  import logging
  import sys
+ import os

  from logdetective.constants import DEFAULT_ADVISOR
  from logdetective.utils import (
@@ -9,6 +10,7 @@ from logdetective.utils import (
      retrieve_log_content,
      format_snippets,
      compute_certainty,
+     load_prompts,
  )
  from logdetective.extractors import LLMExtractor, DrainExtractor

@@ -65,10 +67,16 @@ def setup_args():
      )
      parser.add_argument("-v", "--verbose", action="count", default=0)
      parser.add_argument("-q", "--quiet", action="store_true")
+     parser.add_argument(
+         "--prompts",
+         type=str,
+         default=f"{os.path.dirname(__file__)}/prompts.yml",
+         help="Path to prompt configuration file."
+     )
      return parser.parse_args()


- def main(): # pylint: disable=too-many-statements
+ def main(): # pylint: disable=too-many-statements,too-many-locals
      """Main execution function."""
      args = setup_args()

@@ -83,6 +91,9 @@ def main(): # pylint: disable=too-many-statements
      if args.quiet:
          log_level = 0

+     # Get prompts configuration
+     prompts_configuration = load_prompts(args.prompts)
+
      logging.basicConfig(stream=sys.stdout)
      LOG.setLevel(log_level)

@@ -103,7 +114,11 @@ def main(): # pylint: disable=too-many-statements
          )
      else:
          summarizer_model = initialize_model(args.summarizer, verbose=args.verbose > 2)
-         extractor = LLMExtractor(summarizer_model, args.verbose > 1)
+         extractor = LLMExtractor(
+             summarizer_model,
+             args.verbose > 1,
+             prompts_configuration.summarization_prompt_template,
+         )

      LOG.info("Getting summary")

@@ -127,7 +142,12 @@ def main(): # pylint: disable=too-many-statements
      stream = True
      if args.no_stream:
          stream = False
-     response = process_log(log_summary, model, stream)
+     response = process_log(
+         log_summary,
+         model,
+         stream,
+         prompt_template=prompts_configuration.prompt_template,
+     )
      probs = []
      print("Explanation:")
      # We need to extract top token probability from the response

logdetective-0.5.10/logdetective/models.py (new file)

@@ -0,0 +1,33 @@
+ from typing import Optional
+ from pydantic import BaseModel
+
+ from logdetective.constants import (
+     PROMPT_TEMPLATE,
+     PROMPT_TEMPLATE_STAGED,
+     SUMMARIZATION_PROMPT_TEMPLATE,
+     SNIPPET_PROMPT_TEMPLATE,
+ )
+
+
+ class PromptConfig(BaseModel):
+     """Configuration for basic log detective prompts."""
+
+     prompt_template: str = PROMPT_TEMPLATE
+     summarization_prompt_template: str = SUMMARIZATION_PROMPT_TEMPLATE
+     snippet_prompt_template: str = SNIPPET_PROMPT_TEMPLATE
+     prompt_template_staged: str = PROMPT_TEMPLATE_STAGED
+
+     def __init__(self, data: Optional[dict] = None):
+         super().__init__()
+         if data is None:
+             return
+         self.prompt_template = data.get("prompt_template", PROMPT_TEMPLATE)
+         self.summarization_prompt_template = data.get(
+             "summarization_prompt_template", SUMMARIZATION_PROMPT_TEMPLATE
+         )
+         self.snippet_prompt_template = data.get(
+             "snippet_prompt_template", SNIPPET_PROMPT_TEMPLATE
+         )
+         self.prompt_template_staged = data.get(
+             "prompt_template_staged", PROMPT_TEMPLATE_STAGED
+         )
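
Because unset keys fall back to the `constants.py` defaults, a partial mapping only overrides what it names. A quick sketch of that behaviour:

```python
from logdetective.models import PromptConfig

defaults = PromptConfig()  # no data: all four templates come from constants.py
custom = PromptConfig({"snippet_prompt_template": "Describe this snippet only:\n\n{}\n"})

# Only the named key differs; every other template keeps its default.
assert custom.snippet_prompt_template != defaults.snippet_prompt_template
assert custom.prompt_template == defaults.prompt_template
```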

logdetective-0.5.10/logdetective/prompts.yml (new file)

@@ -0,0 +1,55 @@
+ # This file is intended for customization of prompts
+ # It is used only in server mode.
+ # On command line you have to load it using --prompts
+ # The defaults are stored in constants.py
+
+ prompt_template: |
+     Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
+
+     Analysis of the snippets must be in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
+     Snippets themselves must not be altered in any way whatsoever.
+
+     Snippets are delimited with '================'.
+
+     Finally, drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
+
+     Snippets:
+
+     {}
+
+     Analysis:
+
+
+ summarization_prompt_template: |
+     Does following log contain error or issue?
+
+     Log:
+
+     {}
+
+     Answer:
+
+
+ snippet_prompt_template: |
+     Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
+
+     Snippet:
+
+     {}
+
+     Analysis:
+
+ prompt_template_staged: |
+     Given following log snippets, their explanation, and nothing else, explain what failure, if any, occured during build of this package.
+
+     Snippets are in a format of [X] : [Y], where [X] is a log snippet, and [Y] is the explanation.
+
+     Snippets are delimited with '================'.
+
+     Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
+
+     Snippets:
+
+     {}
+
+     Analysis:

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/server.py

@@ -21,19 +21,18 @@ import gitlab.v4.objects
  import jinja2
  import requests

- from logdetective.constants import (
-     PROMPT_TEMPLATE,
-     SNIPPET_PROMPT_TEMPLATE,
-     PROMPT_TEMPLATE_STAGED,
- )
  from logdetective.extractors import DrainExtractor
  from logdetective.utils import (
      validate_url,
      compute_certainty,
      format_snippets,
+     load_prompts,
+ )
+ from logdetective.server.utils import (
+     load_server_config,
+     get_log,
      format_analyzed_snippets,
  )
- from logdetective.server.utils import load_server_config, get_log
  from logdetective.server.metric import track_request
  from logdetective.server.models import (
      BuildLog,
@@ -51,8 +50,10 @@ LLM_CPP_SERVER_TIMEOUT = os.environ.get("LLAMA_CPP_SERVER_TIMEOUT", 600)
  LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
  API_TOKEN = os.environ.get("LOGDETECTIVE_TOKEN", None)
  SERVER_CONFIG_PATH = os.environ.get("LOGDETECTIVE_SERVER_CONF", None)
+ SERVER_PROMPT_PATH = os.environ.get("LOGDETECTIVE_PROMPTS", None)

  SERVER_CONFIG = load_server_config(SERVER_CONFIG_PATH)
+ PROMPT_CONFIG = load_prompts(SERVER_PROMPT_PATH)

  MR_REGEX = re.compile(r"refs/merge-requests/(\d+)/.*$")
  FAILURE_LOG_REGEX = re.compile(r"(\w*\.log)")
@@ -298,7 +299,7 @@ async def analyze_log(build_log: BuildLog):
      log_summary = mine_logs(log_text)
      log_summary = format_snippets(log_summary)
      response = await submit_text(
-         PROMPT_TEMPLATE.format(log_summary),
+         PROMPT_CONFIG.prompt_template.format(log_summary),
          api_endpoint=SERVER_CONFIG.inference.api_endpoint,
      )
      certainty = 0
@@ -338,7 +339,7 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
      analyzed_snippets = await asyncio.gather(
          *[
              submit_text(
-                 SNIPPET_PROMPT_TEMPLATE.format(s),
+                 PROMPT_CONFIG.snippet_prompt_template.format(s),
                  api_endpoint=SERVER_CONFIG.inference.api_endpoint,
              )
              for s in log_summary
@@ -349,7 +350,7 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
          AnalyzedSnippet(line_number=e[0][0], text=e[0][1], explanation=e[1])
          for e in zip(log_summary, analyzed_snippets)
      ]
-     final_prompt = PROMPT_TEMPLATE_STAGED.format(
+     final_prompt = PROMPT_CONFIG.prompt_template_staged.format(
          format_analyzed_snippets(analyzed_snippets)
      )

@@ -395,7 +396,7 @@ async def analyze_log_stream(build_log: BuildLog):
          headers["Authorization"] = f"Bearer {SERVER_CONFIG.inference.api_token}"

      stream = await submit_text_chat_completions(
-         PROMPT_TEMPLATE.format(log_summary), stream=True, headers=headers
+         PROMPT_CONFIG.prompt_template.format(log_summary), stream=True, headers=headers
      )

      return StreamingResponse(stream)

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/server/utils.py

@@ -1,6 +1,18 @@
  import logging
  import yaml
- from logdetective.server.models import Config
+ from logdetective.constants import SNIPPET_DELIMITER
+ from logdetective.server.models import Config, AnalyzedSnippet
+
+
+ def format_analyzed_snippets(snippets: list[AnalyzedSnippet]) -> str:
+     """Format snippets for submission into staged prompt."""
+     summary = f"\n{SNIPPET_DELIMITER}\n".join(
+         [
+             f"[{e.text}] at line [{e.line_number}]: [{e.explanation.text}]"
+             for e in snippets
+         ]
+     )
+     return summary


  def load_server_config(path: str | None) -> Config:

{logdetective-0.5.8 → logdetective-0.5.10}/logdetective/utils.py

@@ -4,10 +4,11 @@ from typing import Iterator, List, Dict, Tuple, Generator
  from urllib.parse import urlparse
  import numpy as np
  import requests
+ import yaml

  from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
- from logdetective.constants import PROMPT_TEMPLATE, SNIPPET_DELIMITER
- from logdetective.server.models import AnalyzedSnippet
+ from logdetective.models import PromptConfig
+

  LOG = logging.getLogger("logdetective")

@@ -110,7 +111,7 @@ def compute_certainty(probs: List[Dict]) -> float:


  def process_log(
-     log: str, model: Llama, stream: bool
+     log: str, model: Llama, stream: bool, prompt_template: str
  ) -> CreateCompletionResponse | Iterator[CreateCompletionStreamResponse]:
      """Processes a given log using the provided language model and returns its summary.

@@ -122,7 +123,7 @@ def process_log(
          str: The summary of the given log generated by the language model.
      """
      response = model(
-         prompt=PROMPT_TEMPLATE.format(log), stream=stream, max_tokens=0, logprobs=1
+         prompt=prompt_template.format(log), stream=stream, max_tokens=0, logprobs=1
      )

      return response
@@ -175,17 +176,6 @@ def format_snippets(snippets: list[str] | list[Tuple[int, str]]) -> str:
      return summary


- def format_analyzed_snippets(snippets: list[AnalyzedSnippet]) -> str:
-     """Format snippets for submission into staged prompt."""
-     summary = f"\n{SNIPPET_DELIMITER}\n".join(
-         [
-             f"[{e.text}] at line [{e.line_number}]: [{e.explanation.text}]"
-             for e in snippets
-         ]
-     )
-     return summary
-
-
  def validate_url(url: str) -> bool:
      """Validate incoming URL to be at least somewhat sensible for log files
      Only http and https protocols permitted. No result, params or query fields allowed.
@@ -199,3 +189,15 @@ def validate_url(url: str) -> bool:
      if not (result.path or result.netloc):
          return False
      return True
+
+
+ def load_prompts(path: str | None) -> PromptConfig:
+     """Load prompts from given yaml file if there is one.
+     Alternatively use defaults."""
+     if path:
+         try:
+             with open(path, "r") as file:
+                 return PromptConfig(yaml.safe_load(file))
+         except FileNotFoundError:
+             print("Prompt configuration file not found, reverting to defaults.")
+     return PromptConfig()
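
Combined with `PromptConfig`, `load_prompts` degrades gracefully: passing no path, or a path to a missing file, yields the built-in defaults. A minimal sketch (file names hypothetical, untested):

```python
import tempfile

from logdetective.utils import load_prompts

# Write a partial override; the YAML keys mirror the PromptConfig field names.
with tempfile.NamedTemporaryFile("w", suffix=".yml", delete=False) as f:
    f.write("summarization_prompt_template: |\n")
    f.write("    Any errors in this log?\n\n    {}\n\n    Answer:\n")
    path = f.name

custom = load_prompts(path)    # summarization template overridden, rest default
defaults = load_prompts(None)  # no path given: pure defaults
missing = load_prompts("no-such-file.yml")  # warns on stdout, returns defaults
```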

logdetective-0.5.10/logdetective.1.asciidoc (new file)

@@ -0,0 +1,83 @@
+ = logdetective(1)
+ :doctype: manpage
+ :man source: logdetective 1.0
+ :man manual: User Commands
+
+ == NAME
+
+ logdetective - Analyze and summarize log files using LLM or Drain templates
+
+ == SYNOPSIS
+
+ *logdetective* [_OPTIONS_] *file*
+
+ == DESCRIPTION
+
+ *logdetective* is a tool that analyzes log files using either a large language
+ model (LLM) or the Drain log template miner. It can consume logs from a local
+ path or a URL, summarize them, and cluster them for easier inspection.
+
+ == POSITIONAL ARGUMENTS
+
+ *file*::
+ The URL or path to the log file to be analyzed.
+
+ == OPTIONS
+
+ *-h*, *--help*::
+ Show this help message and exit.
+
+ *-M* *MODEL*, *--model* *MODEL*::
+ The path or URL of the language model for analysis. As we are using LLama.cpp we want this to be in the gguf format. You can include the download link to the model here. If the model is already on your machine it will skip the download. (optional, default: "Mistral-7B-Instruct-v0.2-GGUF")
+
+ *-F* *FILENAME_SUFFIX*, *--filename_suffix* *FILENAME_SUFFIX*::
+ Define the suffix of the model file name to retrieve from Hugging Face. This option only applies when the model is specified by name (not a path).
+
+ *-n*, *--no-stream*::
+ Disable streaming output of analysis results.
+
+ *-S* *SUMMARIZER*, *--summarizer* *SUMMARIZER*::
+ Choose between LLM and Drain template miner as the log summarizer. You can also provide the path to an existing language model file instead of using a URL. (optional, default: "drain")
+
+ *-N* *N_LINES*, *--n_lines* *N_LINES*::
+ Number of lines per chunk for LLM analysis. Only applicable when `LLM` is used as the summarizer. (optional, default: 8)
+
+ *-C* *N_CLUSTERS*, *--n_clusters* *N_CLUSTERS*::
+ Number of clusters to use with the Drain summarizer. Ignored if `LLM` summarizer is selected. (optional, default 8)
+
+ *-v*, *--verbose*::
+ Enable verbose output during processing.
+
+ *-q*, *--quiet*::
+ Suppress non-essential output.
+
+ *--prompts* *PROMPTS*::
+ Path to prompt configuration file where you can customize prompts sent to `LLM`.
+
+
+ == EXAMPLES
+
+ Example usage:
+
+ $ logdetective https://example.com/logs.txt
+
+ Or if the log file is stored locally:
+
+ $ logdetective ./data/logs.txt
+
+ Analyze a local log file using an LLM model:
+
+ $ logdetective -M /path/to/llm-model -S LLM -N 100 /var/log/syslog
+
+ With specific models:
+
+ $ logdetective https://example.com/logs.txt --model https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_S.gguf?download=true
+ $ logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF
+
+ Cluster logs from a URL using Drain:
+
+ $ logdetective -S Drain -C 10 https://example.com/logs.txt
+
+ == SEE ALSO
+
+ https://logdetective.com

{logdetective-0.5.8 → logdetective-0.5.10}/pyproject.toml

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "logdetective"
- version = "0.5.8"
+ version = "0.5.10"
  description = "Log using LLM AI to search for build/test failures and provide ideas for fixing these."
  authors = ["Jiri Podivin <jpodivin@gmail.com>"]
  license = "Apache-2.0"
@@ -8,6 +8,8 @@ readme = "README.md"
  include = [
      "logdetective/drain3.ini",
      "logdetective/server/templates/gitlab_comment.md.j2",
+     "logdetective/prompts.yml",
+     "logdetective.1.asciidoc",
  ]
  packages = [
      { include = "logdetective" }
@@ -38,10 +40,10 @@ huggingface-hub = ">0.23.2"
  # we need to support both versions
  numpy = ">=1.26.0"
  python-gitlab = ">=4.4.0"
+ pydantic = "^2.8.2"
+ pyyaml = "^6.0.1"

- pydantic = {version = "^2.8.2", optional = true }
  fastapi = {version = ">=0.111.1", optional = true }
- pyyaml = {version = "^6.0.1", optional = true }
  sqlalchemy = {version = "^2.0.36", optional = true }
  psycopg2-binary = {version = "^2.9.9", optional = true }
  psycopg2 = {version = "^2.9.9", optional = true }
@@ -49,8 +51,8 @@ alembic = {version = "^1.13.3", optional = true }
  matplotlib = {version = "^3.8.4", optional = true }

  [tool.poetry.extras]
- server = ["pydantic", "fastapi", "pyyaml", "sqlalchemy", "psycopg2", "alembic", "matplotlib"]
- server-testing = ["pydantic", "fastapi", "pyyaml", "sqlalchemy", "psycopg2-binary", "alembic", "matplotlib"]
+ server = ["fastapi", "sqlalchemy", "psycopg2", "alembic", "matplotlib"]
+ server-testing = ["fastapi", "sqlalchemy", "psycopg2-binary", "alembic", "matplotlib"]

  [build-system]
  requires = ["poetry-core"]