logdetective 2.0.0__tar.gz → 2.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. {logdetective-2.0.0 → logdetective-2.0.1}/PKG-INFO +1 -1
  2. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/extractors.py +4 -2
  3. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/models.py +2 -0
  4. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/templates/gitlab_full_comment.md.j2 +1 -1
  5. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/utils.py +1 -0
  6. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/utils.py +2 -2
  7. {logdetective-2.0.0 → logdetective-2.0.1}/pyproject.toml +1 -1
  8. {logdetective-2.0.0 → logdetective-2.0.1}/LICENSE +0 -0
  9. {logdetective-2.0.0 → logdetective-2.0.1}/README.md +0 -0
  10. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/__init__.py +0 -0
  11. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/constants.py +0 -0
  12. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/drain3.ini +0 -0
  13. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/logdetective.py +0 -0
  14. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/models.py +0 -0
  15. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/prompts-summary-first.yml +0 -0
  16. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/prompts-summary-only.yml +0 -0
  17. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/prompts.yml +0 -0
  18. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/remote_log.py +0 -0
  19. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/__init__.py +0 -0
  20. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/compressors.py +0 -0
  21. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/config.py +0 -0
  22. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/__init__.py +0 -0
  23. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/base.py +0 -0
  24. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/models/__init__.py +0 -0
  25. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/models/exceptions.py +0 -0
  26. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/models/koji.py +0 -0
  27. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/models/merge_request_jobs.py +0 -0
  28. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/database/models/metrics.py +0 -0
  29. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/emoji.py +0 -0
  30. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/exceptions.py +0 -0
  31. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/gitlab.py +0 -0
  32. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/koji.py +0 -0
  33. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/llm.py +0 -0
  34. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/metric.py +0 -0
  35. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/plot.py +0 -0
  36. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/server.py +0 -0
  37. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/server/templates/gitlab_short_comment.md.j2 +0 -0
  38. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective/skip_snippets.yml +0 -0
  39. {logdetective-2.0.0 → logdetective-2.0.1}/logdetective.1.asciidoc +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: logdetective
3
- Version: 2.0.0
3
+ Version: 2.0.1
4
4
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
5
5
  License: Apache-2.0
6
6
  Author: Jiri Podivin
@@ -20,7 +20,8 @@ class DrainExtractor:
20
20
  context: bool = False,
21
21
  max_clusters=8,
22
22
  skip_snippets: SkipSnippets = SkipSnippets({}),
23
- ):
23
+ max_snippet_len: int = 2000
24
+ ): # pylint: disable=R0913,R0917
24
25
  config = TemplateMinerConfig()
25
26
  config.load(f"{os.path.dirname(__file__)}/drain3.ini")
26
27
  config.profiling_enabled = verbose
@@ -29,11 +30,12 @@ class DrainExtractor:
29
30
  self.verbose = verbose
30
31
  self.context = context
31
32
  self.skip_snippets = skip_snippets
33
+ self.max_snippet_len = max_snippet_len
32
34
 
33
35
  def __call__(self, log: str) -> list[Tuple[int, str]]:
34
36
  out = []
35
37
  # Create chunks
36
- chunks = list(get_chunks(log))
38
+ chunks = list(get_chunks(log, self.max_snippet_len))
37
39
  # Keep only chunks that don't match any of the excluded patterns
38
40
  chunks = [
39
41
  (_, chunk)
@@ -247,6 +247,7 @@ class ExtractorConfig(BaseModel):
247
247
  context: bool = True
248
248
  max_clusters: int = 8
249
249
  verbose: bool = False
250
+ max_snippet_len: int = 2000
250
251
 
251
252
  def __init__(self, data: Optional[dict] = None):
252
253
  super().__init__()
@@ -256,6 +257,7 @@ class ExtractorConfig(BaseModel):
256
257
  self.context = data.get("context", True)
257
258
  self.max_clusters = data.get("max_clusters", 8)
258
259
  self.verbose = data.get("verbose", False)
260
+ self.max_snippet_len = data.get("max_snippet_len", 2000)
259
261
 
260
262
 
261
263
  class GitLabInstanceConfig(BaseModel): # pylint: disable=too-many-instance-attributes
@@ -12,7 +12,7 @@ In this case, we are {{ "%.2f" | format(certainty) }}% certain of the response {
12
12
  {% for snippet in snippets %}
13
13
  <li>
14
14
  <b>Line {{ snippet.line_number }}:</b> <code>{{ snippet.text }}</code>
15
- {{ snippet.explanation }}
15
+ {{ snippet.explanation.text }}
16
16
  </li>
17
17
  {% endfor %}
18
18
  </ul>
@@ -29,6 +29,7 @@ def mine_logs(log: str) -> List[Tuple[int, str]]:
29
29
  context=True,
30
30
  max_clusters=SERVER_CONFIG.extractor.max_clusters,
31
31
  skip_snippets=SKIP_SNIPPETS_CONFIG,
32
+ max_snippet_len=SERVER_CONFIG.extractor.max_snippet_len
32
33
  )
33
34
 
34
35
  LOG.info("Getting summary")
@@ -39,7 +39,7 @@ def chunk_continues(text: str, index: int) -> bool:
39
39
  return False
40
40
 
41
41
 
42
- def get_chunks(text: str) -> Generator[Tuple[int, str], None, None]:
42
+ def get_chunks(text: str, max_len: int = 2000) -> Generator[Tuple[int, str], None, None]:
43
43
  """Split log into chunks according to heuristic
44
44
  based on whitespace and backslash presence.
45
45
  """
@@ -54,7 +54,7 @@ def get_chunks(text: str) -> Generator[Tuple[int, str], None, None]:
54
54
  chunk += text[i]
55
55
  if text[i] == "\n":
56
56
  next_line_number += 1
57
- if i + 1 < text_len and chunk_continues(text, i):
57
+ if i + 1 < text_len and chunk_continues(text, i) and i + 1 < max_len:
58
58
  i += 1
59
59
  continue
60
60
  yield (original_line_number, chunk)
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "logdetective"
3
- version = "2.0.0"
3
+ version = "2.0.1"
4
4
  description = "Log using LLM AI to search for build/test failures and provide ideas for fixing these."
5
5
  authors = ["Jiri Podivin <jpodivin@gmail.com>"]
6
6
  license = "Apache-2.0"
File without changes
File without changes