logdetective 2.13.0-py3-none-any.whl → 3.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,12 +5,8 @@ from enum import Enum
  from collections import defaultdict
  from contextlib import asynccontextmanager
  from typing import Annotated
- from io import BytesIO
 
  from aiolimiter import AsyncLimiter
- import matplotlib
- import matplotlib.figure
- import matplotlib.pyplot
  from koji import ClientSession
  from gitlab import Gitlab
  from fastapi import (
@@ -50,7 +46,14 @@ from logdetective.server.llm import (
      perform_analysis_stream,
  )
  from logdetective.server.gitlab import process_gitlab_job_event
- from logdetective.server.metric import track_request, add_new_metrics, update_metrics
+ from logdetective.server.metric import (
+     track_request,
+     add_new_metrics,
+     update_metrics,
+     requests_per_time,
+     average_time_per_responses,
+     emojis_per_time
+ )
  from logdetective.server.models import (
      BuildLog,
      Config,
@@ -62,8 +65,8 @@ from logdetective.server.models import (
      StagedResponse,
      TimePeriod,
      ExtractorConfig,
+     MetricResponse,
  )
- from logdetective.server import plot as plot_engine
  from logdetective.server.database.models import (
      EndpointType,
      Forge,
@@ -730,38 +733,6 @@ async def schedule_emoji_collection_for_mr(
          del emoji_lookup[key]
 
 
- def _svg_figure_response(fig: matplotlib.figure.Figure):
-     """Create a response with the given svg figure."""
-     buf = BytesIO()
-     fig.savefig(buf, format="svg", bbox_inches="tight")
-     matplotlib.pyplot.close(fig)
-
-     buf.seek(0)
-     return StreamingResponse(
-         buf,
-         media_type="image/svg+xml",
-         headers={"Content-Disposition": "inline; filename=plot.svg"},
-     )
-
-
- def _multiple_svg_figures_response(figures: list[matplotlib.figure.Figure]):
-     """Create a response with multiple svg figures."""
-     svg_contents = []
-     for i, fig in enumerate(figures):
-         buf = BytesIO()
-         fig.savefig(buf, format="svg", bbox_inches="tight")
-         matplotlib.pyplot.close(fig)
-         buf.seek(0)
-         svg_contents.append(buf.read().decode("utf-8"))
-
-     html_content = "<html><body>\n"
-     for i, svg in enumerate(svg_contents):
-         html_content += f"<div id='figure-{i}'>\n{svg}\n</div>\n"
-     html_content += "</body></html>"
-
-     return BasicResponse(content=html_content, media_type="text/html")
-
-
  class MetricRoute(str, Enum):
      """Routes for metrics"""
 
@@ -770,13 +741,13 @@ class MetricRoute(str, Enum):
      ANALYZE_GITLAB_JOB = "analyze-gitlab"
 
 
- class Plot(str, Enum):
-     """Type of served plots"""
+ class MetricType(str, Enum):
+     """Type of metric retrieved"""
 
      REQUESTS = "requests"
      RESPONSES = "responses"
      EMOJIS = "emojis"
-     BOTH = ""
+     ALL = "all"
 
 
  ROUTE_TO_ENDPOINT_TYPES = {
@@ -786,58 +757,57 @@ ROUTE_TO_ENDPOINT_TYPES = {
  }
 
 
- @app.get("/metrics/{route}/", response_class=StreamingResponse)
- @app.get("/metrics/{route}/{plot}", response_class=StreamingResponse)
+ @app.get("/metrics/{route}/", response_model=MetricResponse)
+ @app.get("/metrics/{route}/{metric_type}", response_model=MetricResponse)
  async def get_metrics(
      route: MetricRoute,
-     plot: Plot = Plot.BOTH,
+     metric_type: MetricType = MetricType.ALL,
      period_since_now: TimePeriod = Depends(TimePeriod),
  ):
-     """Get an handler for visualize statistics for the specified endpoint and plot."""
+     """Get a handler returning statistics for the specified endpoint and metric_type."""
      endpoint_type = ROUTE_TO_ENDPOINT_TYPES[route]
 
-     async def handler():
-         """Show statistics for the specified endpoint and plot."""
-         if plot == Plot.REQUESTS:
-             fig = await plot_engine.requests_per_time(period_since_now, endpoint_type)
-             return _svg_figure_response(fig)
-         if plot == Plot.RESPONSES:
-             fig = await plot_engine.average_time_per_responses(
+     async def handler() -> MetricResponse:
+         """Return statistics for the specified endpoint and metric type."""
+         statistics = []
+         if metric_type == MetricType.ALL:
+             statistics.append(await requests_per_time(
                  period_since_now, endpoint_type
-             )
-             return _svg_figure_response(fig)
-         if plot == Plot.EMOJIS:
-             fig = await plot_engine.emojis_per_time(period_since_now)
-             return _svg_figure_response(fig)
-         # BOTH
-         fig_requests = await plot_engine.requests_per_time(
-             period_since_now, endpoint_type
-         )
-         fig_responses = await plot_engine.average_time_per_responses(
-             period_since_now, endpoint_type
-         )
-         fig_emojis = await plot_engine.emojis_per_time(period_since_now)
-         return _multiple_svg_figures_response([fig_requests, fig_responses, fig_emojis])
+             ))
+             statistics.append(await average_time_per_responses(
+                 period_since_now, endpoint_type
+             ))
+             statistics.extend(await emojis_per_time(period_since_now))
+             return MetricResponse(time_series=statistics)
+         if metric_type == MetricType.REQUESTS:
+             statistics.append(await requests_per_time(period_since_now, endpoint_type))
+         elif metric_type == MetricType.RESPONSES:
+             statistics.append(await average_time_per_responses(
+                 period_since_now, endpoint_type
+             ))
+         elif metric_type == MetricType.EMOJIS:
+             statistics = await emojis_per_time(period_since_now)
+         return MetricResponse(time_series=statistics)
 
      descriptions = {
-         Plot.REQUESTS: (
-             "Show statistics for the requests received in the given period of time "
+         MetricType.REQUESTS: (
+             "Get statistics for the requests received in the given period of time "
              f"for the /{endpoint_type.value} API endpoint."
          ),
-         Plot.RESPONSES: (
-             "Show statistics for responses given in the specified period of time "
+         MetricType.RESPONSES: (
+             "Get statistics for responses given in the specified period of time "
              f"for the /{endpoint_type.value} API endpoint."
          ),
-         Plot.EMOJIS: (
-             "Show statistics for emoji feedback in the specified period of time "
+         MetricType.EMOJIS: (
+             "Get statistics for emoji feedback in the specified period of time "
              f"for the /{endpoint_type.value} API endpoint."
          ),
-         Plot.BOTH: (
-             "Show statistics for requests and responses in the given period of time "
+         MetricType.ALL: (
+             "Get statistics for requests and responses in the given period of time "
              f"for the /{endpoint_type.value} API endpoint."
          ),
      }
-     handler.__doc__ = descriptions[plot]
+     handler.__doc__ = descriptions[metric_type]
 
      return await handler()
 
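The user-visible effect of the server changes above is that the `/metrics/{route}/` and `/metrics/{route}/{metric_type}` endpoints now return JSON (a `MetricResponse` carrying a `time_series` list) instead of rendered SVG plots. A minimal client sketch, assuming a server on `localhost:8080` and the `analyze` route shown in the README examples; the exact field layout of `MetricResponse` lives in `logdetective/server/models.py` and is not part of this diff:

```python
# Hedged sketch: querying the 3.x JSON metrics endpoint.
# Host, port, and the element structure of time_series are assumptions.
import requests

resp = requests.get(
    "http://localhost:8080/metrics/analyze/requests",
    params={"weeks": 5},  # TimePeriod is passed as query parameters
    timeout=30,
)
resp.raise_for_status()
for series in resp.json()["time_series"]:
    print(series)
```

Clients that previously saved the SVG response to a file now need to consume the JSON body instead.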
logdetective/utils.py CHANGED
@@ -1,10 +1,11 @@
  import logging
  import os
  import subprocess as sp
- from typing import Iterator, List, Dict, Tuple, Generator
+ from typing import Iterator, List, Dict, Tuple, Generator, Optional
  from urllib.parse import urlparse
 
  import aiohttp
+ from jinja2 import exceptions
  import numpy as np
  import yaml
 
@@ -15,6 +16,7 @@ from llama_cpp import (
  )
  from logdetective.constants import SNIPPET_DELIMITER
  from logdetective.models import PromptConfig, SkipSnippets
+ from logdetective.prompts import PromptManager
  from logdetective.remote_log import RemoteLog
 
  LOG = logging.getLogger("logdetective")
@@ -127,7 +129,11 @@ def compute_certainty(probs: List[Dict]) -> float:
 
 
  def process_log(
-     log: str, model: Llama, stream: bool, prompt_templates: PromptConfig, temperature: float
+     log: str,
+     model: Llama,
+     stream: bool,
+     prompt_templates: PromptConfig | PromptManager,
+     temperature: float,
  ) -> CreateChatCompletionResponse | Iterator[CreateChatCompletionStreamResponse]:
      """Processes a given log using the provided language model and returns its summary.
 
@@ -135,20 +141,14 @@
      log (str): The input log to be processed.
      model (Llama): The language model used for processing the log.
      stream (bool): Return output as Iterator.
-     prompt_template (str): Which prompt template to use.
+     prompt_templates (PromptConfig | PromptManager): Prompt templates to use with LLM.
      temperature (float): Temperature parameter for model runtime.
      Returns:
      str: The summary of the given log generated by the language model.
      """
      messages = [
-         {
-             "role": "system",
-             "content": prompt_templates.default_system_prompt
-         },
-         {
-             "role": "user",
-             "content": prompt_templates.prompt_template.format(log)
-         },
+         {"role": "system", "content": prompt_templates.default_system_prompt},
+         {"role": "user", "content": prompt_templates.prompt_template.format(log)},
      ]
 
      response = model.create_chat_completion(
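Note that `process_log` only touches the `default_system_prompt` and `prompt_template` attributes of its `prompt_templates` argument, which is why either a `PromptConfig` or a `PromptManager` can be passed. A minimal invocation sketch, assuming a locally downloaded GGUF model (the file paths are placeholders, not part of this diff):

```python
# Hedged sketch of calling the new process_log signature.
from llama_cpp import Llama

from logdetective.utils import load_prompts, process_log

model = Llama(model_path="./models/ggml-model-Q4_K.gguf")

with open("./data/logs.txt", "r") as f:
    log = f.read()

response = process_log(
    log=log,
    model=model,
    stream=False,
    prompt_templates=load_prompts(),  # PromptConfig defaults
    temperature=0.8,
)
print(response["choices"][0]["message"]["content"])
```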
@@ -200,26 +200,35 @@ def format_snippets(snippets: list[str] | list[Tuple[int, str]]) -> str:
          else:
              header = f"Snippet No. {i}:"
              snippet_content = s
-         summary += (
-             f"{header}\n"
-             "\n"
-             f"{snippet_content}\n"
-             f"{SNIPPET_DELIMITER}\n"
-             f"\n"
-         )
+         summary += f"{header}\n\n{snippet_content}\n{SNIPPET_DELIMITER}\n\n"
      return summary
 
 
- def load_prompts(path: str | None) -> PromptConfig:
-     """Load prompts from given yaml file if there is one.
-     Alternatively use defaults."""
-     if path:
+ def load_prompts(
+     config_path: Optional[str] = None, template_path: Optional[str] = None
+ ) -> PromptConfig | PromptManager:
+     """Load prompts from yaml file, and optionally initialize `PromptManager`
+     if provided with path to prompt templates.
+     """
+     configuration = PromptConfig()
+     if config_path:
          try:
-             with open(path, "r") as file:
-                 return PromptConfig(yaml.safe_load(file))
+             with open(config_path, "r") as file:
+                 configuration = PromptConfig(**yaml.safe_load(file))
          except FileNotFoundError:
-             print("Prompt configuration file not found, reverting to defaults.")
-             return PromptConfig()
+             LOG.error(
+                 "Prompt configuration file not found, reverting to defaults.",
+                 exc_info=True,
+             )
+     if template_path:
+         try:
+             return PromptManager(template_path, configuration)
+         except exceptions.TemplateError:
+             LOG.error(
+                 "Prompt templates couldn't be rendered, reverting to defaults.",
+                 exc_info=True,
+             )
+     return configuration
 
 
  def prompt_to_messages(
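The reworked `load_prompts` degrades gracefully: a missing YAML file logs an error and keeps the defaults, and a template directory that fails to render falls back to the plain `PromptConfig`. A short usage sketch (both paths are hypothetical):

```python
# Hedged sketch of the three ways load_prompts can now be called.
from logdetective.utils import load_prompts

# 1. No arguments: built-in PromptConfig defaults.
prompts = load_prompts()

# 2. YAML config only: a PromptConfig populated from the file.
prompts = load_prompts(config_path="./my-prompts.yml")

# 3. YAML config plus a Jinja template directory: a PromptManager,
#    unless rendering fails, in which case the PromptConfig is returned.
prompts = load_prompts(
    config_path="./my-prompts.yml",
    template_path="./my-prompts",
)
```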
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: logdetective
- Version: 2.13.0
- Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
+ Version: 3.1.0
+ Summary: Analyze logs with a template miner and an LLM to discover errors and suggest solutions.
  License: Apache-2.0
  License-File: LICENSE
  Author: Jiri Podivin
@@ -36,7 +36,6 @@ Requires-Dist: flexmock (>=0.12.2,<0.13.0) ; extra == "testing"
  Requires-Dist: huggingface-hub (>=0.23.0,<1.4.0)
  Requires-Dist: koji (>=1.35.0,<2.0.0) ; extra == "server" or extra == "server-testing"
  Requires-Dist: llama-cpp-python (>0.2.56,!=0.2.86,<1.0.0)
- Requires-Dist: matplotlib (>=3.8.4,<4.0.0) ; extra == "server" or extra == "server-testing"
  Requires-Dist: numpy (>=1.26.0)
  Requires-Dist: openai (>=1.82.1,<2.0.0) ; extra == "server" or extra == "server-testing"
  Requires-Dist: pydantic (>=2.8.2,<3.0.0)
@@ -97,12 +96,15 @@ Usage
 
  To analyze a log file, run the script with the following command line arguments:
  - `file` (required): The path or URL of the log file to be analyzed.
- - `--model` (optional, default: "Mistral-7B-Instruct-v0.3-GGUF"): The path or Hugging space name of the language model for analysis. For models from Hugging Face, write them as `namespace/repo_name`. As we are using LLama.cpp we want this to be in the `gguf` format. If the model is already on your machine it will skip the download.
- - `--filename_suffix` (optional, default "Q4_K.gguf"): You can specify which suffix of the file to use. This option is applied when specifying model using the Hugging Face repository.
- - `--summarizer` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only. (optional, default: "drain"): Choose between LLM and Drain template miner as the log summarizer. You can also provide the path to an existing language model file instead of using a URL.
- - `--n_lines` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only. (optional, default: 8): The number of lines per chunk for LLM analysis. This only makes sense when you are summarizing with LLM.
- - `--n_clusters` (optional, default 8): Number of clusters for Drain to organize log chunks into. This only makes sense when you are summarizing with Drain.
- - `--skip_snippets` Path to patterns for skipping snippets (in YAML).
+ - `--model` (optional, default: "granite-3.2-8b-instruct-GGUF"): The path or Hugging space name of the language model for analysis. For models from Hugging Face, write them as `namespace/repo_name`. As we are using LLama.cpp we want this to be in the `gguf` format. If the model is already on your machine it will skip the download.
+ - `--filename-suffix` (optional, default "Q4_K.gguf"): You can specify which suffix of the file to use. This option is applied when specifying model using the Hugging Face repository.
+ - `--n-clusters` (optional, default 8): Number of clusters for Drain to organize log chunks into. This only makes sense when you are summarizing with Drain.
+ - `--prompts PROMPTS` (Deprecated, replaced by `--prompts-config`) Path to prompt configuration file.
+ - `--prompts-config PROMPTS` Path to prompt configuration file.
+ - `--prompt-templates` Path to prompt template dir. Prompts must be valid Jinja templates, and system prompts must include field `system_time`.
+ - `--temperature` Temperature for inference.
+ - `--skip-snippets` Path to patterns for skipping snippets.
+ - `--csgrep` Use csgrep to process the log.
 
  Example usage:
 
@@ -112,21 +114,27 @@ Or if the log file is stored locally:
 
      logdetective ./data/logs.txt
 
- Examples of using different models. Note the use of `--filename_suffix` (or `-F`) option, useful for models that were quantized:
+ Examples of using different models. Note the use of `--filename-suffix` (or `-F`) option, useful for models that were quantized:
 
-     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_S.gguf
+     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename-suffix Q5_K_S.gguf
      logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --model 'fedora-copr/granite-3.2-8b-instruct-GGUF' -F Q4_K_M.gguf
 
  Example of altered prompts:
 
-     cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
-     vi ~/my-prompts.yml # edit the prompts there to better fit your needs
-     logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml
+     cp -r ~/.local/lib/python3.13/site-packages/logdetective/prompts ~/my-prompts
+     vi ~/my-prompts/system_prompt.j2 # edit the system prompt there to better fit your needs
+     logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompt-templates ~/my-prompts
+
+ Example of altered prompts (Deprecated):
+
+     cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
+     vi ~/my-prompts.yml # edit the prompts there to better fit your needs
+     logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml
 
 
  Note that streaming with some models (notably Meta-Llama-3) is broken and can be worked around by `no-stream` option:
 
-     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_M.gguf --no-stream
+     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename-suffix Q5_K_M.gguf --no-stream
 
  Choice of LLM
  -------------
@@ -205,7 +213,8 @@ message is reported indicating that the 'check' phase of the rpm build process
  failed with a bad exit status.
  ```
 
- It looks like a wall of text. Similar to any log. The main difference is that here we have the most significant lines of a logfile wrapped in `[ ] : ` and followed by textual explanation of the log text done by mistral 7b.
+ It looks like a wall of text. Similar to any log.
+ The main difference is that here we have the most significant lines of a logfile wrapped in `[ ] : ` and followed by textual explanation of the log text done by local LLM.
 
 
  Contributing
@@ -373,14 +382,14 @@ Before doing `podman-compose up`, make sure to set `MODELS_PATH` environment var
  ```
  $ export MODELS_PATH=/path/to/models/
  $ ll $MODELS_PATH
- -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
+ -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 granite-4.0-h-tiny-Q8_0.gguf
  ```
 
  If the variable is not set, `./models` is mounted inside by default.
 
  Model can be downloaded from [our Hugging Space](https://huggingface.co/fedora-copr) by:
  ```
- $ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
+ $ curl -L -o models/granite-3.2-8b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/granite-3.2-8b-instruct-GGUF/resolve/main/ggml-model-Q4_K.gguf
  ```
 
  Filtering snippet analysis by relevance
@@ -500,17 +509,38 @@ http GET "localhost:8080/metrics/analyze/requests?weeks=5" > /tmp/plot_weeks.svg
  System Prompts
  --------------
 
- Prompt templates used by Log Detective are stored in the `prompts.yml` file.
+ Prompts are defined as Jinja templates and placed in the location specified by the `--prompt-templates` option of the CLI utility, or the `LOGDETECTIVE_PROMPT_TEMPLATES` environment variable of the container service, with further optional configuration in the `prompts.yml` configuration file.
+
+ All system prompt templates must include a place for the `system_time` variable.
+
+ If a `references` list is defined in `prompts.yml`, templates must also handle a list of references.
+
+ Example:
+
+ ```jinja
+ {% if references %}
+ ## References:
+
+ {% for reference in references %}
+ * {{ reference.name }} : {{ reference.link }}
+ {% endfor %}
+ {% endif %}
+
+ ```
+
+ *Deprecated:*
+
+ *Prompt templates used by Log Detective are stored in the `prompts.yml` file.
  It is possible to modify the file in place, or provide your own.
  In CLI you can override prompt templates location using `--prompts` option,
  while in the container service deployment the `LOGDETECTIVE_PROMPTS` environment variable
- is used instead.
+ is used instead.*
 
- Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
- with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.
+ *Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
+ with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.*
 
- Number of replacement fields in new prompts, must be the same as in originals.
- Although their position may be different.
+ *Number of replacement fields in new prompts, must be the same as in originals.
+ Although their position may be different.*
 
 
  Skip Snippets
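A custom template directory can be sanity-checked against the requirements above (the mandatory `system_time` variable and the optional `references` handling) by rendering it directly with jinja2. A hedged sketch: the template file name follows the `prompts/` entries in the RECORD below, while the directory and the reference entries are made up for illustration:

```python
# Render a custom system prompt template the way the requirements above
# describe. Variables beyond system_time and references are not specified
# in this diff.
from datetime import datetime, timezone

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("./my-prompts"))
template = env.get_template("system_prompt.j2")

print(
    template.render(
        system_time=datetime.now(timezone.utc).isoformat(),
        references=[{"name": "Fedora packaging docs",
                     "link": "https://docs.fedoraproject.org"}],
    )
)
```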
@@ -1,40 +1,46 @@
  logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
- logdetective/constants.py,sha256=aCwrkBrDdS_kbNESK-Z-ewg--DSzodV2OMgwEq3UE38,2456
+ logdetective/constants.py,sha256=GKTHO77MrJQS5W97nnWY6AZqxcMHxGJDx5y97cpnymk,2455
  logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
  logdetective/extractors.py,sha256=vT-je4NkDgSj9rRtSeLpqBU52gIUnnVgJPHFbVihpCw,5993
- logdetective/logdetective.py,sha256=S0abGrAQH2oi0MRisCV64Sa1UXdQLIfXFBA4tYAYqhM,6896
- logdetective/models.py,sha256=uczmQtWFgSp_ZGssngdTM4qzPF1o64dCy0469GoSbjQ,2937
+ logdetective/logdetective.py,sha256=um7rHKqwLGYdYLs46koRDADvYVYL2Q9-B9Y7veVHYO8,6563
+ logdetective/models.py,sha256=UV5eTo8mVMkJuY2W869iBZv-ZfMIXh4_jLhOf-dhdyQ,2204
+ logdetective/prompts/message_template.j2,sha256=yuniCNUtYvrAoQChOXI6-CV8XDeV8cX3EJmOn5hWbVw,13
+ logdetective/prompts/snippet_message_template.j2,sha256=YRKZ_rdK1xqN4i66dlACStgP_gFroC7k_EGmQgzFEU0,12
+ logdetective/prompts/snippet_system_prompt.j2,sha256=zaQk6pYvf7oY2KZJihg2gILYY1Xq7k2ifIcibigl-UE,1259
+ logdetective/prompts/staged_message_template.j2,sha256=yuniCNUtYvrAoQChOXI6-CV8XDeV8cX3EJmOn5hWbVw,13
+ logdetective/prompts/staged_system_prompt.j2,sha256=kiTTspTCMinRBcO9broks2RDwJqRSfJ7xHqoNOoKUUo,1808
+ logdetective/prompts/system_prompt.j2,sha256=mOf3yGG830sqHO_JWBUtq5TMITUVDgwbW5XihbgNRM8,2330
  logdetective/prompts-summary-first.yml,sha256=kmyMFQmqFXpojkz7p3CyCWCPxMpFLpfDdMGisB4YwL0,808
  logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
- logdetective/prompts.yml,sha256=i3z6Jcb4ScVi7LsxOpDlKiXrcvql3qO_JnLzkAKMn1c,3870
+ logdetective/prompts.py,sha256=H-cIyXMVxtiL_v3GBCKMaeBzThwOK6MejRxka3-lNqE,3062
+ logdetective/prompts.yml,sha256=dFvWlvzf9HAC_VGyEjUTzBWSQ6FojNa5jkJ9KHpuwqs,4171
  logdetective/remote_log.py,sha256=28QvdQiy7RBnd86EKCq_A75P21gSNlCbgxJe5XAe9MA,2258
  logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/compressors.py,sha256=y4aFYJ_9CbYdKuAI39Kc9GQSdPN8cSJ2c_VAz3T47EE,5249
- logdetective/server/config.py,sha256=dYoqvexnMo8LBXhXezMIEqUwzTsRD-eWvRIFIYNv388,2540
+ logdetective/server/config.py,sha256=Q1y4Ta1UUILLfK7iZ6H1Tf599Xa5gwtY3XF-4DoMLSs,2645
  logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/database/base.py,sha256=bqMkhL2D96i_QiSnO5u1FqxYuJJu0m0wXLkqj_A9WBs,2093
  logdetective/server/database/models/__init__.py,sha256=zoZMCt1_7tewDa6eEIIX_xrdN-tLegSiPNg5NiYaV3o,850
  logdetective/server/database/models/exceptions.py,sha256=4ED7FSSA1liV9-7VIN2BwUiz6XlmP97Y1loKnsoNdD8,507
  logdetective/server/database/models/koji.py,sha256=HNWxHYDxf4JN9K2ue8-V8dH-0XY5ZmxqH7Y9lAIbILA,6436
  logdetective/server/database/models/merge_request_jobs.py,sha256=MxiAVKQIsQMbFylBsmYBmVXYvid-4_5mwwXLfWdp6_w,19965
- logdetective/server/database/models/metrics.py,sha256=4xsUdbtlp5PI1-iJQc5Dd8EPDgVVplD9hJRWeRDn43k,15443
+ logdetective/server/database/models/metrics.py,sha256=XpiGrZJ-SuHfePBOeek_WiV-i0p1wjoCBTekSMiZZM0,15559
  logdetective/server/emoji.py,sha256=zSaYtLpSkpRCXpjMWnHR1bYwkmobMJASZ7YNalrd85U,5274
  logdetective/server/exceptions.py,sha256=WN715KLL3ya6FiZ95v70VSbNuVhGuHFzxm2OeEPWQCw,981
  logdetective/server/gitlab.py,sha256=X9JSotUUlG9bOWYbUNKt9KqLUAj6Uocd2KNpfn35ccU,17192
  logdetective/server/koji.py,sha256=LG1pRiKUFvYFRKzgQoUG3pUHfcEwMoaMNjUSMKw_pBA,5640
  logdetective/server/llm.py,sha256=wHMxRbAjI0q3osR5mRDR1kqww_6Pkc7JpF1mh9e6Mg8,10855
- logdetective/server/metric.py,sha256=wLOpgcAch3rwhPA5P2YWUeMNAPsvRGseRjH5HlTb7JM,4529
- logdetective/server/models.py,sha256=iJ-5UgScKKSRL8fRCsM23Z34P3p98LaduwWO-q9rudo,13041
- logdetective/server/plot.py,sha256=8LERgY3vQckaHZV2PZfOrZT8CjCAiji57QCmRW24Rfo,14697
- logdetective/server/server.py,sha256=AM10P72tc_7N0GhH_N7msFhLr7ZGNgIfgTxt2sjasVE,30982
+ logdetective/server/metric.py,sha256=8ZhJNbl3eSzZiY0344YXMxLk_MkgjgZB6NcZsPozkkk,11317
+ logdetective/server/models.py,sha256=edAHzJoxMh-8v-JzSwHNS5FoV-v1PlmLI-3ZwxfBnf4,13303
+ logdetective/server/server.py,sha256=lCIctjXjkaOzto5H_qadYB6RLxAbbHvFOOwYdE_sIgY,29981
  logdetective/server/templates/base_response.html.j2,sha256=BJGGV_Xb0Lnue8kq32oG9lI5CQDf9vce7HMYsP-Pvb4,2040
  logdetective/server/templates/gitlab_full_comment.md.j2,sha256=4UujUzl3lmdbNEADsxn3HVrjfUiUu2FvUlp9MDFGXQI,2321
  logdetective/server/templates/gitlab_short_comment.md.j2,sha256=2krnMlGqqju2V_6pE0UqUR1P674OFaeX5BMyY5htTOQ,2022
  logdetective/server/utils.py,sha256=0BZ8WmzXNEtkUty1kOyFbBxDZWL0Icc8BUrxuHw9uvs,4015
  logdetective/skip_snippets.yml,sha256=reGlhPPCo06nNUJWiC2LY-OJOoPdcyOB7QBTSMeh0eg,487
- logdetective/utils.py,sha256=yalhySOF_Gzmqx_Ft9qad3TplAfZ6LOmauGXEJfKWiE,9803
- logdetective-2.13.0.dist-info/METADATA,sha256=uwiSy7i6qIvLUxz-J5hCqzlLWWqmdAsi0IIvrGgQmMs,23302
- logdetective-2.13.0.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
- logdetective-2.13.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
- logdetective-2.13.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- logdetective-2.13.0.dist-info/RECORD,,
+ logdetective/utils.py,sha256=buis3FVNOb5lsZtM2LDTcYDgTTHxvi4Y59E4fvHdSuE,10327
+ logdetective-3.1.0.dist-info/METADATA,sha256=H6nNDHy16aB8sW4McoLD6vEAlMIz2iL3P1nkNKltj-w,24063
+ logdetective-3.1.0.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
+ logdetective-3.1.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+ logdetective-3.1.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ logdetective-3.1.0.dist-info/RECORD,,