logdetective 3.0.0__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/constants.py CHANGED
@@ -4,7 +4,7 @@ in prompts.yaml instead.
  """

  # pylint: disable=line-too-long
- DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.3-GGUF"
+ DEFAULT_ADVISOR = "fedora-copr/granite-3.2-8b-instruct-GGUF"

  PROMPT_TEMPLATE = """
  Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
logdetective/logdetective.py CHANGED
@@ -59,10 +59,18 @@ def setup_args():
  parser.add_argument("-q", "--quiet", action="store_true")
  parser.add_argument(
  "--prompts",
+ "--prompts-config",
  type=str,
  default=f"{os.path.dirname(__file__)}/prompts.yml",
  help="Path to prompt configuration file.",
  )
+ parser.add_argument(
+ "--prompt-templates",
+ type=str,
+ default=f"{os.path.dirname(__file__)}/prompts",
+ help="Path to prompt template dir. Prompts must be valid Jinja templates, \
+ and system prompts must include field `system_time`.",
+ )
  parser.add_argument(
  "--temperature",
  type=float,
@@ -97,7 +105,7 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals,too-many
  log_level = 0

  # Get prompts configuration
- prompts_configuration = load_prompts(args.prompts)
+ prompts_configuration = load_prompts(args.prompts, args.prompt_templates)

  logging.basicConfig(stream=sys.stdout)
  LOG.setLevel(log_level)
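
With this change the CLI accepts both the legacy YAML configuration and a directory of Jinja templates. A minimal sketch of the resulting call flow (paths are hypothetical; `load_prompts` as reworked in `logdetective/utils.py` below):

```python
from logdetective.utils import load_prompts

# Hypothetical paths; the real defaults ship inside the package,
# as the argparse defaults above show. With only a YAML config,
# load_prompts returns a PromptConfig; once a template directory
# is given, it returns a PromptManager instead.
prompts = load_prompts("prompts.yml", "prompts/")
print(type(prompts).__name__)
```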
logdetective/models.py CHANGED
@@ -21,26 +21,7 @@ class PromptConfig(BaseModel):
  snippet_system_prompt: str = DEFAULT_SYSTEM_PROMPT
  staged_system_prompt: str = DEFAULT_SYSTEM_PROMPT

- def __init__(self, data: Optional[dict] = None):
- super().__init__()
- if data is None:
- return
- self.prompt_template = data.get("prompt_template", PROMPT_TEMPLATE)
- self.snippet_prompt_template = data.get(
- "snippet_prompt_template", SNIPPET_PROMPT_TEMPLATE
- )
- self.prompt_template_staged = data.get(
- "prompt_template_staged", PROMPT_TEMPLATE_STAGED
- )
- self.default_system_prompt = data.get(
- "default_system_prompt", DEFAULT_SYSTEM_PROMPT
- )
- self.snippet_system_prompt = data.get(
- "snippet_system_prompt", DEFAULT_SYSTEM_PROMPT
- )
- self.staged_system_prompt = data.get(
- "staged_system_prompt", DEFAULT_SYSTEM_PROMPT
- )
+ references: Optional[list[dict[str, str]]] = None


  class SkipSnippets(BaseModel):
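
With the custom `__init__` gone, `PromptConfig` behaves as a plain Pydantic model, so a YAML payload can be expanded straight into it, mirroring the `PromptConfig(**yaml.safe_load(file))` call in `logdetective/utils.py` below. A small sketch:

```python
import yaml
from logdetective.models import PromptConfig

# Payload shaped like the optional `references` block documented
# in logdetective/prompts.yml below.
data = yaml.safe_load("""
references:
  - name: Fedora Packaging Guidelines
    link: https://docs.fedoraproject.org/en-US/packaging-guidelines/
""")
config = PromptConfig(**data)  # keyword expansion replaces the old custom __init__
print(config.references[0]["link"])
```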
logdetective/prompts/message_template.j2 ADDED
@@ -0,0 +1,2 @@
+ Snippets:
+ {}
logdetective/prompts/snippet_message_template.j2 ADDED
@@ -0,0 +1,2 @@
+ Snippet:
+ {}
logdetective/prompts/snippet_system_prompt.j2 ADDED
@@ -0,0 +1,34 @@
+ System time: {{ system_time }}
+
+ You are an expert system specialized in RPM package build failures. Your purpose is to provide concise analysis of build log snippets.
+
+ ## Instructions:
+
+ 1. Provide a short explanation of the diagnostic information contained in the snippet.
+ 2. Do not quote or repeat any text from the raw snippet in your response.
+ 3. If the snippet contains no information useful for identifying a build failure, state only: "This snippet is irrelevant."
+ 4. Be truthful and do not fabricate information.
+
+ Your response must be as short as possible.
+
+ ## Examples:
+
+ User: "Snippet: RPM build errors:"
+ Assistant: "Errors occurred during package build.
+ ---
+ User: "Snippet: Copr build error: Build failed"
+ Assistant: "The build in Copr has failed."
+ ---
+ User: "Snippet: /bin/tar: Removing leading `/' from member names"
+ Assistant: "This snippet is irrelevant."
+ ---
+
+ {% if references %}
+ ## References:
+
+ When necessary, suggest resources that may be helpful to user.
+
+ {% for reference in references %}
+ * {{ reference.name }} : {{ reference.link }}
+ {% endfor %}
+ {% endif %}
logdetective/prompts/staged_message_template.j2 ADDED
@@ -0,0 +1,2 @@
+ Snippets:
+ {}
logdetective/prompts/staged_system_prompt.j2 ADDED
@@ -0,0 +1,41 @@
+ System time: {{ system_time }}
+
+ You are an expert system specialized in RPM package build failures in Fedora ecosystem.
+ Your purpose is to diagnose the root cause of failures and propose solutions.
+
+ ## Input Format:
+
+ Snippets are provided as [Log Snippet] : [Explanation].
+
+ ## Instructions:
+
+ 1. Identify the single primary error that caused the build to stop. Ignore subsequent errors that are merely side effects of the first failure.
+ 2. Ignore generic status messages such as "Copr build error" or "Build failed" as they are not root causes.
+ 3. Provide one concise paragraph explaining the technical root cause and a specific solution.
+ 4. Avoid generic boilerplate (e.g., "check the logs" or "ensure dependencies are met").
+ 5. Do not quote the snippets.
+ 6. Be truthful and do not fabricate information.
+
+ Your response must be as short as possible.
+
+ ## Examples:
+
+ User: "
+ Snippets:
+ ================
+ Snippet No. 1 at line #452:
+ [error: command 'gcc' failed: No such file or directory]: [`gcc` compiler is not available in the build environment]
+ ================
+ Snippet No. 2 at line #452:
+ [Copr build error: Build failed]: [Package build in Copr failed]"
+ Assistant: "Package build in Copr failed due to missing `gcc` compiler. Ensure that all build requirements are correctly specified in the spec file."
+
+ {% if references %}
+ ## References:
+
+ When necessary, suggest resources that may be helpful to user.
+
+ {% for reference in references %}
+ * {{ reference.name }} : {{ reference.link }}
+ {% endfor %}
+ {% endif %}
logdetective/prompts/system_prompt.j2 ADDED
@@ -0,0 +1,57 @@
+ System time: {{ system_time }}
+
+ You are a highly capable expert system specialized in packaging and delivery of software using RPM,
+ within the RHEL ecosystem. Your purpose is to help package maintainers diagnose and resolve package build failures.
+ You are truthful, concise, and helpful.
+
+ ## Input processing
+
+ You will work with snippets of logs produced during a failed package build.
+ These snippets were extracted using data mining algorithm, and may not contain information
+ useful for diagnosing the root cause. Snippets without useful information must be disregarded.
+ General error messages, such as failure of commands used during build, are expected.
+
+ ## Temporal Logic and Causality
+
+ Log snippets are typically provided in chronological order. When analyzing multiple snippets
+ the first significant error in the log is usually the root cause.
+
+ An error occurring at line #500 cannot be caused by an error occurring at line #1000.
+
+ Subsequent errors are often side effects of the initial failure. Focus your diagnosis on the primary trigger.
+
+ ## Analysis procedure
+
+ Snippets are provided in order of appearance in the original log, with attached line number,
+ and are delimited with '================'.
+ Avoid generic or boilerplate recommendations (e.g., "check the logs," "ensure dependencies are met").
+ If a specific root cause is identified, the recommendation must directly address that cause.
+
+ 1. Analyze individual snippets. Do not quote analyzed snippets.
+ 2. Disregard snippets that do not contain useful information.
+ 3. Using information from all snippets provide explanation of the issue. Be as specific as possible.
+ 4. (Optional) Recommend a solution for the package maintainer, only if the cause is clear.
+
+ ## Examples:
+
+ User: "
+ Snippets:
+ Snippet No. 1 at line #452:
+
+ error: command 'gcc' failed: No such file or directory
+ ================
+ Snippet No. 2 at line #560:
+
+ Copr build error: Build failed
+ ================"
+ Assistant: "Package build in Copr failed due to missing `gcc` compiler. Ensure that all build requirements are correctly specified in the spec file."
+
+ {% if references %}
+ ## References:
+
+ When necessary, suggest resources that may be helpful to user.
+
+ {% for reference in references %}
+ * {{ reference.name }} : {{ reference.link }}
+ {% endfor %}
+ {% endif %}
logdetective/prompts.py ADDED
@@ -0,0 +1,87 @@
+ from datetime import datetime, timezone
+ from typing import Optional
+ from jinja2 import Environment, FileSystemLoader, Template
+
+ from logdetective.models import PromptConfig
+
+
+ class PromptManager: # pylint: disable=too-many-instance-attributes
+ """Manages prompts defined as jinja templates"""
+ _tmp_env: Environment
+
+ # Templates for system prompts
+ _default_system_prompt_template: Template
+ _snippet_system_prompt_template: Template
+ _staged_system_prompt_template: Template
+
+ # Templates for messages
+ default_message_template: Template
+ snippet_message_template: Template
+ staged_message_template: Template
+
+ _references: Optional[list[dict[str, str]]] = None
+
+ def __init__(
+ self, prompts_path: str, prompts_configuration: Optional[PromptConfig] = None
+ ) -> None:
+ self._tmp_env = Environment(loader=FileSystemLoader(prompts_path))
+
+ self._default_system_prompt_template = self._tmp_env.get_template(
+ "system_prompt.j2"
+ )
+ self._snippet_system_prompt_template = self._tmp_env.get_template(
+ "snippet_system_prompt.j2"
+ )
+ self._staged_system_prompt_template = self._tmp_env.get_template(
+ "staged_system_prompt.j2"
+ )
+
+ self.default_message_template = self._tmp_env.get_template(
+ "message_template.j2"
+ )
+ self.snippet_message_template = self._tmp_env.get_template(
+ "snippet_message_template.j2"
+ )
+ self.staged_message_template = self._tmp_env.get_template(
+ "staged_message_template.j2"
+ )
+
+ if prompts_configuration:
+ self._references = prompts_configuration.references
+
+ # To maintain backward compatibility with `logdetective.models.PromptConfig`
+ @property
+ def default_system_prompt(self) -> str:
+ """Render system prompt from a template"""
+ return self._default_system_prompt_template.render(
+ system_time=datetime.now(timezone.utc), references=self._references
+ )
+
+ @property
+ def snippet_system_prompt(self) -> str:
+ """Render system prompt from a template"""
+ return self._snippet_system_prompt_template.render(
+ system_time=datetime.now(timezone.utc), references=self._references
+ )
+
+ @property
+ def staged_system_prompt(self) -> str:
+ """Render system prompt from a template"""
+ return self._staged_system_prompt_template.render(
+ system_time=datetime.now(timezone.utc), references=self._references
+ )
+
+ @property
+ def prompt_template(self) -> str:
+ """Render message prompt from the template"""
+ return self.default_message_template.render()
+
+ @property
+ def snippet_prompt_template(self) -> str:
+ """Render message prompt from the template"""
+ return self.snippet_message_template.render()
+
+ @property
+ def prompt_template_staged(self) -> str:
+ """Render message prompt from the template"""
+ return self.staged_message_template.render()
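
A short usage sketch of the new `PromptManager` (the template directory and `references` payload here are illustrative; the properties re-render on every access, so `system_time` stays current):

```python
from logdetective.models import PromptConfig
from logdetective.prompts import PromptManager

# Illustrative: point at the packaged template directory and pass
# an optional references list via PromptConfig.
config = PromptConfig(
    references=[
        {
            "name": "Mock user documentation",
            "link": "https://rpm-software-management.github.io/mock/",
        }
    ]
)
manager = PromptManager("logdetective/prompts", config)

# System prompts render with the current UTC time and the references;
# message templates keep `{}` placeholders for str.format().
print(manager.default_system_prompt)
print(manager.prompt_template.format("Snippet No. 1 at line #452: ..."))
```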
logdetective/prompts.yml CHANGED
@@ -88,3 +88,10 @@ staged_system_prompt: |
  You never speculate about package being built or fabricate information.
  If you do not know the answer, you acknowledge the fact and end your response.
  Your responses must be as short as possible.
+
+ # Optional references, to be used when constructing prompt from Jinja template
+ # references:
+ # - name: Fedora Packaging Guidelines
+ # link: https://docs.fedoraproject.org/en-US/packaging-guidelines/
+ # - name: Mock user documentation
+ # link: https://rpm-software-management.github.io/mock/
logdetective/server/config.py CHANGED
@@ -61,7 +61,8 @@ def get_openai_api_client(inference_config: InferenceConfig):


  SERVER_CONFIG_PATH = os.environ.get("LOGDETECTIVE_SERVER_CONF", None)
- SERVER_PROMPT_PATH = os.environ.get("LOGDETECTIVE_PROMPTS", None)
+ SERVER_PROMPT_CONF_PATH = os.environ.get("LOGDETECTIVE_PROMPTS", None)
+ SERVER_PROMPT_PATH = os.environ.get("LOGDETECTIVE_PROMPT_TEMPLATES", None)
  # The default location for skip patterns is in the same directory
  # as logdetective __init__.py file.
  SERVER_SKIP_PATTERNS_PATH = os.environ.get(
@@ -70,7 +71,7 @@ SERVER_SKIP_PATTERNS_PATH = os.environ.get(
  )

  SERVER_CONFIG = load_server_config(SERVER_CONFIG_PATH)
- PROMPT_CONFIG = load_prompts(SERVER_PROMPT_PATH)
+ PROMPT_CONFIG = load_prompts(SERVER_PROMPT_CONF_PATH, SERVER_PROMPT_PATH)
  SKIP_SNIPPETS_CONFIG = load_skip_snippet_patterns(SERVER_SKIP_PATTERNS_PATH)

  LOG = get_log(SERVER_CONFIG)
logdetective/server/emoji.py CHANGED
@@ -71,14 +71,13 @@ async def collect_emojis_in_comments( # pylint: disable=too-many-locals
  else:
  project = projects[mr_job_db.id]
  merge_request_iid = mr_job_db.mr_iid
- if merge_request_iid not in merge_requests:
- merge_request = await asyncio.to_thread(
+ project_id = mr_job_db.project_id
+ if (project_id, merge_request_iid) not in merge_requests:
+ merge_requests[(project_id, merge_request_iid)] = await asyncio.to_thread(
  project.mergerequests.get, merge_request_iid
  )

- merge_requests[merge_request_iid] = merge_request
- else:
- merge_request = merge_requests[merge_request_iid]
+ merge_request = merge_requests[(project_id, merge_request_iid)]

  discussion = await asyncio.to_thread(
  merge_request.discussions.get, comment.comment_id
@@ -87,9 +86,10 @@ async def collect_emojis_in_comments( # pylint: disable=too-many-locals
  # Get the ID of the first note
  if "notes" not in discussion.attributes or len(discussion.attributes["notes"]) == 0:
  LOG.warning(
- "No notes were found in comment %s in merge request %d",
+ "No notes were found in comment %s in merge request %d of project %d",
  comment.comment_id,
  merge_request_iid,
+ project_id,
  )
  continue

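This fix changes the cache key from a bare merge-request IID to a `(project_id, merge_request_iid)` tuple; IIDs are only unique within a single project, so the old key could hand back a merge request from the wrong project. A condensed sketch of the pattern (the helper name is hypothetical):

```python
import asyncio

# Cache keyed by (project_id, merge_request_iid) rather than IID alone.
merge_requests: dict[tuple[int, int], object] = {}

async def get_merge_request(project, project_id: int, mr_iid: int):
    """Fetch an MR once per (project, IID) pair; `project` is a
    python-gitlab Project object, as in the surrounding code."""
    key = (project_id, mr_iid)
    if key not in merge_requests:
        # Blocking python-gitlab call moved off the event loop,
        # matching the asyncio.to_thread usage in the diff above.
        merge_requests[key] = await asyncio.to_thread(
            project.mergerequests.get, mr_iid
        )
    return merge_requests[key]
```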
logdetective/utils.py CHANGED
@@ -1,10 +1,11 @@
  import logging
  import os
  import subprocess as sp
- from typing import Iterator, List, Dict, Tuple, Generator
+ from typing import Iterator, List, Dict, Tuple, Generator, Optional
  from urllib.parse import urlparse

  import aiohttp
+ from jinja2 import exceptions
  import numpy as np
  import yaml

@@ -15,6 +16,7 @@ from llama_cpp import (
  )
  from logdetective.constants import SNIPPET_DELIMITER
  from logdetective.models import PromptConfig, SkipSnippets
+ from logdetective.prompts import PromptManager
  from logdetective.remote_log import RemoteLog

  LOG = logging.getLogger("logdetective")
@@ -127,7 +129,11 @@ def compute_certainty(probs: List[Dict]) -> float:


  def process_log(
- log: str, model: Llama, stream: bool, prompt_templates: PromptConfig, temperature: float
+ log: str,
+ model: Llama,
+ stream: bool,
+ prompt_templates: PromptConfig | PromptManager,
+ temperature: float,
  ) -> CreateChatCompletionResponse | Iterator[CreateChatCompletionStreamResponse]:
  """Processes a given log using the provided language model and returns its summary.

@@ -135,20 +141,14 @@ def process_log(
  log (str): The input log to be processed.
  model (Llama): The language model used for processing the log.
  stream (bool): Return output as Iterator.
- prompt_template (str): Which prompt template to use.
+ prompt_templates (PromptConfig | PromptManager): Prompt templates to use with LLM.
  temperature (float): Temperature parameter for model runtime.
  Returns:
  str: The summary of the given log generated by the language model.
  """
  messages = [
- {
- "role": "system",
- "content": prompt_templates.default_system_prompt
- },
- {
- "role": "user",
- "content": prompt_templates.prompt_template.format(log)
- },
+ {"role": "system", "content": prompt_templates.default_system_prompt},
+ {"role": "user", "content": prompt_templates.prompt_template.format(log)},
  ]

  response = model.create_chat_completion(
@@ -200,26 +200,35 @@ def format_snippets(snippets: list[str] | list[Tuple[int, str]]) -> str:
  else:
  header = f"Snippet No. {i}:"
  snippet_content = s
- summary += (
- f"{header}\n"
- "\n"
- f"{snippet_content}\n"
- f"{SNIPPET_DELIMITER}\n"
- f"\n"
- )
+ summary += f"{header}\n\n{snippet_content}\n{SNIPPET_DELIMITER}\n\n"
  return summary


- def load_prompts(path: str | None) -> PromptConfig:
- """Load prompts from given yaml file if there is one.
- Alternatively use defaults."""
- if path:
+ def load_prompts(
+ config_path: Optional[str] = None, template_path: Optional[str] = None
+ ) -> PromptConfig | PromptManager:
+ """Load prompts from yaml file, and optionally initialize `PromptManager`
+ if provided with path to prompt templates.
+ """
+ configuration = PromptConfig()
+ if config_path:
  try:
- with open(path, "r") as file:
- return PromptConfig(yaml.safe_load(file))
+ with open(config_path, "r") as file:
+ configuration = PromptConfig(**yaml.safe_load(file))
  except FileNotFoundError:
- print("Prompt configuration file not found, reverting to defaults.")
- return PromptConfig()
+ LOG.error(
+ "Prompt configuration file not found, reverting to defaults.",
+ exc_info=True,
+ )
+ if template_path:
+ try:
+ return PromptManager(template_path, configuration)
+ except exceptions.TemplateError:
+ LOG.error(
+ "Prompt templates couldn't be rendered, reverting to defaults.",
+ exc_info=True,
+ )
+ return configuration


  def prompt_to_messages(
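
The reworked `load_prompts` degrades gracefully: a missing YAML file or a broken template directory is logged rather than raised, and the plain `PromptConfig` is returned as the fallback. A sketch of the three outcomes (paths hypothetical):

```python
from logdetective.utils import load_prompts
from logdetective.models import PromptConfig
from logdetective.prompts import PromptManager

# No arguments: built-in defaults wrapped in PromptConfig.
defaults = load_prompts()
assert isinstance(defaults, PromptConfig)

# Jinja2's TemplateNotFound subclasses TemplateError, so a missing
# template directory also falls back to the YAML-backed PromptConfig.
fallback = load_prompts("prompts.yml", "/nonexistent/templates")

# Valid template directory: a PromptManager wrapping the configuration.
manager = load_prompts("prompts.yml", "logdetective/prompts")
assert isinstance(manager, PromptManager)
```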
logdetective-3.0.0.dist-info/METADATA → logdetective-3.2.0.dist-info/METADATA RENAMED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: logdetective
- Version: 3.0.0
- Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
+ Version: 3.2.0
+ Summary: Analyze logs with a template miner and an LLM to discover errors and suggest solutions.
  License: Apache-2.0
  License-File: LICENSE
  Author: Jiri Podivin
@@ -96,11 +96,12 @@ Usage

  To analyze a log file, run the script with the following command line arguments:
  - `file` (required): The path or URL of the log file to be analyzed.
- - `--model` (optional, default: "Mistral-7B-Instruct-v0.3-GGUF"): The path or Hugging space name of the language model for analysis. For models from Hugging Face, write them as `namespace/repo_name`. As we are using LLama.cpp we want this to be in the `gguf` format. If the model is already on your machine it will skip the download.
+ - `--model` (optional, default: "granite-3.2-8b-instruct-GGUF"): The path or Hugging space name of the language model for analysis. For models from Hugging Face, write them as `namespace/repo_name`. As we are using LLama.cpp we want this to be in the `gguf` format. If the model is already on your machine it will skip the download.
  - `--filename-suffix` (optional, default "Q4_K.gguf"): You can specify which suffix of the file to use. This option is applied when specifying model using the Hugging Face repository.
  - `--n-clusters` (optional, default 8): Number of clusters for Drain to organize log chunks into. This only makes sense when you are summarizing with Drain.
- - `--skip-snippets` Path to patterns for skipping snippets (in YAML).
- - `--prompts PROMPTS` Path to prompt configuration file.
+ - `--prompts PROMPTS` (Deprecated, replaced by `--prompts-config`) Path to prompt configuration file.
+ - `--prompts-config PROMPTS` Path to prompt configuration file.
+ - `--prompt-templates` Path to prompt template dir. Prompts must be valid Jinja templates, and system prompts must include field `system_time`.
  - `--temperature` Temperature for inference.
  - `--skip-snippets` Path to patterns for skipping snippets.
  - `--csgrep` Use csgrep to process the log.
@@ -120,9 +121,15 @@ Examples of using different models. Note the use of `--filename-suffix` (or `-F`

  Example of altered prompts:

- cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
- vi ~/my-prompts.yml # edit the prompts there to better fit your needs
- logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml
+ cp -r ~/.local/lib/python3.13/site-packages/logdetective/prompts ~/my-prompts
+ vi ~/my-prompts/system_prompt.j2 # edit the system prompt there to better fit your needs
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompt-templates ~/my-prompts
+
+ Example of altered prompts (Deprecated):
+
+ cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
+ vi ~/my-prompts.yml # edit the prompts there to better fit your needs
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml


  Note that streaming with some models (notably Meta-Llama-3) is broken and can be worked around by `no-stream` option:
@@ -206,7 +213,8 @@ message is reported indicating that the 'check' phase of the rpm build process
  failed with a bad exit status.
  ```

- It looks like a wall of text. Similar to any log. The main difference is that here we have the most significant lines of a logfile wrapped in `[ ] : ` and followed by textual explanation of the log text done by mistral 7b.
+ It looks like a wall of text. Similar to any log.
+ The main difference is that here we have the most significant lines of a logfile wrapped in `[ ] : ` and followed by textual explanation of the log text done by local LLM.


  Contributing
@@ -374,14 +382,14 @@ Before doing `podman-compose up`, make sure to set `MODELS_PATH` environment var
  ```
  $ export MODELS_PATH=/path/to/models/
  $ ll $MODELS_PATH
- -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
+ -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 granite-4.0-h-tiny-Q8_0.gguf
  ```

  If the variable is not set, `./models` is mounted inside by default.

  Model can be downloaded from [our Hugging Space](https://huggingface.co/fedora-copr) by:
  ```
- $ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
+ $ curl -L -o models/granite-3.2-8b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/granite-3.2-8b-instruct-GGUF/resolve/main/ggml-model-Q4_K.gguf
  ```

  Filtering snippet analysis by relevance
@@ -501,17 +509,38 @@ http GET "localhost:8080/metrics/analyze/requests?weeks=5" > /tmp/plot_weeks.svg
  System Prompts
  --------------

- Prompt templates used by Log Detective are stored in the `prompts.yml` file.
+ Prompts are defined as Jinja templates and placed in location specified by `--prompt-templates` option of the CLI utility, or `LOGDETECTIVE_PROMPT_TEMPLATES` environment variable of the container service. With further, optional, configuration in the `prompts.yml` configuration file.
+
+ All system prompt templates must include place for `system_time` variable.
+
+ If `references` list is defined in `prompts.yml`, templates must also include a handling for a list of references.
+
+ Example:
+
+ ```jinja
+ {% if references %}
+ ## References:
+
+ {% for reference in references %}
+ * {{ reference.name }} : {{ reference.link }}
+ {% endfor %}
+ {% endif %}
+
+ ```
+
+ *Deprecated:*
+
+ *Prompt templates used by Log Detective are stored in the `prompts.yml` file.
  It is possible to modify the file in place, or provide your own.
  In CLI you can override prompt templates location using `--prompts` option,
  while in the container service deployment the `LOGDETECTIVE_PROMPTS` environment variable
- is used instead.
+ is used instead.*

- Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
- with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.
+ *Prompts need to have a form compatible with python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax)
+ with spaces, or replacement fields marked with curly braces, `{}` left for insertion of snippets.*

- Number of replacement fields in new prompts, must be the same as in originals.
- Although their position may be different.
+ *Number of replacement fields in new prompts, must be the same as in originals.
+ Although their position may be different.*


  Skip Snippets
logdetective-3.0.0.dist-info/RECORD → logdetective-3.2.0.dist-info/RECORD RENAMED
@@ -1,16 +1,23 @@
  logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
- logdetective/constants.py,sha256=aCwrkBrDdS_kbNESK-Z-ewg--DSzodV2OMgwEq3UE38,2456
+ logdetective/constants.py,sha256=GKTHO77MrJQS5W97nnWY6AZqxcMHxGJDx5y97cpnymk,2455
  logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
  logdetective/extractors.py,sha256=vT-je4NkDgSj9rRtSeLpqBU52gIUnnVgJPHFbVihpCw,5993
- logdetective/logdetective.py,sha256=W4yY5PDK0zO_6ObCnLQc6K6xY8zOd8MXJJDaE3LH6Wo,6224
- logdetective/models.py,sha256=uczmQtWFgSp_ZGssngdTM4qzPF1o64dCy0469GoSbjQ,2937
+ logdetective/logdetective.py,sha256=um7rHKqwLGYdYLs46koRDADvYVYL2Q9-B9Y7veVHYO8,6563
+ logdetective/models.py,sha256=UV5eTo8mVMkJuY2W869iBZv-ZfMIXh4_jLhOf-dhdyQ,2204
+ logdetective/prompts/message_template.j2,sha256=yuniCNUtYvrAoQChOXI6-CV8XDeV8cX3EJmOn5hWbVw,13
+ logdetective/prompts/snippet_message_template.j2,sha256=YRKZ_rdK1xqN4i66dlACStgP_gFroC7k_EGmQgzFEU0,12
+ logdetective/prompts/snippet_system_prompt.j2,sha256=pLSDf_LnM1ddexbH1ROsCgyuUld5JZvs0nZhddDiT3s,1093
+ logdetective/prompts/staged_message_template.j2,sha256=yuniCNUtYvrAoQChOXI6-CV8XDeV8cX3EJmOn5hWbVw,13
+ logdetective/prompts/staged_system_prompt.j2,sha256=Qdpc1IrKvSkaiuzUchJsJekEafYekF-T44GfA-_cwTs,1584
+ logdetective/prompts/system_prompt.j2,sha256=mOf3yGG830sqHO_JWBUtq5TMITUVDgwbW5XihbgNRM8,2330
  logdetective/prompts-summary-first.yml,sha256=kmyMFQmqFXpojkz7p3CyCWCPxMpFLpfDdMGisB4YwL0,808
  logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
- logdetective/prompts.yml,sha256=i3z6Jcb4ScVi7LsxOpDlKiXrcvql3qO_JnLzkAKMn1c,3870
+ logdetective/prompts.py,sha256=H-cIyXMVxtiL_v3GBCKMaeBzThwOK6MejRxka3-lNqE,3062
+ logdetective/prompts.yml,sha256=dFvWlvzf9HAC_VGyEjUTzBWSQ6FojNa5jkJ9KHpuwqs,4171
  logdetective/remote_log.py,sha256=28QvdQiy7RBnd86EKCq_A75P21gSNlCbgxJe5XAe9MA,2258
  logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/compressors.py,sha256=y4aFYJ_9CbYdKuAI39Kc9GQSdPN8cSJ2c_VAz3T47EE,5249
- logdetective/server/config.py,sha256=dYoqvexnMo8LBXhXezMIEqUwzTsRD-eWvRIFIYNv388,2540
+ logdetective/server/config.py,sha256=Q1y4Ta1UUILLfK7iZ6H1Tf599Xa5gwtY3XF-4DoMLSs,2645
  logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/database/base.py,sha256=bqMkhL2D96i_QiSnO5u1FqxYuJJu0m0wXLkqj_A9WBs,2093
  logdetective/server/database/models/__init__.py,sha256=zoZMCt1_7tewDa6eEIIX_xrdN-tLegSiPNg5NiYaV3o,850
@@ -18,7 +25,7 @@ logdetective/server/database/models/exceptions.py,sha256=4ED7FSSA1liV9-7VIN2BwUi
  logdetective/server/database/models/koji.py,sha256=HNWxHYDxf4JN9K2ue8-V8dH-0XY5ZmxqH7Y9lAIbILA,6436
  logdetective/server/database/models/merge_request_jobs.py,sha256=MxiAVKQIsQMbFylBsmYBmVXYvid-4_5mwwXLfWdp6_w,19965
  logdetective/server/database/models/metrics.py,sha256=XpiGrZJ-SuHfePBOeek_WiV-i0p1wjoCBTekSMiZZM0,15559
- logdetective/server/emoji.py,sha256=zSaYtLpSkpRCXpjMWnHR1bYwkmobMJASZ7YNalrd85U,5274
+ logdetective/server/emoji.py,sha256=1hPFjoA_zdefPNm5bz2eSerjBFqrPZjnq4x7GgE4ADA,5340
  logdetective/server/exceptions.py,sha256=WN715KLL3ya6FiZ95v70VSbNuVhGuHFzxm2OeEPWQCw,981
  logdetective/server/gitlab.py,sha256=X9JSotUUlG9bOWYbUNKt9KqLUAj6Uocd2KNpfn35ccU,17192
  logdetective/server/koji.py,sha256=LG1pRiKUFvYFRKzgQoUG3pUHfcEwMoaMNjUSMKw_pBA,5640
@@ -31,9 +38,9 @@ logdetective/server/templates/gitlab_full_comment.md.j2,sha256=4UujUzl3lmdbNEADs
  logdetective/server/templates/gitlab_short_comment.md.j2,sha256=2krnMlGqqju2V_6pE0UqUR1P674OFaeX5BMyY5htTOQ,2022
  logdetective/server/utils.py,sha256=0BZ8WmzXNEtkUty1kOyFbBxDZWL0Icc8BUrxuHw9uvs,4015
  logdetective/skip_snippets.yml,sha256=reGlhPPCo06nNUJWiC2LY-OJOoPdcyOB7QBTSMeh0eg,487
- logdetective/utils.py,sha256=yalhySOF_Gzmqx_Ft9qad3TplAfZ6LOmauGXEJfKWiE,9803
- logdetective-3.0.0.dist-info/METADATA,sha256=nHYpSkE4pz3W557RWhBUwQMh3i52KTlx7_rDXoL3wzQ,22874
- logdetective-3.0.0.dist-info/WHEEL,sha256=3ny-bZhpXrU6vSQ1UPG34FoxZBp3lVcvK0LkgUz6VLk,88
- logdetective-3.0.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
- logdetective-3.0.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- logdetective-3.0.0.dist-info/RECORD,,
+ logdetective/utils.py,sha256=buis3FVNOb5lsZtM2LDTcYDgTTHxvi4Y59E4fvHdSuE,10327
+ logdetective-3.2.0.dist-info/METADATA,sha256=K2C3gvvxmLWkbCop8g4TVerIMNzBbeL99e3oULfkitI,24063
+ logdetective-3.2.0.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
+ logdetective-3.2.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+ logdetective-3.2.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ logdetective-3.2.0.dist-info/RECORD,,
logdetective-3.0.0.dist-info/WHEEL → logdetective-3.2.0.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.3.0
+ Generator: poetry-core 2.3.1
  Root-Is-Purelib: true
  Tag: py3-none-any