logdetective-1.3.0-py3-none-any.whl → logdetective-1.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/constants.py CHANGED
@@ -26,17 +26,6 @@ Analysis:
 
  """
 
- SUMMARIZATION_PROMPT_TEMPLATE = """
- Does following log contain error or issue?
-
- Log:
-
- {}
-
- Answer:
-
- """
-
  SNIPPET_PROMPT_TEMPLATE = """
  Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
 
@@ -69,6 +58,17 @@ Analysis:
 
  """
 
+ DEFAULT_SYSTEM_PROMPT = """
+ You are a highly capable large language model based expert system specialized in
+ packaging and delivery of software using RPM (RPM Package Manager). Your purpose is to diagnose
+ RPM build failures, identifying root causes and proposing solutions if possible.
+ You are truthful, concise, and helpful.
+
+ You never speculate about package being built or fabricate information.
+ If you do not know the answer, you acknowledge the fact and end your response.
+ Your responses must be as short as possible.
+ """
+
  SNIPPET_DELIMITER = "================"
 
  DEFAULT_TEMPERATURE = 0.8
@@ -76,3 +76,7 @@ DEFAULT_TEMPERATURE = 0.8
  # Tuning for LLM-as-a-Service
  LLM_DEFAULT_MAX_QUEUE_SIZE = 50
  LLM_DEFAULT_REQUESTS_PER_MINUTE = 60
+
+ # Roles for chat API
+ SYSTEM_ROLE_DEFAULT = "developer"
+ USER_ROLE_DEFAULT = "user"
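
Taken together, the new constants describe how a chat request is expected to be assembled: the system prompt travels under the "developer" role and the analysed log under the "user" role. A minimal sketch, assuming only the constants added above (the user content is a placeholder, not package output):

```python
from logdetective.constants import (
    DEFAULT_SYSTEM_PROMPT,
    SYSTEM_ROLE_DEFAULT,
    USER_ROLE_DEFAULT,
)

# Sketch only: the user message below is a placeholder, not real package output.
messages = [
    {"role": SYSTEM_ROLE_DEFAULT, "content": DEFAULT_SYSTEM_PROMPT},
    {"role": USER_ROLE_DEFAULT, "content": "Analyse the following RPM build log snippet: ..."},
]
print(messages[0]["role"])  # "developer"
```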
logdetective/extractors.py CHANGED
@@ -4,75 +4,12 @@ from typing import Tuple
 
  import drain3
  from drain3.template_miner_config import TemplateMinerConfig
- from llama_cpp import Llama, LlamaGrammar
 
- from logdetective.constants import SUMMARIZATION_PROMPT_TEMPLATE
  from logdetective.utils import get_chunks
 
  LOG = logging.getLogger("logdetective")
 
 
- class LLMExtractor:
-     """
-     A class that extracts relevant information from logs using a language model.
-     """
-
-     def __init__(
-         self,
-         model: Llama,
-         n_lines: int = 2,
-         prompt: str = SUMMARIZATION_PROMPT_TEMPLATE,
-     ):
-         self.model = model
-         self.n_lines = n_lines
-         self.grammar = LlamaGrammar.from_string(
-             'root ::= ("Yes" | "No")', verbose=False
-         )
-         self.prompt = prompt
-
-     def __call__(
-         self, log: str, n_lines: int = 2, neighbors: bool = False
-     ) -> list[str]:
-         chunks = self.rate_chunks(log)
-         out = self.create_extract(chunks, neighbors)
-         return out
-
-     def rate_chunks(self, log: str) -> list[tuple]:
-         """Scan log by the model and store results.
-
-         :param log: log file content
-         """
-         results = []
-         log_lines = log.split("\n")
-
-         for i in range(0, len(log_lines), self.n_lines):
-             block = "\n".join(log_lines[i: i + self.n_lines])
-             prompt = self.prompt.format(log)
-             out = self.model(prompt, max_tokens=7, grammar=self.grammar)
-             out = f"{out['choices'][0]['text']}\n"
-             results.append((block, out))
-
-         return results
-
-     def create_extract(self, chunks: list[tuple], neighbors: bool = False) -> list[str]:
-         """Extract interesting chunks from the model processing."""
-         interesting = []
-         summary = []
-         # pylint: disable=consider-using-enumerate
-         for i in range(len(chunks)):
-             if chunks[i][1].startswith("Yes"):
-                 interesting.append(i)
-                 if neighbors:
-                     interesting.extend([max(i - 1, 0), min(i + 1, len(chunks) - 1)])
-
-         interesting = set(interesting)
-
-         for i in interesting:
-             summary.append(chunks[i][0])
-
-         return summary
-
-
  class DrainExtractor:
      """A class that extracts information from logs using a template miner algorithm."""
 
logdetective/logdetective.py CHANGED
@@ -15,7 +15,7 @@ from logdetective.utils import (
      compute_certainty,
      load_prompts,
  )
- from logdetective.extractors import LLMExtractor, DrainExtractor
+ from logdetective.extractors import DrainExtractor
 
  LOG = logging.getLogger("logdetective")
 
@@ -49,16 +49,16 @@ def setup_args():
          "--summarizer",
          type=str,
          default="drain",
-         help="Choose between LLM and Drain template miner as the log summarizer.\
-             LLM must be specified as path to a model, URL or local file.",
+         help="DISABLED: LLM summarization option was removed. \
+             Argument is kept for backward compatibility only.",
      )
      parser.add_argument(
          "-N",
          "--n_lines",
          type=int,
-         default=8,
-         help="The number of lines per chunk for LLM analysis.\
-             This only makes sense when you are summarizing with LLM.",
+         default=None,
+         help="DISABLED: LLM summarization option was removed. \
+             Argument is kept for backward compatibility only.",
      )
      parser.add_argument(
          "-C",
@@ -74,13 +74,13 @@ def setup_args():
          "--prompts",
          type=str,
          default=f"{os.path.dirname(__file__)}/prompts.yml",
-         help="Path to prompt configuration file."
+         help="Path to prompt configuration file.",
      )
      parser.add_argument(
          "--temperature",
          type=float,
          default=DEFAULT_TEMPERATURE,
-         help="Temperature for inference."
+         help="Temperature for inference.",
      )
      return parser.parse_args()
 
@@ -93,6 +93,10 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals
          sys.stderr.write("Error: --quiet and --verbose is mutually exclusive.\n")
          sys.exit(2)
 
+     # Emit warning about use of discontinued args
+     if args.n_lines or args.summarizer != "drain":
+         LOG.warning("LLM based summarization was removed. Drain will be used instead.")
+
      # Logging facility setup
      log_level = logging.INFO
      if args.verbose >= 1:
@@ -116,18 +120,10 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals
          LOG.error("You likely do not have enough memory to load the AI model")
          sys.exit(3)
 
-     # Log file summarizer selection and initialization
-     if args.summarizer == "drain":
-         extractor = DrainExtractor(
-             args.verbose > 1, context=True, max_clusters=args.n_clusters
-         )
-     else:
-         summarizer_model = initialize_model(args.summarizer, verbose=args.verbose > 2)
-         extractor = LLMExtractor(
-             summarizer_model,
-             args.verbose > 1,
-             prompts_configuration.summarization_prompt_template,
-         )
+     # Log file summarizer initialization
+     extractor = DrainExtractor(
+         args.verbose > 1, context=True, max_clusters=args.n_clusters
+     )
 
      LOG.info("Getting summary")
 
@@ -149,6 +145,11 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals
      log_summary = format_snippets(log_summary)
      LOG.info("Log summary: \n %s", log_summary)
 
+     prompt = (
+         f"{prompts_configuration.default_system_prompt}\n"
+         f"{prompts_configuration.prompt_template}"
+     )
+
      stream = True
      if args.no_stream:
          stream = False
@@ -156,7 +157,7 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals
          log_summary,
          model,
          stream,
-         prompt_template=prompts_configuration.prompt_template,
+         prompt_template=prompt,
          temperature=args.temperature,
      )
      probs = []
@@ -187,7 +188,7 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals
 
 
  def main():
-     """ Evaluate logdetective program and wait for it to finish """
+     """Evaluate logdetective program and wait for it to finish"""
      asyncio.run(run())
 
 
logdetective/models.py CHANGED
@@ -4,8 +4,8 @@ from pydantic import BaseModel
  from logdetective.constants import (
      PROMPT_TEMPLATE,
      PROMPT_TEMPLATE_STAGED,
-     SUMMARIZATION_PROMPT_TEMPLATE,
      SNIPPET_PROMPT_TEMPLATE,
+     DEFAULT_SYSTEM_PROMPT,
  )
 
 
@@ -13,21 +13,30 @@ class PromptConfig(BaseModel):
      """Configuration for basic log detective prompts."""
 
      prompt_template: str = PROMPT_TEMPLATE
-     summarization_prompt_template: str = SUMMARIZATION_PROMPT_TEMPLATE
      snippet_prompt_template: str = SNIPPET_PROMPT_TEMPLATE
      prompt_template_staged: str = PROMPT_TEMPLATE_STAGED
 
+     default_system_prompt: str = DEFAULT_SYSTEM_PROMPT
+     snippet_system_prompt: str = DEFAULT_SYSTEM_PROMPT
+     staged_system_prompt: str = DEFAULT_SYSTEM_PROMPT
+
      def __init__(self, data: Optional[dict] = None):
          super().__init__()
          if data is None:
              return
          self.prompt_template = data.get("prompt_template", PROMPT_TEMPLATE)
-         self.summarization_prompt_template = data.get(
-             "summarization_prompt_template", SUMMARIZATION_PROMPT_TEMPLATE
-         )
          self.snippet_prompt_template = data.get(
              "snippet_prompt_template", SNIPPET_PROMPT_TEMPLATE
          )
          self.prompt_template_staged = data.get(
              "prompt_template_staged", PROMPT_TEMPLATE_STAGED
          )
+         self.default_system_prompt = data.get(
+             "default_system_prompt", DEFAULT_SYSTEM_PROMPT
+         )
+         self.snippet_system_prompt = data.get(
+             "snippet_system_prompt", DEFAULT_SYSTEM_PROMPT
+         )
+         self.staged_system_prompt = data.get(
+             "staged_system_prompt", DEFAULT_SYSTEM_PROMPT
+         )
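
Each of the new fields falls back to DEFAULT_SYSTEM_PROMPT whenever its key is missing from the loaded YAML, so older prompt files keep working unchanged. A small sketch of that behaviour, assuming nothing beyond the class shown above:

```python
from logdetective.models import PromptConfig

# Only one of the new keys is supplied; the remaining prompts keep the built-in defaults.
config = PromptConfig({"snippet_system_prompt": "You analyse single RPM build log snippets."})

assert config.snippet_system_prompt.startswith("You analyse")
assert "RPM (RPM Package Manager)" in config.default_system_prompt  # fell back to DEFAULT_SYSTEM_PROMPT
```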
logdetective/prompts.yml CHANGED
@@ -21,17 +21,6 @@ prompt_template: |
 
    Analysis:
 
-
- summarization_prompt_template: |
-   Does following log contain error or issue?
-
-   Log:
-
-   {}
-
-   Answer:
-
-
  snippet_prompt_template: |
    Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
 
@@ -59,3 +48,46 @@ prompt_template_staged: |
    {}
 
    Analysis:
+
+ # System prompts
+ # System prompts are meant to serve as general guide for model behavior,
+ # describing role and purpose it is meant to serve.
+ # Sample system prompts in this file are intentionally the same,
+ # however, in some circumstances it may be beneficial have different
+ # system prompts for each sub case. For example when a specialized model is deployed
+ # to analyze snippets.
+
+ # Default prompt is used by the CLI tool and also for final analysis
+ # with /analyze and /analyze/stream API endpoints
+ default_system_prompt: |
+   You are a highly capable large language model based expert system specialized in
+   packaging and delivery of software using RPM (RPM Package Manager). Your purpose is to diagnose
+   RPM build failures, identifying root causes and proposing solutions if possible.
+   You are truthful, concise, and helpful.
+
+   You never speculate about package being built or fabricate information.
+   If you do not know the answer, you acknowledge the fact and end your response.
+   Your responses must be as short as possible.
+
+ # Snippet system prompt is used for analysis of individual snippets
+ snippet_system_prompt: |
+   You are a highly capable large language model based expert system specialized in
+   packaging and delivery of software using RPM (RPM Package Manager). Your purpose is to diagnose
+   RPM build failures, identifying root causes and proposing solutions if possible.
+   You are truthful, concise, and helpful.
+
+   You never speculate about package being built or fabricate information.
+   If you do not know the answer, you acknowledge the fact and end your response.
+   Your responses must be as short as possible.
+
+
+ # Staged system prompt is used by /analyze/staged API endpoint
+ staged_system_prompt: |
+   You are a highly capable large language model based expert system specialized in
+   packaging and delivery of software using RPM (RPM Package Manager). Your purpose is to diagnose
+   RPM build failures, identifying root causes and proposing solutions if possible.
+   You are truthful, concise, and helpful.
+
+   You never speculate about package being built or fabricate information.
+   If you do not know the answer, you acknowledge the fact and end your response.
+   Your responses must be as short as possible.
logdetective/remote_log.py CHANGED
@@ -64,6 +64,4 @@ class RemoteLog:
          try:
              return await self.get_url_content()
          except RuntimeError as ex:
-             raise HTTPBadRequest(
-                 reason=f"We couldn't obtain the logs: {ex}"
-             ) from ex
+             raise HTTPBadRequest(reason=f"We couldn't obtain the logs: {ex}") from ex
logdetective/server/config.py CHANGED
@@ -52,11 +52,10 @@ def get_log(config: Config):
 
 
  def get_openai_api_client(ineference_config: InferenceConfig):
-     """Set up AsyncOpenAI client with default configuration.
-     """
+     """Set up AsyncOpenAI client with default configuration."""
      return AsyncOpenAI(
-         api_key=ineference_config.api_token,
-         base_url=ineference_config.url)
+         api_key=ineference_config.api_token, base_url=ineference_config.url
+     )
 
 
  SERVER_CONFIG_PATH = os.environ.get("LOGDETECTIVE_SERVER_CONF", None)
logdetective/server/emoji.py CHANGED
@@ -51,7 +51,9 @@ async def _handle_gitlab_operation(func: Callable, *args):
          else:
              LOG.exception(log_msg)
      except Exception as e: # pylint: disable=broad-exception-caught
-         LOG.exception("Unexpected error during GitLab operation %s(%s): %s", func, args, e)
+         LOG.exception(
+             "Unexpected error during GitLab operation %s(%s): %s", func, args, e
+         )
 
 
  async def collect_emojis_in_comments( # pylint: disable=too-many-locals
logdetective/server/llm.py CHANGED
@@ -1,7 +1,7 @@
  import os
  import asyncio
  import random
- from typing import List, Tuple, Union
+ from typing import List, Tuple, Union, Dict
 
  import backoff
  from fastapi import HTTPException
@@ -14,6 +14,7 @@ from logdetective.constants import SNIPPET_DELIMITER
  from logdetective.extractors import DrainExtractor
  from logdetective.utils import (
      compute_certainty,
+     prompt_to_messages,
  )
  from logdetective.server.config import LOG, SERVER_CONFIG, PROMPT_CONFIG, CLIENT
  from logdetective.server.models import (
@@ -85,7 +86,7 @@ def we_give_up(details: backoff._typing.Details):
      on_giveup=we_give_up,
  )
  async def submit_text(
-     text: str,
+     messages: List[Dict[str, str]],
      inference_cfg: InferenceConfig,
      stream: bool = False,
  ) -> Union[Explanation, AsyncStream[ChatCompletionChunk]]:
@@ -100,12 +101,7 @@ async def submit_text(
 
      async with inference_cfg.get_limiter():
          response = await CLIENT.chat.completions.create(
-             messages=[
-                 {
-                     "role": "user",
-                     "content": text,
-                 }
-             ],
+             messages=messages,
              max_tokens=inference_cfg.max_tokens,
              logprobs=inference_cfg.log_probs,
              stream=stream,
@@ -136,7 +132,12 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
      # Process snippets asynchronously
      awaitables = [
          submit_text(
-             PROMPT_CONFIG.snippet_prompt_template.format(s),
+             prompt_to_messages(
+                 PROMPT_CONFIG.snippet_prompt_template.format(s),
+                 PROMPT_CONFIG.snippet_system_prompt,
+                 SERVER_CONFIG.inference.system_role,
+                 SERVER_CONFIG.inference.user_role,
+             ),
              inference_cfg=SERVER_CONFIG.snippet_inference,
          )
          for s in log_summary
@@ -150,9 +151,14 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
      final_prompt = PROMPT_CONFIG.prompt_template_staged.format(
          format_analyzed_snippets(analyzed_snippets)
      )
-
-     final_analysis = await submit_text(
+     messages = prompt_to_messages(
          final_prompt,
+         PROMPT_CONFIG.staged_system_prompt,
+         SERVER_CONFIG.inference.system_role,
+         SERVER_CONFIG.inference.user_role,
+     )
+     final_analysis = await submit_text(
+         messages,
          inference_cfg=SERVER_CONFIG.inference,
      )
 
logdetective/server/models.py CHANGED
@@ -20,6 +20,8 @@ from logdetective.constants import (
      DEFAULT_TEMPERATURE,
      LLM_DEFAULT_MAX_QUEUE_SIZE,
      LLM_DEFAULT_REQUESTS_PER_MINUTE,
+     SYSTEM_ROLE_DEFAULT,
+     USER_ROLE_DEFAULT,
  )
 
 
@@ -143,6 +145,8 @@ class InferenceConfig(BaseModel): # pylint: disable=too-many-instance-attribute
      temperature: NonNegativeFloat = DEFAULT_TEMPERATURE
      max_queue_size: int = LLM_DEFAULT_MAX_QUEUE_SIZE
      http_timeout: float = 5.0
+     user_role: str = USER_ROLE_DEFAULT
+     system_role: str = SYSTEM_ROLE_DEFAULT
      _http_session: aiohttp.ClientSession = None
      _limiter: AsyncLimiter = AsyncLimiter(LLM_DEFAULT_REQUESTS_PER_MINUTE)
 
@@ -159,7 +163,8 @@ class InferenceConfig(BaseModel): # pylint: disable=too-many-instance-attribute
          self.model = data.get("model", "default-model")
          self.temperature = data.get("temperature", DEFAULT_TEMPERATURE)
          self.max_queue_size = data.get("max_queue_size", LLM_DEFAULT_MAX_QUEUE_SIZE)
-
+         self.user_role = data.get("user_role", USER_ROLE_DEFAULT)
+         self.system_role = data.get("system_role", SYSTEM_ROLE_DEFAULT)
          self._requests_per_minute = data.get(
              "requests_per_minute", LLM_DEFAULT_REQUESTS_PER_MINUTE
          )
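
Because `user_role` and `system_role` are read with `data.get`, deployments can override them per model. A hedged sketch of such an override; the `"model"`, `"user_role"`, and `"system_role"` keys come from the code above, while the surrounding configuration structure and the model name are assumptions:

```python
# Hypothetical inference options. Setting both roles to the same value makes
# prompt_to_messages fold the system prompt into the user message, which helps
# with models that lack a dedicated system/developer role.
inference_options = {
    "model": "mistral-small",  # hypothetical model name
    "user_role": "user",
    "system_role": "user",
}
```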
logdetective/server/plot.py CHANGED
@@ -340,7 +340,7 @@ def _plot_emoji_data( # pylint: disable=too-many-locals
      )
      all_counts.extend(counts)
 
-     colors = [cm.viridis(i) for i in numpy.linspace(0, 1, len(reactions_values_dict))] # pylint: disable=no-member
+     colors = [cm.viridis(i) for i in numpy.linspace(0, 1, len(reactions_values_dict))]  # pylint: disable=no-member
 
      first_emoji = True
      for i, (emoji, dict_counts) in enumerate(reactions_values_dict.items()):
logdetective/server/server.py CHANGED
@@ -20,6 +20,7 @@ import logdetective.server.database.base
  from logdetective.utils import (
      compute_certainty,
      format_snippets,
+     prompt_to_messages,
  )
 
  from logdetective.server.config import SERVER_CONFIG, PROMPT_CONFIG, LOG
@@ -135,9 +136,14 @@ async def analyze_log(
      log_text = await remote_log.process_url()
      log_summary = mine_logs(log_text)
      log_summary = format_snippets(log_summary)
-
-     response = await submit_text(
+     messages = prompt_to_messages(
          PROMPT_CONFIG.prompt_template.format(log_summary),
+         PROMPT_CONFIG.default_system_prompt,
+         SERVER_CONFIG.inference.system_role,
+         SERVER_CONFIG.inference.user_role,
+     )
+     response = await submit_text(
+         messages,
          inference_cfg=SERVER_CONFIG.inference,
      )
      certainty = 0
@@ -204,10 +210,15 @@ async def analyze_log_stream(
      log_text = await remote_log.process_url()
      log_summary = mine_logs(log_text)
      log_summary = format_snippets(log_summary)
-
+     messages = prompt_to_messages(
+         PROMPT_CONFIG.prompt_template.format(log_summary),
+         PROMPT_CONFIG.default_system_prompt,
+         SERVER_CONFIG.inference.system_role,
+         SERVER_CONFIG.inference.user_role,
+     )
      try:
          stream = submit_text(
-             PROMPT_CONFIG.prompt_template.format(log_summary),
+             messages,
              inference_cfg=SERVER_CONFIG.inference,
              stream=True,
          )
logdetective/utils.py CHANGED
@@ -179,7 +179,7 @@ def format_snippets(snippets: list[str] | list[Tuple[int, str]]) -> str:
          summary += f"""
  Snippet No. {i}:
 
- {s[1]}
+ {s}
  ================
  """
      return summary
@@ -195,3 +195,31 @@ def load_prompts(path: str | None) -> PromptConfig:
      except FileNotFoundError:
          print("Prompt configuration file not found, reverting to defaults.")
          return PromptConfig()
+
+
+ def prompt_to_messages(
+     user_message: str,
+     system_prompt: str | None = None,
+     system_role: str = "developer",
+     user_role: str = "user",
+ ) -> List[Dict[str, str]]:
+     """Turn prompt into list of message dictionaries.
+     If `system_role` and `user_role` are the same, only a single message is created,
+     as concatenation of `user_message` and `system_prompt`. This is useful for models which
+     do not have separate system role, such as mistral.
+     """
+
+     if system_role == user_role:
+         messages = [
+             {"role": system_role, "content": f"{system_prompt}\n{user_message}"}
+         ]
+     else:
+         messages = [
+             {"role": system_role, "content": system_prompt},
+             {
+                 "role": user_role,
+                 "content": user_message,
+             },
+         ]
+
+     return messages
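
A short usage sketch of the new helper, assuming logdetective 1.5.0 is installed; it shows both the two-message form and the collapsed form used when the configured roles coincide:

```python
from logdetective.utils import prompt_to_messages

# Distinct roles: system prompt and user prompt become two separate messages.
two = prompt_to_messages("Analyse this log ...", "You are an RPM build expert.")
# -> [{'role': 'developer', 'content': 'You are an RPM build expert.'},
#     {'role': 'user', 'content': 'Analyse this log ...'}]

# Identical roles: everything is folded into one message, for models without
# a separate system/developer role.
one = prompt_to_messages(
    "Analyse this log ...",
    "You are an RPM build expert.",
    system_role="user",
    user_role="user",
)
assert len(two) == 2 and len(one) == 1
```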
logdetective-1.3.0.dist-info/METADATA → logdetective-1.5.0.dist-info/METADATA CHANGED
@@ -1,12 +1,12 @@
  Metadata-Version: 2.3
  Name: logdetective
- Version: 1.3.0
+ Version: 1.5.0
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
  License: Apache-2.0
  Author: Jiri Podivin
  Author-email: jpodivin@gmail.com
  Requires-Python: >=3.11,<4.0
- Classifier: Development Status :: 4 - Beta
+ Classifier: Development Status :: 5 - Production/Stable
  Classifier: Environment :: Console
  Classifier: Intended Audience :: Developers
  Classifier: License :: OSI Approved :: Apache Software License
@@ -87,8 +87,8 @@ Usage
  To analyze a log file, run the script with the following command line arguments:
  - `url` (required): The URL of the log file to be analyzed.
  - `--model` (optional, default: "Mistral-7B-Instruct-v0.2-GGUF"): The path or URL of the language model for analysis. As we are using LLama.cpp we want this to be in the `gguf` format. You can include the download link to the model here. If the model is already on your machine it will skip the download.
- - `--summarizer` (optional, default: "drain"): Choose between LLM and Drain template miner as the log summarizer. You can also provide the path to an existing language model file instead of using a URL.
- - `--n_lines` (optional, default: 8): The number of lines per chunk for LLM analysis. This only makes sense when you are summarizing with LLM.
+ - `--summarizer` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only.(optional, default: "drain"): Choose between LLM and Drain template miner as the log summarizer. You can also provide the path to an existing language model file instead of using a URL.
+ - `--n_lines` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only. (optional, default: 8): The number of lines per chunk for LLM analysis. This only makes sense when you are summarizing with LLM.
  - `--n_clusters` (optional, default 8): Number of clusters for Drain to organize log chunks into. This only makes sense when you are summarizing with Drain
 
  Example usage:
@@ -376,6 +376,9 @@ HTTPS certificate generated through:
  certbot certonly --standalone -d logdetective01.fedorainfracloud.org
  ```
 
+ Certificates need to be be placed into location specified by the`LOGDETECTIVE_CERTDIR`
+ env var and the service should be restarted.
+
  Querying statistics
  -------------------
 
logdetective-1.3.0.dist-info/RECORD → logdetective-1.5.0.dist-info/RECORD CHANGED
@@ -1,33 +1,33 @@
  logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
- logdetective/constants.py,sha256=UmYSutgy8yK-IDMQyXqLtQV-wDserDa4K1GmsMCGXHc,1949
+ logdetective/constants.py,sha256=KD5FtMvRMO5jO9O1a5FbHy6yFSF6ZkZ4lNrhI7D_S2Y,2456
  logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
- logdetective/extractors.py,sha256=7ahzWbTtU9MveG1Q7wU9LO8OJgs85X-cHmWltUhCe9M,3491
- logdetective/logdetective.py,sha256=cC2oL4yPNo94AB2nS4v1jpZi-Qo1g0_FEchL_yQL1UU,5832
- logdetective/models.py,sha256=nrGBmMRu8i6UhFflQKAp81Y3Sd_Aaoor0i_yqSJoLT0,1115
+ logdetective/extractors.py,sha256=sFsBFKpIBglejD2lxct2B0qEP0lFSep-ZIebq4KfaLM,1515
+ logdetective/logdetective.py,sha256=WKj8U5p329ek0T-G2rFtRxD5R07IZZGSVNZodcGT5PA,5722
+ logdetective/models.py,sha256=ONF7SK8VeuJk_gEj_l0ToYQ7asZYbrEmVUOUNQ5SEaA,1407
  logdetective/prompts-summary-first.yml,sha256=3Zfp4NNOfaFYq5xBlBjeQa5PdjYfS4v17OtJqQ-DRpU,821
  logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
- logdetective/prompts.yml,sha256=urPKG068TYxi58EicFVUH6FavZq_q36oM1LvfI4ddjg,1729
- logdetective/remote_log.py,sha256=1oeMIdDE_ob_2QrlXYTAA_m-36pNEicXbZwrCyzNgwo,2256
+ logdetective/prompts.yml,sha256=dOqaFrtBOkFRHLWte_2tGV-pNXwXP9Km9iWno_TZyic,3863
+ logdetective/remote_log.py,sha256=u-KlhO4Eu0ES6pPwrNbHBVhrZCdFi8894zJj33Lg3YA,2226
  logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/compressors.py,sha256=qzrT-BPSksXY6F2L6ger04GGrgdBsGOfK2YuCFRs0Q4,5427
- logdetective/server/config.py,sha256=yD6pRTZze9bqoCxfdxpupXrvb18auAf95mq4BoowAbs,2113
+ logdetective/server/config.py,sha256=WeEhgiYVdvNQEcE9ZcIt63U9CzScQRWl5QXfHh-KH9s,2105
  logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  logdetective/server/database/base.py,sha256=1mcjEbhwLl4RalvT3oy6XVctjJoWIW3H9aI_sMWJBK8,1728
  logdetective/server/database/models/__init__.py,sha256=xy2hkygyw6_87zPKkG20i7g7_LXTGR__PUeojhbvv94,496
  logdetective/server/database/models/merge_request_jobs.py,sha256=hw88wV1-3x7i53sX7ZotKClc6OsH1njPpbRSZofnqr4,18670
  logdetective/server/database/models/metrics.py,sha256=yl9fS4IPVFWDeFvPAxO6zOVu6oLF319ApvVLAgnD5yU,13928
- logdetective/server/emoji.py,sha256=W1nJLU1UnTG8FGttOs6gC7x3TcjxiBuviXklD9f2Mu8,4370
+ logdetective/server/emoji.py,sha256=Iv1CFNyWhweBG13v59O1fQD-dZj-YGM1IKlkIaCzBaU,4392
  logdetective/server/gitlab.py,sha256=wQSlvdWn6XEi1oP6HhI75bIhm6bgdpWr3zu2WXF0_oE,16473
- logdetective/server/llm.py,sha256=GkbOjRRWEbw7EhFRpblalwNbwNVQPTTjrbLOqJXKqy0,5388
+ logdetective/server/llm.py,sha256=q9LdoAmsx9MpBjnjLyJ9GBU27jKViTaWbVXyMsmsCI0,5721
  logdetective/server/metric.py,sha256=B3ew_qSmtEMj6xl-FoOtS4F_bkplp-shhtfHF1cG_Io,4010
- logdetective/server/models.py,sha256=Pfvyd8CKlahIWeoVAJlQEt2TiLA5ndHEcigfm6xJwBI,15471
- logdetective/server/plot.py,sha256=eZs4r9gua-nW3yymSMIz1leL9mb4QKlh6FJZSeOfZ5M,14872
- logdetective/server/server.py,sha256=-JJnHj8fPzx8aCJD3q2wRwidxoHPCmwOP8FTWwc1C14,18386
+ logdetective/server/models.py,sha256=I45uLnq_zqn_r0FdOdop9zQPbsOWOY_M39NBBOXP134,15738
+ logdetective/server/plot.py,sha256=yS7TF_Gu7yV0uE9W50Ht5wQSlavgCx2CiU1XGO-iftE,14870
+ logdetective/server/server.py,sha256=V-lSG2cCTxoGwvUc8mEmLQQWS4g_W_dER2o118RufAk,18792
  logdetective/server/templates/gitlab_full_comment.md.j2,sha256=DQZ2WVFedpuXI6znbHIW4wpF9BmFS8FaUkowh8AnGhE,1627
  logdetective/server/templates/gitlab_short_comment.md.j2,sha256=fzScpayv2vpRLczP_0O0YxtA8rsKvR6gSv4ntNdWb98,1443
- logdetective/utils.py,sha256=hdExAC8FtDIxvdgIq-Ro6LVM-JZ-k_UofaMzaDAHvzM,6088
- logdetective-1.3.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- logdetective-1.3.0.dist-info/METADATA,sha256=9UIOXKl7Ubj5TCsM2p_enbgDDC80d7uByqwA-VpPFZQ,17709
- logdetective-1.3.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- logdetective-1.3.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
- logdetective-1.3.0.dist-info/RECORD,,
+ logdetective/utils.py,sha256=5EcRjQcNG1UFPovrMLqlaApgxWSB2DHQhSExkEY3yk0,6932
+ logdetective-1.5.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ logdetective-1.5.0.dist-info/METADATA,sha256=ee4c820E_pX7ULV8PZGCJT0TzbfzpkkReRfrC7cznG4,18050
+ logdetective-1.5.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ logdetective-1.5.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+ logdetective-1.5.0.dist-info/RECORD,,