logdetective 0.5.9__py3-none-any.whl → 0.5.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/constants.py CHANGED
@@ -16,6 +16,8 @@ Snippets are delimited with '================'.
16
16
 
17
17
  Finally, drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
18
18
 
19
+ Explanation of the issue, and recommended solution, should take handful of sentences.
20
+
19
21
  Snippets:
20
22
 
21
23
  {}
@@ -38,6 +40,8 @@ Answer:
38
40
  SNIPPET_PROMPT_TEMPLATE = """
39
41
  Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
40
42
 
43
+ Your analysis must be as concise as possible, while keeping relevant information intact.
44
+
41
45
  Snippet:
42
46
 
43
47
  {}
@@ -55,6 +59,8 @@ Snippets are delimited with '================'.
55
59
 
56
60
  Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
57
61
 
62
+ Explanation of the issue, and recommended solution, should take handful of sentences.
63
+
58
64
  Snippets:
59
65
 
60
66
  {}
@@ -64,3 +70,5 @@ Analysis:
64
70
  """
65
71
 
66
72
  SNIPPET_DELIMITER = "================"
73
+
74
+ DEFAULT_TEMPERATURE = 0.8
@@ -1,8 +1,9 @@
1
1
  import argparse
2
2
  import logging
3
3
  import sys
4
+ import os
4
5
 
5
- from logdetective.constants import DEFAULT_ADVISOR
6
+ from logdetective.constants import DEFAULT_ADVISOR, DEFAULT_TEMPERATURE
6
7
  from logdetective.utils import (
7
8
  process_log,
8
9
  initialize_model,
@@ -67,7 +68,16 @@ def setup_args():
67
68
  parser.add_argument("-v", "--verbose", action="count", default=0)
68
69
  parser.add_argument("-q", "--quiet", action="store_true")
69
70
  parser.add_argument(
70
- "--prompts", type=str, default="", help="Path to prompt configuration file."
71
+ "--prompts",
72
+ type=str,
73
+ default=f"{os.path.dirname(__file__)}/prompts.yml",
74
+ help="Path to prompt configuration file."
75
+ )
76
+ parser.add_argument(
77
+ "--temperature",
78
+ type=float,
79
+ default=DEFAULT_TEMPERATURE,
80
+ help="Temperature for inference."
71
81
  )
72
82
  return parser.parse_args()
73
83
 
@@ -143,6 +153,7 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
143
153
  model,
144
154
  stream,
145
155
  prompt_template=prompts_configuration.prompt_template,
156
+ temperature=args.temperature,
146
157
  )
147
158
  probs = []
148
159
  print("Explanation:")
logdetective/prompts.yml CHANGED
@@ -1,3 +1,8 @@
1
+ # This file is intended for customization of prompts
2
+ # It is used only in server mode.
3
+ # On the command line you have to load it using --prompts
4
+ # The defaults are stored in constants.py
5
+
1
6
  prompt_template: |
2
7
  Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
3
8
 
@@ -8,6 +13,8 @@ prompt_template: |
8
13
 
9
14
  Finally, drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
10
15
 
16
+ Explanation of the issue, and recommended solution, should take handful of sentences.
17
+
11
18
  Snippets:
12
19
 
13
20
  {}
@@ -28,6 +35,8 @@ summarization_prompt_template: |
28
35
  snippet_prompt_template: |
29
36
  Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
30
37
 
38
+ Your analysis must be as concise as possible, while keeping relevant information intact.
39
+
31
40
  Snippet:
32
41
 
33
42
  {}
@@ -43,6 +52,8 @@ prompt_template_staged: |
43
52
 
44
53
  Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
45
54
 
55
+ Explanation of the issue, and recommended solution, should take handful of sentences.
56
+
46
57
  Snippets:
47
58
 
48
59
  {}
@@ -97,17 +97,35 @@ class AnalyzeRequestMetrics(Base):
97
97
  metrics.response_certainty = response_certainty
98
98
  session.add(metrics)
99
99
 
100
+ @classmethod
101
+ def get_postgres_time_format(cls, time_format):
102
+ """Map Python time format to the PostgreSQL format."""
103
+ if time_format == "%Y-%m-%d":
104
+ pgsql_time_format = "YYYY-MM-DD"
105
+ else:
106
+ pgsql_time_format = "YYYY-MM-DD HH24"
107
+ return pgsql_time_format
108
+
109
+ @classmethod
110
+ def get_dictionary_with_datetime_keys(
111
+ cls, time_format: str, values_dict: dict[str, any]
112
+ ) -> dict[datetime.datetime, any]:
113
+ """Convert from a dictionary with str keys to a dictionary with datetime keys"""
114
+ new_dict = {
115
+ datetime.datetime.strptime(r[0], time_format): r[1] for r in values_dict
116
+ }
117
+ return new_dict
118
+
100
119
  @classmethod
101
120
  def _get_requests_by_time_for_postgres(
102
121
  cls, start_time, end_time, time_format, endpoint
103
122
  ):
104
- """func.to_char is PostgreSQL specific.
123
+ """Get total requests number in time period.
124
+
125
+ func.to_char is PostgreSQL specific.
105
126
  Let's unit tests replace this function with the SQLite version.
106
127
  """
107
- if time_format == "%Y-%m-%d":
108
- pgsql_time_format = "YYYY-MM-DD"
109
- else:
110
- pgsql_time_format = "YYYY-MM-DD HH24"
128
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
111
129
 
112
130
  requests_by_time_format = (
113
131
  select(
@@ -123,10 +141,12 @@ class AnalyzeRequestMetrics(Base):
123
141
  return requests_by_time_format
124
142
 
125
143
  @classmethod
126
- def _get_requests_by_time_for_sqllite(
144
+ def _get_requests_by_time_for_sqlite(
127
145
  cls, start_time, end_time, time_format, endpoint
128
146
  ):
129
- """func.strftime is SQLite specific.
147
+ """Get total requests number in time period.
148
+
149
+ func.strftime is SQLite specific.
130
150
  Use this function in unit test using flexmock:
131
151
 
132
152
  flexmock(AnalyzeRequestMetrics).should_receive("_get_requests_by_time_for_postgres")
@@ -178,9 +198,193 @@ class AnalyzeRequestMetrics(Base):
178
198
  counts = session.execute(count_requests_by_time_format)
179
199
  results = counts.fetchall()
180
200
 
181
- # Convert results to a dictionary with proper datetime keys
182
- counts_dict = {
183
- datetime.datetime.strptime(r[0], time_format): r[1] for r in results
184
- }
201
+ return cls.get_dictionary_with_datetime_keys(time_format, results)
202
+
203
+ @classmethod
204
+ def _get_average_responses_times_for_postgres(
205
+ cls, start_time, end_time, time_format, endpoint
206
+ ):
207
+ """Get average responses time.
208
+
209
+ func.to_char is PostgreSQL specific.
210
+ Let's unit tests replace this function with the SQLite version.
211
+ """
212
+ with transaction(commit=False) as session:
213
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
214
+
215
+ average_responses_times = (
216
+ select(
217
+ func.to_char(cls.request_received_at, pgsql_time_format).label(
218
+ "time_range"
219
+ ),
220
+ (
221
+ func.avg(
222
+ func.extract( # pylint: disable=not-callable
223
+ "epoch", cls.response_sent_at - cls.request_received_at
224
+ )
225
+ )
226
+ ).label("average_response_seconds"),
227
+ )
228
+ .filter(cls.request_received_at.between(start_time, end_time))
229
+ .filter(cls.endpoint == endpoint)
230
+ .group_by("time_range")
231
+ .order_by("time_range")
232
+ )
233
+
234
+ results = session.execute(average_responses_times).fetchall()
235
+ return results
236
+
237
+ @classmethod
238
+ def _get_average_responses_times_for_sqlite(
239
+ cls, start_time, end_time, time_format, endpoint
240
+ ):
241
+ """Get average responses time.
242
+
243
+ func.strftime is SQLite specific.
244
+ Use this function in unit test using flexmock:
245
+
246
+ flexmock(AnalyzeRequestMetrics).should_receive("_get_average_responses_times_for_postgres")
247
+ .replace_with(AnalyzeRequestMetrics._get_average_responses_times_for_sqlite)
248
+ """
249
+ with transaction(commit=False) as session:
250
+ average_responses_times = (
251
+ select(
252
+ func.strftime(time_format, cls.request_received_at).label(
253
+ "time_range"
254
+ ),
255
+ (
256
+ func.avg(
257
+ func.julianday(cls.response_sent_at)
258
+ - func.julianday(cls.request_received_at) # noqa: W503 flake8 vs ruff
259
+ )
260
+ * 86400 # noqa: W503 flake8 vs ruff
261
+ ).label("average_response_seconds"),
262
+ )
263
+ .filter(cls.request_received_at.between(start_time, end_time))
264
+ .filter(cls.endpoint == endpoint)
265
+ .group_by("time_range")
266
+ .order_by("time_range")
267
+ )
268
+
269
+ results = session.execute(average_responses_times).fetchall()
270
+ return results
271
+
272
+ @classmethod
273
+ def get_responses_average_time_in_period(
274
+ cls,
275
+ start_time: datetime.datetime,
276
+ end_time: datetime.datetime,
277
+ time_format: str,
278
+ endpoint: Optional[EndpointType] = EndpointType.ANALYZE,
279
+ ) -> dict[datetime.datetime, int]:
280
+ """
281
+ Get a dictionary with average responses times
282
+ grouped by time units within a specified period.
283
+
284
+ Args:
285
+ start_time (datetime): The start of the time period to query
286
+ end_time (datetime): The end of the time period to query
287
+ time_format (str): The strftime format string to format timestamps (e.g., '%Y-%m-%d')
288
+ endpoint (EndpointType): The analyze API endpoint to query
185
289
 
186
- return counts_dict
290
+ Returns:
291
+ dict[datetime, int]: A dictionary mapping datetime objects
292
+ to average responses times
293
+ """
294
+ with transaction(commit=False) as _:
295
+ average_responses_times = cls._get_average_responses_times_for_postgres(
296
+ start_time, end_time, time_format, endpoint
297
+ )
298
+
299
+ return cls.get_dictionary_with_datetime_keys(
300
+ time_format, average_responses_times
301
+ )
302
+
303
+ @classmethod
304
+ def _get_average_responses_lengths_for_postgres(
305
+ cls, start_time, end_time, time_format, endpoint
306
+ ):
307
+ """Get average responses length.
308
+
309
+ func.to_char is PostgreSQL specific.
310
+ Let's unit tests replace this function with the SQLite version.
311
+ """
312
+ with transaction(commit=False) as session:
313
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
314
+
315
+ average_responses_lengths = (
316
+ select(
317
+ func.to_char(cls.request_received_at, pgsql_time_format).label(
318
+ "time_range"
319
+ ),
320
+ (func.avg(cls.response_length)).label("average_responses_length"),
321
+ )
322
+ .filter(cls.request_received_at.between(start_time, end_time))
323
+ .filter(cls.endpoint == endpoint)
324
+ .group_by("time_range")
325
+ .order_by("time_range")
326
+ )
327
+
328
+ results = session.execute(average_responses_lengths).fetchall()
329
+ return results
330
+
331
+ @classmethod
332
+ def _get_average_responses_lengths_for_sqlite(
333
+ cls, start_time, end_time, time_format, endpoint
334
+ ):
335
+ """Get average responses length.
336
+
337
+ func.strftime is SQLite specific.
338
+ Use this function in unit test using flexmock:
339
+
340
+ flexmock(AnalyzeRequestMetrics)
341
+ .should_receive("_get_average_responses_lengths_for_postgres")
342
+ .replace_with(AnalyzeRequestMetrics._get_average_responses_lengths_for_sqlite)
343
+ """
344
+ with transaction(commit=False) as session:
345
+ average_responses_lengths = (
346
+ select(
347
+ func.strftime(time_format, cls.request_received_at).label(
348
+ "time_range"
349
+ ),
350
+ (func.avg(cls.response_length)).label("average_responses_length"),
351
+ )
352
+ .filter(cls.request_received_at.between(start_time, end_time))
353
+ .filter(cls.endpoint == endpoint)
354
+ .group_by("time_range")
355
+ .order_by("time_range")
356
+ )
357
+
358
+ results = session.execute(average_responses_lengths).fetchall()
359
+ return results
360
+
361
+ @classmethod
362
+ def get_responses_average_length_in_period(
363
+ cls,
364
+ start_time: datetime.datetime,
365
+ end_time: datetime.datetime,
366
+ time_format: str,
367
+ endpoint: Optional[EndpointType] = EndpointType.ANALYZE,
368
+ ) -> dict[datetime.datetime, int]:
369
+ """
370
+ Get a dictionary with average responses length
371
+ grouped by time units within a specified period.
372
+
373
+ Args:
374
+ start_time (datetime): The start of the time period to query
375
+ end_time (datetime): The end of the time period to query
376
+ time_format (str): The strftime format string to format timestamps (e.g., '%Y-%m-%d')
377
+ endpoint (EndpointType): The analyze API endpoint to query
378
+
379
+ Returns:
380
+ dict[datetime, int]: A dictionary mapping datetime objects
381
+ to average responses lengths
382
+ """
383
+ with transaction(commit=False) as _:
384
+ average_responses_lengths = cls._get_average_responses_lengths_for_postgres(
385
+ start_time, end_time, time_format, endpoint
386
+ )
387
+
388
+ return cls.get_dictionary_with_datetime_keys(
389
+ time_format, average_responses_lengths
390
+ )
@@ -41,12 +41,10 @@ def update_metrics(
41
41
  sent_at if sent_at else datetime.datetime.now(datetime.timezone.utc)
42
42
  )
43
43
  response_length = None
44
- if hasattr(response, "explanation") and "choices" in response.explanation:
45
- response_length = sum(
46
- len(choice["text"])
47
- for choice in response.explanation["choices"]
48
- if "text" in choice
49
- )
44
+ if hasattr(response, "explanation") and isinstance(
45
+ response.explanation, models.Explanation
46
+ ):
47
+ response_length = len(response.explanation.text)
50
48
  response_certainty = (
51
49
  response.response_certainty if hasattr(response, "response_certainty") else None
52
50
  )
@@ -2,7 +2,9 @@ import datetime
2
2
  from logging import BASIC_FORMAT
3
3
  from typing import List, Dict, Optional, Literal
4
4
 
5
- from pydantic import BaseModel, Field, model_validator, field_validator
5
+ from pydantic import BaseModel, Field, model_validator, field_validator, NonNegativeFloat
6
+
7
+ from logdetective.constants import DEFAULT_TEMPERATURE
6
8
 
7
9
 
8
10
  class BuildLog(BaseModel):
@@ -95,6 +97,8 @@ class InferenceConfig(BaseModel):
95
97
  )
96
98
  url: str = ""
97
99
  api_token: str = ""
100
+ model: str = ""
101
+ temperature: NonNegativeFloat = DEFAULT_TEMPERATURE
98
102
 
99
103
  def __init__(self, data: Optional[dict] = None):
100
104
  super().__init__()
@@ -106,6 +110,8 @@ class InferenceConfig(BaseModel):
106
110
  self.api_endpoint = data.get("api_endpoint", "/chat/completions")
107
111
  self.url = data.get("url", "")
108
112
  self.api_token = data.get("api_token", "")
113
+ self.model = data.get("model", "default-model")
114
+ self.temperature = data.get("temperature", DEFAULT_TEMPERATURE)
109
115
 
110
116
 
111
117
  class ExtractorConfig(BaseModel):
@@ -150,7 +156,8 @@ class LogConfig(BaseModel):
150
156
  """Logging configuration"""
151
157
 
152
158
  name: str = "logdetective"
153
- level: str | int = "INFO"
159
+ level_stream: str | int = "INFO"
160
+ level_file: str | int = "INFO"
154
161
  path: str | None = None
155
162
  format: str = BASIC_FORMAT
156
163
 
@@ -160,7 +167,8 @@ class LogConfig(BaseModel):
160
167
  return
161
168
 
162
169
  self.name = data.get("name", "logdetective")
163
- self.level = data.get("level", "INFO").upper()
170
+ self.level_stream = data.get("level_stream", "INFO").upper()
171
+ self.level_file = data.get("level_file", "INFO").upper()
164
172
  self.path = data.get("path")
165
173
  self.format = data.get("format", BASIC_FORMAT)
166
174
 
@@ -1,5 +1,5 @@
1
1
  import datetime
2
- from typing import Optional
2
+ from typing import Optional, Union
3
3
 
4
4
  import numpy
5
5
  import matplotlib
@@ -62,24 +62,24 @@ class Definition:
62
62
 
63
63
 
64
64
  def create_time_series_arrays(
65
- counts_dict: dict[datetime.datetime, int],
65
+ values_dict: dict[datetime.datetime, int],
66
+ plot_def: Definition,
66
67
  start_time: datetime.datetime,
67
68
  end_time: datetime.datetime,
68
- time_delta: datetime.timedelta,
69
- time_format: str,
69
+ value_type: Optional[Union[int, float]] = int,
70
70
  ) -> tuple[numpy.ndarray, numpy.ndarray]:
71
- """Create time series arrays from a dictionary of counts.
71
+ """Create time series arrays from a dictionary of values.
72
72
 
73
73
  This function generates two aligned numpy arrays:
74
74
  1. An array of timestamps from start_time to end_time
75
- 2. A corresponding array of counts for each timestamp
75
+ 2. A corresponding array of values for each timestamp
76
76
 
77
77
  The timestamps are truncated to the precision specified by time_format.
78
- If a timestamp in counts_dict matches a generated timestamp, its count is used;
79
- otherwise, the count defaults to zero.
78
+ If a timestamp in values_dict matches a generated timestamp, its value is used;
79
+ otherwise, the value defaults to zero.
80
80
 
81
81
  Args:
82
- counts_dict: Dictionary mapping timestamps to their respective counts
82
+ values_dict: Dictionary mapping timestamps to their respective values
83
83
  start_time: The starting timestamp of the time series
84
84
  end_time: The ending timestamp of the time series
85
85
  time_delta: The time interval between consecutive timestamps
@@ -88,67 +88,70 @@ def create_time_series_arrays(
88
88
  Returns:
89
89
  A tuple containing:
90
90
  - numpy.ndarray: Array of timestamps
91
- - numpy.ndarray: Array of corresponding counts
91
+ - numpy.ndarray: Array of corresponding values
92
92
  """
93
- num_intervals = int((end_time - start_time) / time_delta) + 1
93
+ num_intervals = int((end_time - start_time) / plot_def.time_delta) + 1
94
94
 
95
95
  timestamps = numpy.array(
96
96
  [
97
97
  datetime.datetime.strptime(
98
- (start_time + i * time_delta).strftime(format=time_format), time_format
98
+ (start_time + i * plot_def.time_delta).strftime(
99
+ format=plot_def.time_format
100
+ ),
101
+ plot_def.time_format,
99
102
  )
100
103
  for i in range(num_intervals)
101
104
  ]
102
105
  )
103
- counts = numpy.zeros(num_intervals, dtype=int)
106
+ values = numpy.zeros(num_intervals, dtype=value_type)
104
107
 
105
108
  timestamp_to_index = {timestamp: i for i, timestamp in enumerate(timestamps)}
106
109
 
107
- for timestamp, count in counts_dict.items():
110
+ for timestamp, count in values_dict.items():
108
111
  if timestamp in timestamp_to_index:
109
- counts[timestamp_to_index[timestamp]] = count
112
+ values[timestamp_to_index[timestamp]] = count
110
113
 
111
- return timestamps, counts
114
+ return timestamps, values
112
115
 
113
116
 
114
- def _add_bar_chart_for_requests_count(
115
- ax1: matplotlib.figure.Axes,
117
+ def _add_bar_chart(
118
+ ax: matplotlib.figure.Axes,
116
119
  plot_def: Definition,
117
120
  timestamps: numpy.array,
118
- counts: numpy.array,
121
+ values: numpy.array,
122
+ label: str,
119
123
  ) -> None:
120
- """Add a bar chart for requests count (axes 1)"""
124
+ """Add a blue bar chart"""
121
125
  bar_width = (
122
126
  0.8 * plot_def.time_delta.total_seconds() / 86400
123
127
  ) # Convert to days for matplotlib
124
- ax1.bar(
128
+ ax.bar(
125
129
  timestamps,
126
- counts,
130
+ values,
127
131
  width=bar_width,
128
132
  alpha=0.7,
129
133
  color="skyblue",
130
- label="Requests",
134
+ label=label,
131
135
  )
132
- ax1.set_xlabel("Time")
133
- ax1.set_ylabel("Requests", color="blue")
134
- ax1.tick_params(axis="y", labelcolor="blue")
136
+ ax.set_xlabel("Time")
137
+ ax.set_ylabel(label, color="blue")
138
+ ax.tick_params(axis="y", labelcolor="blue")
135
139
 
136
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(plot_def.time_format))
137
- ax1.xaxis.set_major_locator(plot_def.locator)
140
+ ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(plot_def.time_format))
141
+ ax.xaxis.set_major_locator(plot_def.locator)
138
142
 
139
143
  matplotlib.pyplot.xticks(rotation=45)
140
144
 
141
- ax1.grid(True, alpha=0.3)
145
+ ax.grid(True, alpha=0.3)
142
146
 
143
147
 
144
- def _add_cumulative_line_for_requests_count(
145
- ax2: matplotlib.figure.Axes, timestamps: numpy.array, counts: numpy.array
148
+ def _add_line_chart(
149
+ ax: matplotlib.figure.Axes, timestamps: numpy.array, values: numpy.array, label: str
146
150
  ) -> None:
147
- """Add cumulative line on secondary y-axis"""
148
- cumulative = numpy.cumsum(counts)
149
- ax2.plot(timestamps, cumulative, "r-", linewidth=2, label="Cumulative")
150
- ax2.set_ylabel("Cumulative Requests", color="red")
151
- ax2.tick_params(axis="y", labelcolor="red")
151
+ """Add a red line chart"""
152
+ ax.plot(timestamps, values, "r-", linewidth=2, label=label)
153
+ ax.set_ylabel(label, color="red")
154
+ ax.tick_params(axis="y", labelcolor="red")
152
155
 
153
156
 
154
157
  def requests_per_time(
@@ -183,14 +186,14 @@ def requests_per_time(
183
186
  start_time, end_time, plot_def.time_format, endpoint
184
187
  )
185
188
  timestamps, counts = create_time_series_arrays(
186
- requests_counts, start_time, end_time, plot_def.time_delta, plot_def.time_format
189
+ requests_counts, plot_def, start_time, end_time
187
190
  )
188
191
 
189
192
  fig, ax1 = matplotlib.pyplot.subplots(figsize=(12, 6))
190
- _add_bar_chart_for_requests_count(ax1, plot_def, timestamps, counts)
193
+ _add_bar_chart(ax1, plot_def, timestamps, counts, "Requests")
191
194
 
192
195
  ax2 = ax1.twinx()
193
- _add_cumulative_line_for_requests_count(ax2, timestamps, counts)
196
+ _add_line_chart(ax2, timestamps, numpy.cumsum(counts), "Cumulative Requests")
194
197
 
195
198
  matplotlib.pyplot.title(
196
199
  f"Requests received for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
@@ -204,3 +207,75 @@ def requests_per_time(
204
207
  matplotlib.pyplot.tight_layout()
205
208
 
206
209
  return fig
210
+
211
+
212
+ def average_time_per_responses( # pylint: disable=too-many-locals
213
+ period_of_time: models.TimePeriod,
214
+ endpoint: EndpointType = EndpointType.ANALYZE,
215
+ end_time: Optional[datetime.datetime] = None,
216
+ ) -> matplotlib.figure.Figure:
217
+ """
218
+ Generate a visualization of average response time and length over a specified time period.
219
+
220
+ This function creates a dual-axis plot showing:
221
+ 1. A bar chart of average response time per time interval
222
+ 2. A line chart of average response length per time interval
223
+
224
+ The time intervals are determined by the provided TimePeriod object, which defines
225
+ the granularity and formatting of the time axis.
226
+
227
+ Args:
228
+ period_of_time: A TimePeriod object that defines the time period and interval
229
+ for the analysis (e.g., hourly, daily, weekly)
230
+ endpoint: One of the API endpoints
231
+ end_time: The end time for the analysis period. If None, defaults to the current
232
+ UTC time
233
+
234
+ Returns:
235
+ A matplotlib Figure object containing the generated visualization
236
+ """
237
+ end_time = end_time or datetime.datetime.now(datetime.timezone.utc)
238
+ start_time = period_of_time.get_period_start_time(end_time)
239
+ plot_def = Definition(period_of_time)
240
+ responses_average_time = AnalyzeRequestMetrics.get_responses_average_time_in_period(
241
+ start_time, end_time, plot_def.time_format, endpoint
242
+ )
243
+ timestamps, average_time = create_time_series_arrays(
244
+ responses_average_time,
245
+ plot_def,
246
+ start_time,
247
+ end_time,
248
+ float,
249
+ )
250
+
251
+ fig, ax1 = matplotlib.pyplot.subplots(figsize=(12, 6))
252
+ _add_bar_chart(ax1, plot_def, timestamps, average_time, "average response time (seconds)")
253
+
254
+ responses_average_length = (
255
+ AnalyzeRequestMetrics.get_responses_average_length_in_period(
256
+ start_time, end_time, plot_def.time_format, endpoint
257
+ )
258
+ )
259
+ timestamps, average_length = create_time_series_arrays(
260
+ responses_average_length,
261
+ plot_def,
262
+ start_time,
263
+ end_time,
264
+ float,
265
+ )
266
+
267
+ ax2 = ax1.twinx()
268
+ _add_line_chart(ax2, timestamps, average_length, "average response length (chars)")
269
+
270
+ matplotlib.pyplot.title(
271
+ f"average response time for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
272
+ f"to {end_time.strftime(plot_def.time_format)})"
273
+ )
274
+
275
+ lines1, labels1 = ax1.get_legend_handles_labels()
276
+ lines2, labels2 = ax2.get_legend_handles_labels()
277
+ ax1.legend(lines1 + lines2, labels1 + labels2, loc="center")
278
+
279
+ matplotlib.pyplot.tight_layout()
280
+
281
+ return fig
@@ -26,10 +26,13 @@ from logdetective.utils import (
26
26
  validate_url,
27
27
  compute_certainty,
28
28
  format_snippets,
29
- format_analyzed_snippets,
30
29
  load_prompts,
31
30
  )
32
- from logdetective.server.utils import load_server_config, get_log
31
+ from logdetective.server.utils import (
32
+ load_server_config,
33
+ get_log,
34
+ format_analyzed_snippets,
35
+ )
33
36
  from logdetective.server.metric import track_request
34
37
  from logdetective.server.models import (
35
38
  BuildLog,
@@ -183,7 +186,6 @@ async def submit_text( # pylint: disable=R0913,R0917
183
186
  log_probs: int = 1,
184
187
  stream: bool = False,
185
188
  model: str = "default-model",
186
- api_endpoint: str = "/chat/completions",
187
189
  ) -> Explanation:
188
190
  """Submit prompt to LLM using a selected endpoint.
189
191
  max_tokens: number of tokens to be produces, 0 indicates run until encountering EOS
@@ -196,7 +198,7 @@ async def submit_text( # pylint: disable=R0913,R0917
196
198
  if SERVER_CONFIG.inference.api_token:
197
199
  headers["Authorization"] = f"Bearer {SERVER_CONFIG.inference.api_token}"
198
200
 
199
- if api_endpoint == "/chat/completions":
201
+ if SERVER_CONFIG.inference.api_endpoint == "/chat/completions":
200
202
  return await submit_text_chat_completions(
201
203
  text, headers, max_tokens, log_probs > 0, stream, model
202
204
  )
@@ -224,6 +226,7 @@ async def submit_text_completions( # pylint: disable=R0913,R0917
224
226
  "logprobs": log_probs,
225
227
  "stream": stream,
226
228
  "model": model,
229
+ "temperature": SERVER_CONFIG.inference.temperature,
227
230
  }
228
231
 
229
232
  response = await submit_to_llm_endpoint(
@@ -263,6 +266,7 @@ async def submit_text_chat_completions( # pylint: disable=R0913,R0917
263
266
  "logprobs": log_probs,
264
267
  "stream": stream,
265
268
  "model": model,
269
+ "temperature": SERVER_CONFIG.inference.temperature,
266
270
  }
267
271
 
268
272
  response = await submit_to_llm_endpoint(
@@ -297,7 +301,8 @@ async def analyze_log(build_log: BuildLog):
297
301
  log_summary = format_snippets(log_summary)
298
302
  response = await submit_text(
299
303
  PROMPT_CONFIG.prompt_template.format(log_summary),
300
- api_endpoint=SERVER_CONFIG.inference.api_endpoint,
304
+ model=SERVER_CONFIG.inference.model,
305
+ max_tokens=SERVER_CONFIG.inference.max_tokens,
301
306
  )
302
307
  certainty = 0
303
308
 
@@ -337,7 +342,8 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
337
342
  *[
338
343
  submit_text(
339
344
  PROMPT_CONFIG.snippet_prompt_template.format(s),
340
- api_endpoint=SERVER_CONFIG.inference.api_endpoint,
345
+ model=SERVER_CONFIG.inference.model,
346
+ max_tokens=SERVER_CONFIG.inference.max_tokens,
341
347
  )
342
348
  for s in log_summary
343
349
  ]
@@ -352,7 +358,9 @@ async def perform_staged_analysis(log_text: str) -> StagedResponse:
352
358
  )
353
359
 
354
360
  final_analysis = await submit_text(
355
- final_prompt, api_endpoint=SERVER_CONFIG.inference.api_endpoint
361
+ final_prompt,
362
+ model=SERVER_CONFIG.inference.model,
363
+ max_tokens=SERVER_CONFIG.inference.max_tokens,
356
364
  )
357
365
 
358
366
  certainty = 0
@@ -393,7 +401,9 @@ async def analyze_log_stream(build_log: BuildLog):
393
401
  headers["Authorization"] = f"Bearer {SERVER_CONFIG.inference.api_token}"
394
402
 
395
403
  stream = await submit_text_chat_completions(
396
- PROMPT_CONFIG.prompt_template.format(log_summary), stream=True, headers=headers
404
+ PROMPT_CONFIG.prompt_template.format(log_summary), stream=True, headers=headers,
405
+ model=SERVER_CONFIG.inference.model,
406
+ max_tokens=SERVER_CONFIG.inference.max_tokens,
397
407
  )
398
408
 
399
409
  return StreamingResponse(stream)
@@ -613,8 +623,8 @@ async def comment_on_mr(
613
623
  response.explanation.text,
614
624
  )
615
625
 
616
- # Get the formatted comment.
617
- comment = await generate_mr_comment(job, log_url, response)
626
+ # Get the formatted short comment.
627
+ short_comment = await generate_mr_comment(job, log_url, response, full=False)
618
628
 
619
629
  # Look up the merge request
620
630
  merge_request = await asyncio.to_thread(
@@ -622,11 +632,33 @@ async def comment_on_mr(
622
632
  )
623
633
 
624
634
  # Submit a new comment to the Merge Request using the Gitlab API
625
- await asyncio.to_thread(merge_request.discussions.create, {"body": comment})
635
+ discussion = await asyncio.to_thread(
636
+ merge_request.discussions.create, {"body": short_comment}
637
+ )
638
+
639
+ # Get the ID of the first note
640
+ note_id = discussion.attributes["notes"][0]["id"]
641
+ note = discussion.notes.get(note_id)
642
+
643
+ # Update the comment with the full details
644
+ # We do this in a second step so we don't bombard the user's email
645
+ # notifications with a massive message. Gitlab doesn't send email for
646
+ # comment edits.
647
+ full_comment = await generate_mr_comment(job, log_url, response, full=True)
648
+ note.body = full_comment
649
+
650
+ # Pause for five seconds before sending the snippet data, otherwise
651
+ # Gitlab may bundle the edited message together with the creation
652
+ # message in email.
653
+ await asyncio.sleep(5)
654
+ await asyncio.to_thread(note.save)
626
655
 
627
656
 
628
657
  async def generate_mr_comment(
629
- job: gitlab.v4.objects.ProjectJob, log_url: str, response: StagedResponse
658
+ job: gitlab.v4.objects.ProjectJob,
659
+ log_url: str,
660
+ response: StagedResponse,
661
+ full: bool = True,
630
662
  ) -> str:
631
663
  """Use a template to generate a comment string to submit to Gitlab"""
632
664
 
@@ -634,7 +666,11 @@ async def generate_mr_comment(
634
666
  script_path = Path(__file__).resolve().parent
635
667
  template_path = Path(script_path, "templates")
636
668
  jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path))
637
- tpl = jinja_env.get_template("gitlab_comment.md.j2")
669
+
670
+ if full:
671
+ tpl = jinja_env.get_template("gitlab_full_comment.md.j2")
672
+ else:
673
+ tpl = jinja_env.get_template("gitlab_short_comment.md.j2")
638
674
 
639
675
  artifacts_url = f"{job.project_url}/-/jobs/{job.id}/artifacts/download"
640
676
 
@@ -673,6 +709,35 @@ def _svg_figure_response(fig: matplotlib.figure.Figure):
673
709
  )
674
710
 
675
711
 
712
+ def _multiple_svg_figures_response(figures: list[matplotlib.figure.Figure]):
713
+ """Create a response with multiple svg figures."""
714
+ svg_contents = []
715
+ for i, fig in enumerate(figures):
716
+ buf = BytesIO()
717
+ fig.savefig(buf, format="svg", bbox_inches="tight")
718
+ matplotlib.pyplot.close(fig)
719
+ buf.seek(0)
720
+ svg_contents.append(buf.read().decode("utf-8"))
721
+
722
+ html_content = "<html><body>\n"
723
+ for i, svg in enumerate(svg_contents):
724
+ html_content += f"<div id='figure-{i}'>\n{svg}\n</div>\n"
725
+ html_content += "</body></html>"
726
+
727
+ return BasicResponse(content=html_content, media_type="text/html")
728
+
729
+
730
+ @app.get("/metrics/analyze", response_class=StreamingResponse)
731
+ async def show_analyze_metrics(period_since_now: TimePeriod = Depends(TimePeriod)):
732
+ """Show statistics for requests and responses in the given period of time
733
+ for the /analyze API endpoint."""
734
+ fig_requests = plot.requests_per_time(period_since_now, EndpointType.ANALYZE)
735
+ fig_responses = plot.average_time_per_responses(
736
+ period_since_now, EndpointType.ANALYZE
737
+ )
738
+ return _multiple_svg_figures_response([fig_requests, fig_responses])
739
+
740
+
676
741
  @app.get("/metrics/analyze/requests", response_class=StreamingResponse)
677
742
  async def show_analyze_requests(period_since_now: TimePeriod = Depends(TimePeriod)):
678
743
  """Show statistics for the requests received in the given period of time
@@ -681,6 +746,27 @@ async def show_analyze_requests(period_since_now: TimePeriod = Depends(TimePerio
681
746
  return _svg_figure_response(fig)
682
747
 
683
748
 
749
+ @app.get("/metrics/analyze/responses", response_class=StreamingResponse)
750
+ async def show_analyze_responses(period_since_now: TimePeriod = Depends(TimePeriod)):
751
+ """Show statistics for responses given in the specified period of time
752
+ for the /analyze API endpoint."""
753
+ fig = plot.average_time_per_responses(period_since_now, EndpointType.ANALYZE)
754
+ return _svg_figure_response(fig)
755
+
756
+
757
+ @app.get("/metrics/analyze/staged", response_class=StreamingResponse)
758
+ async def show_analyze_staged_metrics(
759
+ period_since_now: TimePeriod = Depends(TimePeriod),
760
+ ):
761
+ """Show statistics for requests and responses in the given period of time
762
+ for the /analyze/staged API endpoint."""
763
+ fig_requests = plot.requests_per_time(period_since_now, EndpointType.ANALYZE_STAGED)
764
+ fig_responses = plot.average_time_per_responses(
765
+ period_since_now, EndpointType.ANALYZE_STAGED
766
+ )
767
+ return _multiple_svg_figures_response([fig_requests, fig_responses])
768
+
769
+
684
770
  @app.get("/metrics/analyze/staged/requests", response_class=StreamingResponse)
685
771
  async def show_analyze_staged_requests(
686
772
  period_since_now: TimePeriod = Depends(TimePeriod),
@@ -689,3 +775,13 @@ async def show_analyze_staged_requests(
689
775
  for the /analyze/staged API endpoint."""
690
776
  fig = plot.requests_per_time(period_since_now, EndpointType.ANALYZE_STAGED)
691
777
  return _svg_figure_response(fig)
778
+
779
+
780
+ @app.get("/metrics/analyze/staged/responses", response_class=StreamingResponse)
781
+ async def show_analyze_staged_responses(
782
+ period_since_now: TimePeriod = Depends(TimePeriod),
783
+ ):
784
+ """Show statistics for responses given in the specified period of time
785
+ for the /analyze/staged API endpoint."""
786
+ fig = plot.average_time_per_responses(period_since_now, EndpointType.ANALYZE_STAGED)
787
+ return _svg_figure_response(fig)
@@ -9,9 +9,7 @@ In this case, we are {{ certainty }}% certain of the response {{ emoji_face }}.
9
9
  <ul>
10
10
  {% for snippet in snippets %}
11
11
  <li>
12
- <code>
13
- Line {{ snippet.line_number }}: {{ snippet.text }}
14
- </code>
12
+ <b>Line {{ snippet.line_number }}:</b> <code>{{ snippet.text }}</code>
15
13
  {{ snippet.explanation }}
16
14
  </li>
17
15
  {% endfor %}
@@ -0,0 +1,53 @@
1
+ The package {{ package }} failed to build; here is a possible explanation why.
2
+
3
+ Please know that the explanation was provided by AI and may be incorrect.
4
+ In this case, we are {{ certainty }}% certain of the response {{ emoji_face }}.
5
+
6
+ {{ explanation }}
7
+
8
+ <details>
9
+ <summary>Logs</summary>
10
+ <p>
11
+ Log Detective analyzed the following logs files to provide an explanation:
12
+ </p>
13
+
14
+ <ul>
15
+ <li><a href="{{ log_url }}">{{ log_url }}</a></li>
16
+ </ul>
17
+
18
+ <p>
19
+ Additional logs are available from:
20
+ <ul>
21
+ <li><a href="{{ artifacts_url }}">artifacts.zip</a></li>
22
+ </ul>
23
+ </p>
24
+
25
+ <p>
26
+ Please know that these log files are automatically removed after some
27
+ time, so you might need a backup.
28
+ </p>
29
+ </details>
30
+
31
+ <details>
32
+ <summary>Help</summary>
33
+ <p>Don't hesitate to reach out.</p>
34
+
35
+ <ul>
36
+ <li><a href="https://github.com/fedora-copr/logdetective">Upstream</a></li>
37
+ <li><a href="https://github.com/fedora-copr/logdetective/issues">Issue tracker</a></li>
38
+ <li><a href="https://redhat.enterprise.slack.com/archives/C06DWNVKKDE">Slack</a></li>
39
+ <li><a href="https://log-detective.com/documentation">Documentation</a></li>
40
+ </ul>
41
+ </details>
42
+
43
+
44
+ ---
45
+ This comment was created by [Log Detective][log-detective].
46
+
47
+ Was the provided feedback accurate and helpful? <br>Please vote with :thumbsup:
48
+ or :thumbsdown: to help us improve.<br>
49
+
50
+
51
+
52
+ [log-detective]: https://log-detective.com/
53
+ [contact]: https://github.com/fedora-copr
@@ -1,6 +1,18 @@
1
1
  import logging
2
2
  import yaml
3
- from logdetective.server.models import Config
3
+ from logdetective.constants import SNIPPET_DELIMITER
4
+ from logdetective.server.models import Config, AnalyzedSnippet
5
+
6
+
7
+ def format_analyzed_snippets(snippets: list[AnalyzedSnippet]) -> str:
8
+ """Format snippets for submission into staged prompt."""
9
+ summary = f"\n{SNIPPET_DELIMITER}\n".join(
10
+ [
11
+ f"[{e.text}] at line [{e.line_number}]: [{e.explanation.text}]"
12
+ for e in snippets
13
+ ]
14
+ )
15
+ return summary
4
16
 
5
17
 
6
18
  def load_server_config(path: str | None) -> Config:
@@ -25,7 +37,7 @@ def get_log(config: Config):
25
37
  if getattr(log, "initialized", False):
26
38
  return log
27
39
 
28
- log.setLevel(config.log.level)
40
+ log.setLevel("DEBUG")
29
41
 
30
42
  # Drop the default handler, we will create it ourselves
31
43
  log.handlers = []
@@ -33,12 +45,14 @@ def get_log(config: Config):
33
45
  # STDOUT
34
46
  stream_handler = logging.StreamHandler()
35
47
  stream_handler.setFormatter(logging.Formatter(config.log.format))
48
+ stream_handler.setLevel(config.log.level_stream)
36
49
  log.addHandler(stream_handler)
37
50
 
38
51
  # Log to file
39
52
  if config.log.path:
40
53
  file_handler = logging.FileHandler(config.log.path)
41
54
  file_handler.setFormatter(logging.Formatter(config.log.format))
55
+ file_handler.setLevel(config.log.level_file)
42
56
  log.addHandler(file_handler)
43
57
 
44
58
  log.initialized = True
logdetective/utils.py CHANGED
@@ -7,8 +7,6 @@ import requests
7
7
  import yaml
8
8
 
9
9
  from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
10
- from logdetective.constants import SNIPPET_DELIMITER
11
- from logdetective.server.models import AnalyzedSnippet
12
10
  from logdetective.models import PromptConfig
13
11
 
14
12
 
@@ -113,19 +111,23 @@ def compute_certainty(probs: List[Dict]) -> float:
113
111
 
114
112
 
115
113
  def process_log(
116
- log: str, model: Llama, stream: bool, prompt_template: str
114
+ log: str, model: Llama, stream: bool, prompt_template: str,
115
+ temperature: float
117
116
  ) -> CreateCompletionResponse | Iterator[CreateCompletionStreamResponse]:
118
117
  """Processes a given log using the provided language model and returns its summary.
119
118
 
120
119
  Args:
121
120
  log (str): The input log to be processed.
122
121
  model (Llama): The language model used for processing the log.
123
-
122
+ stream (bool): Return output as Iterator.
123
+ prompt_template (str): Which prompt template to use.
124
+ temperature (float): Temperature parameter for model runtime.
124
125
  Returns:
125
126
  str: The summary of the given log generated by the language model.
126
127
  """
127
128
  response = model(
128
- prompt=prompt_template.format(log), stream=stream, max_tokens=0, logprobs=1
129
+ prompt=prompt_template.format(log), stream=stream, max_tokens=0, logprobs=1,
130
+ temperature=temperature
129
131
  )
130
132
 
131
133
  return response
@@ -178,17 +180,6 @@ def format_snippets(snippets: list[str] | list[Tuple[int, str]]) -> str:
178
180
  return summary
179
181
 
180
182
 
181
- def format_analyzed_snippets(snippets: list[AnalyzedSnippet]) -> str:
182
- """Format snippets for submission into staged prompt."""
183
- summary = f"\n{SNIPPET_DELIMITER}\n".join(
184
- [
185
- f"[{e.text}] at line [{e.line_number}]: [{e.explanation.text}]"
186
- for e in snippets
187
- ]
188
- )
189
- return summary
190
-
191
-
192
183
  def validate_url(url: str) -> bool:
193
184
  """Validate incoming URL to be at least somewhat sensible for log files
194
185
  Only http and https protocols permitted. No result, params or query fields allowed.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: logdetective
3
- Version: 0.5.9
3
+ Version: 0.5.11
4
4
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
5
5
  License: Apache-2.0
6
6
  Author: Jiri Podivin
@@ -47,6 +47,8 @@ Log Detective
47
47
 
48
48
  A Python tool to analyze logs using a Language Model (LLM) and Drain template miner.
49
49
 
50
+ Note: if you are looking for code of website logdetective.com it is in [github.com/fedora-copr/logdetective-website](https://github.com/fedora-copr/logdetective-website).
51
+
50
52
  Installation
51
53
  ------------
52
54
 
@@ -95,6 +97,17 @@ Example you want to use a different model:
95
97
  logdetective https://example.com/logs.txt --model https://huggingface.co/QuantFactory/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_S.gguf?download=true
96
98
  logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF
97
99
 
100
+ Example of different suffix (useful for models that were quantized)
101
+
102
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --model 'fedora-copr/granite-3.2-8b-instruct-GGUF' -F Q4_K.gguf
103
+
104
+ Example of altered prompts:
105
+
106
+ cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
107
+ vi ~/my-prompts.yml # edit the prompts there to better fit your needs
108
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml
109
+
110
+
98
111
  Note that streaming with some models (notably Meta-Llama-3) is broken and can be worked around with the `no-stream` option:
99
112
 
100
113
  logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --no-stream
@@ -337,11 +350,23 @@ certbot certonly --standalone -d logdetective01.fedorainfracloud.org
337
350
  Querying statistics
338
351
  -------------------
339
352
 
340
- You can retrieve statistics about server requests over a specified time period
341
- using either the `curl` command or the `http` command (provided by the `httpie` package).
353
+ You can retrieve statistics about server requests and responses over a specified time period
354
+ using a browser, the `curl` command, or the `http` command (provided by the `httpie` package).
342
355
 
343
356
  When no time period is specified, the query defaults to the last 2 days:
344
357
 
358
+ You can view request and response statistics
359
+ - for the `/analyze` endpoint at http://localhost:8080/metrics/analyze
360
+ - for the `/analyze/staged` endpoint at http://localhost:8080/metrics/analyze/staged.
361
+
362
+ You can retrieve single svg images at the following endpoints:
363
+ - `/metrics/analyze/requests`
364
+ - `/metrics/analyze/responses`
365
+ - `/metrics/analyze/staged/requests`
366
+ - `/metrics/analyze/staged/responses`
367
+
368
+ Examples:
369
+
345
370
  ```
346
371
  http GET "localhost:8080/metrics/analyze/requests" > /tmp/plot.svg
347
372
  curl "localhost:8080/metrics/analyze/staged/requests" > /tmp/plot.svg
@@ -349,7 +374,6 @@ curl "localhost:8080/metrics/analyze/staged/requests" > /tmp/plot.svg
349
374
 
350
375
  You can specify the time period in hours, days, or weeks.
351
376
  The time period:
352
-
353
377
  - cannot be less than one hour
354
378
  - cannot be negative
355
379
  - ends at the current time (when the query is made)
@@ -0,0 +1,24 @@
1
+ logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
2
+ logdetective/constants.py,sha256=A5PzeqlQqDbBS_kzP2hl-lhJ0lCEqdbvW3CaQUYVxjw,1849
3
+ logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
4
+ logdetective/extractors.py,sha256=7ahzWbTtU9MveG1Q7wU9LO8OJgs85X-cHmWltUhCe9M,3491
5
+ logdetective/logdetective.py,sha256=Q1SfQ9sWR5sIvHJag61-F-8edwf7p1SV7QZRg9VaWcc,5604
6
+ logdetective/models.py,sha256=nrGBmMRu8i6UhFflQKAp81Y3Sd_Aaoor0i_yqSJoLT0,1115
7
+ logdetective/prompts.yml,sha256=dMW2-bdTIqv7LF_owqRD4xinMK5ZWcNhDynnX1zoKns,1722
8
+ logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
+ logdetective/server/database/base.py,sha256=oMJUvbWeapIUP-8Cf_DR9ptFg8CsYeaBAIjOVEzx8SM,1668
11
+ logdetective/server/database/models.py,sha256=m_3qNBWJwLSwjJn0AmwSxXMJk75Gu1bXFtGAP_4zps4,14088
12
+ logdetective/server/metric.py,sha256=-uM_-yqxNA-EZTCnNRdQ8g1MicmE5eC6jRFI_mBBYUg,2606
13
+ logdetective/server/models.py,sha256=URqZcfx5yUsifZ1pOwZ_uU3Tyjcdvuq6qEnAvTexl4A,8475
14
+ logdetective/server/plot.py,sha256=B2rOngqx7g-Z3NfttboTip3frkypdF1H7FhK8vh45mE,9655
15
+ logdetective/server/server.py,sha256=4NylBojHm9E3gjByVWs870T204ls39EbZmUfU0Kyq4U,28395
16
+ logdetective/server/templates/gitlab_full_comment.md.j2,sha256=DQZ2WVFedpuXI6znbHIW4wpF9BmFS8FaUkowh8AnGhE,1627
17
+ logdetective/server/templates/gitlab_short_comment.md.j2,sha256=fzScpayv2vpRLczP_0O0YxtA8rsKvR6gSv4ntNdWb98,1443
18
+ logdetective/server/utils.py,sha256=QO0H1q55YLCLKxkViqex4Uu31LnakpYUKJfZHysonSc,1838
19
+ logdetective/utils.py,sha256=nklnTipAet9P9aEiuHcnK62WT0DmNHbvO1TvNlrxlik,6463
20
+ logdetective-0.5.11.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
21
+ logdetective-0.5.11.dist-info/METADATA,sha256=LOOzu99kJaP02U2OaFQciPdWKhlgr4Vm4tVKijTY7NM,15882
22
+ logdetective-0.5.11.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
23
+ logdetective-0.5.11.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
24
+ logdetective-0.5.11.dist-info/RECORD,,
@@ -1,23 +0,0 @@
1
- logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
2
- logdetective/constants.py,sha256=eiS6eYhEgl_Rlyi_B9j00DDp9A-UDhuFz3ACWtKf_SU,1558
3
- logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
4
- logdetective/extractors.py,sha256=7ahzWbTtU9MveG1Q7wU9LO8OJgs85X-cHmWltUhCe9M,3491
5
- logdetective/logdetective.py,sha256=1EFrml_gHdyKEZX4iXBxhGgmU7R7_S26-Fr0WUDaA7E,5316
6
- logdetective/models.py,sha256=nrGBmMRu8i6UhFflQKAp81Y3Sd_Aaoor0i_yqSJoLT0,1115
7
- logdetective/prompts.yml,sha256=OBOWDErlbigbLrStcCY5HKPReNb0g-SNlCnD4QawF7k,1268
8
- logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
- logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
- logdetective/server/database/base.py,sha256=oMJUvbWeapIUP-8Cf_DR9ptFg8CsYeaBAIjOVEzx8SM,1668
11
- logdetective/server/database/models.py,sha256=arIahOCT-hTmh904DXrWSkH7rlo13Ppu-OO80huX5Dc,6118
12
- logdetective/server/metric.py,sha256=VYMifrfIhcqgyu6YYN0c1nt8fC1iJ2_LCB7Bh2AheoE,2679
13
- logdetective/server/models.py,sha256=cf1ngu_-19rP_i49s5cEwIzh6SfL_ZpVy4EykCpfWck,8076
14
- logdetective/server/plot.py,sha256=3o-CNHjel04ekpwSB4ckV7dbiF663cfPkimQ0aP9U_8,7073
15
- logdetective/server/server.py,sha256=VGfBgbjUcyBd8hop-ea-O_Mo-FoGLDyP-elAWzRu51g,24605
16
- logdetective/server/templates/gitlab_comment.md.j2,sha256=kheTkhQ-LfuFkr8av-Mw2a-9VYEUbDTLwaa-CKI6OkI,1622
17
- logdetective/server/utils.py,sha256=OFvhttjv3yp8kfim5_s4mNG8ly21qyILxE0o3DcVVKg,1340
18
- logdetective/utils.py,sha256=yTEjfTTaCS8lreKRkwKzLo6Po8cOYzInjSEx4CwpyqA,6665
19
- logdetective-0.5.9.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
20
- logdetective-0.5.9.dist-info/METADATA,sha256=YZbrICuAKXVD4LEEH6orwX-fuX3i3hpSsKuNa1nosoI,14737
21
- logdetective-0.5.9.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
22
- logdetective-0.5.9.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
23
- logdetective-0.5.9.dist-info/RECORD,,