logdetective 1.5.0__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,13 +8,30 @@ from io import BytesIO
 
  import matplotlib
  import matplotlib.pyplot
- from fastapi import FastAPI, HTTPException, BackgroundTasks, Depends, Header, Request
+ from fastapi import (
+     FastAPI,
+     HTTPException,
+     BackgroundTasks,
+     Depends,
+     Header,
+     Path,
+     Request,
+ )

  from fastapi.responses import StreamingResponse
  from fastapi.responses import Response as BasicResponse
  import aiohttp
  import sentry_sdk

+ from logdetective.server.exceptions import KojiInvalidTaskID
+
+ from logdetective.server.database.models.koji import KojiTaskAnalysis
+ from logdetective.server.database.models.exceptions import (
+     KojiTaskAnalysisTimeoutError,
+     KojiTaskNotAnalyzedError,
+     KojiTaskNotFoundError,
+ )
+
  import logdetective.server.database.base

  from logdetective.utils import (
@@ -24,6 +41,9 @@ from logdetective.utils import (
  )

  from logdetective.server.config import SERVER_CONFIG, PROMPT_CONFIG, LOG
+ from logdetective.server.koji import (
+     get_failed_log_from_task as get_failed_log_from_koji_task,
+ )
  from logdetective.remote_log import RemoteLog
  from logdetective.server.llm import (
      mine_logs,
@@ -31,11 +51,13 @@ from logdetective.server.llm import (
      submit_text,
  )
  from logdetective.server.gitlab import process_gitlab_job_event
- from logdetective.server.metric import track_request
+ from logdetective.server.metric import track_request, add_new_metrics, update_metrics
  from logdetective.server.models import (
      BuildLog,
      EmojiHook,
      JobHook,
+     KojiInstanceConfig,
+     KojiStagedResponse,
      Response,
      StagedResponse,
      TimePeriod,
@@ -49,6 +71,7 @@ from logdetective.server.emoji import (
      collect_emojis,
      collect_emojis_for_mr,
  )
+ from logdetective.server.compressors import RemoteLogCompressor


  LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
@@ -178,6 +201,173 @@ async def analyze_log_staged(
      return await perform_staged_analysis(log_text)


+ @app.get(
+     "/analyze/rpmbuild/koji/{koji_instance}/{task_id}",
+     response_model=KojiStagedResponse,
+ )
+ async def get_koji_task_analysis(
+     koji_instance: Annotated[str, Path(title="The Koji instance to use")],
+     task_id: Annotated[int, Path(title="The task ID to analyze")],
+     x_koji_token: Annotated[str, Header()] = "",
+ ):
+     """Provide endpoint for retrieving log file analysis of a Koji task"""
+
+     try:
+         koji_instance_config = SERVER_CONFIG.koji.instances[koji_instance]
+     except KeyError:
+         # This Koji instance is not configured, so we will return a 404.
+         return BasicResponse(status_code=404, content="Unknown Koji instance.")
+
+     # This should always be available in a production environment.
+     # In a testing environment, the tokens list may be empty, in which case
+     # it will just proceed.
+     if koji_instance_config.tokens and x_koji_token not in koji_instance_config.tokens:
+         # Invalid or missing token, so return a 401 (Unauthorized) error.
+         return BasicResponse(x_koji_token, status_code=401)
+
+     # Check if we have a response for this task
+     try:
+         return KojiTaskAnalysis.get_response_by_task_id(task_id)
+
+     except (KojiInvalidTaskID, KojiTaskNotFoundError):
+         # This task ID is malformed, out of range, or not found, so we will
+         # return a 404.
+         return BasicResponse(status_code=404)
+
+     except KojiTaskAnalysisTimeoutError:
+         # Task analysis has timed out, so we assume that the request was lost
+         # and that we need to start another analysis.
+         # There isn't a fully-appropriate error code for this, so we'll use
+         # 503 (Service Unavailable) as our best option.
+         return BasicResponse(
+             status_code=503, content="Task analysis timed out, please retry."
+         )
+
+     except KojiTaskNotAnalyzedError:
+         # It's still running, so we need to return a 202
+         # (Accepted) code to let the client know to keep waiting.
+         return BasicResponse(
+             status_code=202, content=f"Analysis still in progress for task {task_id}"
+         )
+
+
+ @app.post(
+     "/analyze/rpmbuild/koji/{koji_instance}/{task_id}",
+     response_model=KojiStagedResponse,
+ )
+ async def analyze_rpmbuild_koji(
+     koji_instance: Annotated[str, Path(title="The Koji instance to use")],
+     task_id: Annotated[int, Path(title="The task ID to analyze")],
+     x_koji_token: Annotated[str, Header()] = "",
+     x_koji_callback: Annotated[str, Header()] = "",
+     background_tasks: BackgroundTasks = BackgroundTasks(),
+ ):
+     """Provide endpoint for requesting log file analysis of a Koji task"""
+
+     try:
+         koji_instance_config = SERVER_CONFIG.koji.instances[koji_instance]
+     except KeyError:
+         # This Koji instance is not configured, so we will return a 404.
+         return BasicResponse(status_code=404, content="Unknown Koji instance.")
+
+     # This should always be available in a production environment.
+     # In a testing environment, the tokens list may be empty, in which case
+     # it will just proceed.
+     if koji_instance_config.tokens and x_koji_token not in koji_instance_config.tokens:
+         # Invalid or missing token, so return a 401 (Unauthorized) error.
+         return BasicResponse(x_koji_token, status_code=401)
+
+     # Check if we already have a response for this task
+     try:
+         response = KojiTaskAnalysis.get_response_by_task_id(task_id)
+
+     except KojiInvalidTaskID:
+         # This task ID is malformed or out of range, so we will return a 404.
+         response = BasicResponse(status_code=404, content="Invalid or unknown task ID.")
+
+     except (KojiTaskNotFoundError, KojiTaskAnalysisTimeoutError):
+         # Task not yet analyzed or it timed out, so we need to start the
+         # analysis in the background and return a 202 (Accepted) response.
+
+         background_tasks.add_task(
+             analyze_koji_task,
+             task_id,
+             koji_instance_config,
+         )
+
+         # If a callback URL is provided, we need to add it to the callbacks
+         # table so that we can notify it when the analysis is complete.
+         if x_koji_callback:
+             koji_instance_config.register_callback(task_id, x_koji_callback)
+
+         response = BasicResponse(
+             status_code=202, content=f"Beginning analysis of task {task_id}"
+         )
+
+     except KojiTaskNotAnalyzedError:
+         # It's still running, so we need to return a 202
+         # (Accepted) response.
+         response = BasicResponse(
+             status_code=202, content=f"Analysis still in progress for task {task_id}"
+         )
+
+     return response
+
+
+ async def analyze_koji_task(task_id: int, koji_instance_config: KojiInstanceConfig):
+     """Analyze a koji task and return the response"""
+
+     # Get the log text from the koji task
+     koji_conn = koji_instance_config.get_connection()
+     log_file_name, log_text = await get_failed_log_from_koji_task(
+         koji_conn, task_id, max_size=SERVER_CONFIG.koji.max_artifact_size
+     )
+
+     # We need to handle the metric tracking manually here, because we need
+     # to retrieve the metric ID to associate it with the koji task analysis.
+
+     metrics_id = await add_new_metrics(
+         "analyze_koji_task",
+         log_text,
+         received_at=datetime.datetime.now(datetime.timezone.utc),
+         compressed_log_content=RemoteLogCompressor.zip_text(log_text),
+     )
+
+     # We need to associate the metric ID with the koji task analysis.
+     # This will create the new row without a response, which we will use as
+     # an indicator that the analysis is in progress.
+     KojiTaskAnalysis.create_or_restart(
+         koji_instance=koji_instance_config.xmlrpc_url,
+         task_id=task_id,
+         log_file_name=log_file_name,
+     )
+     response = await perform_staged_analysis(log_text)
+
+     # Now that we have the response, we can update the metrics and mark the
+     # koji task analysis as completed.
+     update_metrics(metrics_id, response)
+     KojiTaskAnalysis.add_response(task_id, metrics_id)
+
+     # Notify any callbacks that the analysis is complete.
+     for callback in koji_instance_config.get_callbacks(task_id):
+         LOG.info("Notifying callback %s of task %d completion", callback, task_id)
+         asyncio.create_task(
+             send_koji_callback(callback, task_id)
+         )
+
+     # Now that they have been sent, we can clear the callbacks for this task.
+     koji_instance_config.clear_callbacks(task_id)
+
+     return response
+
+
+ async def send_koji_callback(callback: str, task_id: int):
+     """Send a callback to the specified URL with the task ID and log file name."""
+     async with aiohttp.ClientSession() as session:
+         async with session.post(callback, json={"task_id": task_id}):
+             pass
+
+
  @app.get("/queue/print")
  async def queue_print(msg: str):
      """Debug endpoint to test the LLM request queue"""
@@ -58,7 +58,9 @@ This comment was created by [Log Detective][log-detective].
  Was the provided feedback accurate and helpful? <br>Please vote with :thumbsup:
  or :thumbsdown: to help us improve.<br>

-
+ <i>If this Log Detective report contains harmful content, please use the
+ [Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
+ and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>

  [log-detective]: https://log-detective.com/
  [contact]: https://github.com/fedora-copr
@@ -47,7 +47,9 @@ This comment was created by [Log Detective][log-detective].
  Was the provided feedback accurate and helpful? <br>Please vote with :thumbsup:
  or :thumbsdown: to help us improve.<br>

-
+ <i>If this Log Detective report contains harmful content, please use the
+ [Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
+ and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>

  [log-detective]: https://log-detective.com/
  [contact]: https://github.com/fedora-copr
@@ -0,0 +1,12 @@
+ # This file holds patterns you want to skip during log parsing.
+ # By default, no patterns are supplied.
+ # Patterns are to be specified as values of a dictionary,
+ # with each key being a descriptive name of the pattern.
+ # Patterns themselves are evaluated as regular expressions.
+ # Make sure to avoid regular expressions that may be interpreted
+ # as YAML syntax.
+ # Example:
+
+ # contains_capital_a: "^.*A.*"
+ # starts_with_numeric: "^[0-9].*"
+ child_exit_code_zero: "Child return code was: 0"
logdetective/utils.py CHANGED
@@ -8,7 +8,7 @@ import numpy as np
  import yaml

  from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
- from logdetective.models import PromptConfig
+ from logdetective.models import PromptConfig, SkipSnippets
  from logdetective.remote_log import RemoteLog


@@ -223,3 +223,27 @@ def prompt_to_messages(
      ]

      return messages
+
+
+ def filter_snippet_patterns(snippet: str, skip_snippets: SkipSnippets) -> bool:
+     """Try to match snippet against provided patterns to determine if we should
+     filter it out or not."""
+     for key, pattern in skip_snippets.snippet_patterns.items():
+         if pattern.match(snippet):
+             LOG.debug("Snippet `%s` has matched against skip pattern %s", snippet, key)
+             return True
+
+     return False
+
+
+ def load_skip_snippet_patterns(path: str | None) -> SkipSnippets:
+     """Load dictionary of snippet patterns we want to skip."""
+     if path:
+         try:
+             with open(path, "r") as file:
+                 return SkipSnippets(yaml.safe_load(file))
+         except OSError as e:
+             LOG.error("Couldn't open file with snippet skip patterns `%s`", path)
+             raise e
+
+     return SkipSnippets({})
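A short usage sketch for the two helpers above. The pattern-file name is a placeholder, and it assumes, as `pattern.match()` in `filter_snippet_patterns` implies, that `SkipSnippets` compiles the mapping loaded from YAML into regular-expression objects exposed as `snippet_patterns`.

```
# Illustrative only: the path is a placeholder, and SkipSnippets is assumed
# to turn the YAML mapping into compiled patterns (as pattern.match() implies).
from logdetective.utils import filter_snippet_patterns, load_skip_snippet_patterns

skip_snippets = load_skip_snippet_patterns("my_skip_patterns.yml")

snippets = [
    "Child return code was: 0",
    "error: linker command failed with exit code 1",
]

# Keep only the snippets that do not match any skip pattern.
kept = [s for s in snippets if not filter_snippet_patterns(s, skip_snippets)]
print(kept)
```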
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: logdetective
- Version: 1.5.0
+ Version: 1.7.0
  Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
  License: Apache-2.0
  Author: Jiri Podivin
@@ -90,6 +90,7 @@ To analyze a log file, run the script with the following command line arguments:
  - `--summarizer` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only. (optional, default: "drain"): Choose between LLM and Drain template miner as the log summarizer. You can also provide the path to an existing language model file instead of using a URL.
  - `--n_lines` DISABLED: LLM summarization option was removed. Argument is kept for backward compatibility only. (optional, default: 8): The number of lines per chunk for LLM analysis. This only makes sense when you are summarizing with LLM.
  - `--n_clusters` (optional, default 8): Number of clusters for Drain to organize log chunks into. This only makes sense when you are summarizing with Drain.
+ - `--skip_snippets` (optional): Path to a YAML file with patterns for skipping snippets.

  Example usage:

@@ -330,7 +331,7 @@ If the variable is not set, `./models` is mounted inside by default.
 
  Model can be downloaded from [our Hugging Space](https://huggingface.co/fedora-copr) by:
  ```
- $ curl -L -o models/mistral-7b-instruct-v0.2.Q4_K_S.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/ggml-model-Q4_K_S.gguf
+ $ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
  ```

  Generate a new database revision with alembic
@@ -438,6 +439,28 @@ with spaces, or replacement fields marked with curly braces, `{}` left for inser
  Number of replacement fields in new prompts, must be the same as in originals.
  Although their position may be different.

+
+ Skip Snippets
+ -------------
+
+ Certain log chunks may not contribute to the analysis of the problem under any circumstances.
+ Using the Skip Snippets feature, the user can specify regular expressions matching such log chunks,
+ along with a simple description of each.
+
+ Patterns to be skipped must be defined in a YAML file as a dictionary, where each key is a description
+ and each value is a regular expression. For example:
+
+ ```
+ child_exit_code_zero: "Child return code was: 0"
+ ```
+
+ Special care must be taken not to write a regular expression which may match
+ too many chunks, or which may be interpreted as a data structure by the YAML parser.
+
+ An example of a valid pattern definition file, `logdetective/skip_patterns.yml`,
+ can be used as a starting point and is used as the default if no other definition is provided.
+
+
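Before pointing the service at a custom pattern file, it can help to sanity-check it. The sketch below is not part of the package; it simply loads a file with `yaml.safe_load` (the same call `load_skip_snippet_patterns` in utils.py uses) and compiles each value, so overly broad patterns or values mangled by YAML parsing surface early. The file path comes from the command line and is illustrative.

```
# Standalone sanity check for a skip-pattern file; the path is a placeholder.
import re
import sys

import yaml

with open(sys.argv[1], "r") as f:
    patterns = yaml.safe_load(f) or {}

if not isinstance(patterns, dict):
    sys.exit("Pattern file must be a mapping of description -> regular expression")

for name, value in patterns.items():
    if not isinstance(value, str):
        # e.g. an unquoted value that YAML parsed as a list or mapping
        sys.exit(f"{name}: value was parsed as {type(value).__name__}, quote it")
    regex = re.compile(value)
    if regex.match(""):
        print(f"warning: {name} matches an empty line and may be too broad")
```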
  License
  -------

@@ -0,0 +1,38 @@
+ logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
+ logdetective/constants.py,sha256=aCwrkBrDdS_kbNESK-Z-ewg--DSzodV2OMgwEq3UE38,2456
+ logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
+ logdetective/extractors.py,sha256=BkQe7FMLDoKVWitP85Vpv1qle1Fo1FeupKm0wVlcALI,1859
+ logdetective/logdetective.py,sha256=DECG4qnmYHlCcQ5Waj3Esr4wSb6LtM4m7qqtmZqYDX0,6151
+ logdetective/models.py,sha256=h01nagxgb8sR9Js_00DMoZv6GvwHjcOk0MeKttftDHk,2460
+ logdetective/prompts-summary-first.yml,sha256=3Zfp4NNOfaFYq5xBlBjeQa5PdjYfS4v17OtJqQ-DRpU,821
+ logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
+ logdetective/prompts.yml,sha256=dOqaFrtBOkFRHLWte_2tGV-pNXwXP9Km9iWno_TZyic,3863
+ logdetective/remote_log.py,sha256=28QvdQiy7RBnd86EKCq_A75P21gSNlCbgxJe5XAe9MA,2258
+ logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ logdetective/server/compressors.py,sha256=qzrT-BPSksXY6F2L6ger04GGrgdBsGOfK2YuCFRs0Q4,5427
+ logdetective/server/config.py,sha256=Qg3Q15S-NPXEpSqGgAH41JVy4dheqKYUSEkzE-1Z05E,2478
+ logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ logdetective/server/database/base.py,sha256=1mcjEbhwLl4RalvT3oy6XVctjJoWIW3H9aI_sMWJBK8,1728
+ logdetective/server/database/models/__init__.py,sha256=GQ_4vC_jahwFrqhF4UUKLRo86_ulq1uSBAF3Je31DyA,878
+ logdetective/server/database/models/exceptions.py,sha256=AXQPZRgt-r2vboxP9SGYelngP6YIFpHlwELKcZ1FD3Y,384
+ logdetective/server/database/models/koji.py,sha256=vZN585FvOHM4z5o3oBBQsxWWJER1_giOMtZZPiU4q3w,5457
+ logdetective/server/database/models/merge_request_jobs.py,sha256=0yWLVDKQ1odH8W8lBh1MV0CmSaji2HL0Alv3qrWxdo0,18644
+ logdetective/server/database/models/metrics.py,sha256=chQ8mhmsfadkbuIiA5WdY1JW-eMiXulZl-tXTphkoyM,13972
+ logdetective/server/emoji.py,sha256=hV4O0yfL0l1a3kWLImvBsY4AJQauKs7okYOGBEtYVz0,4795
+ logdetective/server/exceptions.py,sha256=piV7wVKc-rw_pHrThbZbUjtmjuO5qUbjVNFwjdfcP3Q,864
+ logdetective/server/gitlab.py,sha256=xTGKDZnEZay7TMumeVFJ4M5lE6LDLBwCtz34OZRfIhk,16431
+ logdetective/server/koji.py,sha256=_tZRaY9IRIzQsEk6XMRcsO4Bz0tJq3PgCB-ATywvWIU,5860
+ logdetective/server/llm.py,sha256=EiLp3QV3OAvZcqrq6t21M0vzHFiPuVMamRLWPggqTEo,5829
+ logdetective/server/metric.py,sha256=QrrX1FmMa7sc57av0P9UFOiCIFYVLs1opOWV3ObYo0s,4086
+ logdetective/server/models.py,sha256=VIwVfcXD7wq4aRorT_k8dTmJg5pVi4kUVbEvO1QUMKM,18531
+ logdetective/server/plot.py,sha256=C98U9prGoPkp8_t4v2dovdZuwOhSbxXSeB_K9Q2r3NE,14607
+ logdetective/server/server.py,sha256=texHf-3HYcdMcERMbkgR8xq9R2PN-kU4E6qtj3Kvx3U,26002
+ logdetective/server/templates/gitlab_full_comment.md.j2,sha256=2_TGQPYZFgd5r-rY08kAnKbeePBynCYWbCojbIy44Go,1890
+ logdetective/server/templates/gitlab_short_comment.md.j2,sha256=b0dCNmEOLEcKLKufF9g7ftrjvGVotTdMBZsw2SVnrec,1706
+ logdetective/skip_snippets.yml,sha256=reGlhPPCo06nNUJWiC2LY-OJOoPdcyOB7QBTSMeh0eg,487
+ logdetective/utils.py,sha256=UAhPWbOGdTR7PWc1dEQk8FSxBSsO0UgfdyY8AKGfMJY,7781
+ logdetective-1.7.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ logdetective-1.7.0.dist-info/METADATA,sha256=H3BpMTq8FkQmc9da_ZBPpbRCTpO7X_qvYjgHrpzSTEM,18887
+ logdetective-1.7.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ logdetective-1.7.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+ logdetective-1.7.0.dist-info/RECORD,,
@@ -1,33 +0,0 @@
- logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
- logdetective/constants.py,sha256=KD5FtMvRMO5jO9O1a5FbHy6yFSF6ZkZ4lNrhI7D_S2Y,2456
- logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
- logdetective/extractors.py,sha256=sFsBFKpIBglejD2lxct2B0qEP0lFSep-ZIebq4KfaLM,1515
- logdetective/logdetective.py,sha256=WKj8U5p329ek0T-G2rFtRxD5R07IZZGSVNZodcGT5PA,5722
- logdetective/models.py,sha256=ONF7SK8VeuJk_gEj_l0ToYQ7asZYbrEmVUOUNQ5SEaA,1407
- logdetective/prompts-summary-first.yml,sha256=3Zfp4NNOfaFYq5xBlBjeQa5PdjYfS4v17OtJqQ-DRpU,821
- logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
- logdetective/prompts.yml,sha256=dOqaFrtBOkFRHLWte_2tGV-pNXwXP9Km9iWno_TZyic,3863
- logdetective/remote_log.py,sha256=u-KlhO4Eu0ES6pPwrNbHBVhrZCdFi8894zJj33Lg3YA,2226
- logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- logdetective/server/compressors.py,sha256=qzrT-BPSksXY6F2L6ger04GGrgdBsGOfK2YuCFRs0Q4,5427
- logdetective/server/config.py,sha256=WeEhgiYVdvNQEcE9ZcIt63U9CzScQRWl5QXfHh-KH9s,2105
- logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- logdetective/server/database/base.py,sha256=1mcjEbhwLl4RalvT3oy6XVctjJoWIW3H9aI_sMWJBK8,1728
- logdetective/server/database/models/__init__.py,sha256=xy2hkygyw6_87zPKkG20i7g7_LXTGR__PUeojhbvv94,496
- logdetective/server/database/models/merge_request_jobs.py,sha256=hw88wV1-3x7i53sX7ZotKClc6OsH1njPpbRSZofnqr4,18670
- logdetective/server/database/models/metrics.py,sha256=yl9fS4IPVFWDeFvPAxO6zOVu6oLF319ApvVLAgnD5yU,13928
- logdetective/server/emoji.py,sha256=Iv1CFNyWhweBG13v59O1fQD-dZj-YGM1IKlkIaCzBaU,4392
- logdetective/server/gitlab.py,sha256=wQSlvdWn6XEi1oP6HhI75bIhm6bgdpWr3zu2WXF0_oE,16473
- logdetective/server/llm.py,sha256=q9LdoAmsx9MpBjnjLyJ9GBU27jKViTaWbVXyMsmsCI0,5721
- logdetective/server/metric.py,sha256=B3ew_qSmtEMj6xl-FoOtS4F_bkplp-shhtfHF1cG_Io,4010
- logdetective/server/models.py,sha256=I45uLnq_zqn_r0FdOdop9zQPbsOWOY_M39NBBOXP134,15738
- logdetective/server/plot.py,sha256=yS7TF_Gu7yV0uE9W50Ht5wQSlavgCx2CiU1XGO-iftE,14870
- logdetective/server/server.py,sha256=V-lSG2cCTxoGwvUc8mEmLQQWS4g_W_dER2o118RufAk,18792
- logdetective/server/templates/gitlab_full_comment.md.j2,sha256=DQZ2WVFedpuXI6znbHIW4wpF9BmFS8FaUkowh8AnGhE,1627
- logdetective/server/templates/gitlab_short_comment.md.j2,sha256=fzScpayv2vpRLczP_0O0YxtA8rsKvR6gSv4ntNdWb98,1443
- logdetective/utils.py,sha256=5EcRjQcNG1UFPovrMLqlaApgxWSB2DHQhSExkEY3yk0,6932
- logdetective-1.5.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
- logdetective-1.5.0.dist-info/METADATA,sha256=ee4c820E_pX7ULV8PZGCJT0TzbfzpkkReRfrC7cznG4,18050
- logdetective-1.5.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- logdetective-1.5.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
- logdetective-1.5.0.dist-info/RECORD,,