logdetective 1.6.0.tar.gz → 1.7.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (38)
  1. {logdetective-1.6.0 → logdetective-1.7.0}/PKG-INFO +2 -2
  2. {logdetective-1.6.0 → logdetective-1.7.0}/README.md +1 -1
  3. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/constants.py +1 -1
  4. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/logdetective.py +1 -1
  5. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/database/models/__init__.py +12 -0
  6. logdetective-1.7.0/logdetective/server/database/models/exceptions.py +13 -0
  7. logdetective-1.7.0/logdetective/server/database/models/koji.py +126 -0
  8. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/database/models/merge_request_jobs.py +11 -10
  9. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/database/models/metrics.py +1 -0
  10. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/emoji.py +22 -12
  11. logdetective-1.7.0/logdetective/server/exceptions.py +33 -0
  12. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/gitlab.py +1 -4
  13. logdetective-1.7.0/logdetective/server/koji.py +167 -0
  14. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/metric.py +10 -10
  15. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/models.py +91 -2
  16. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/server.py +192 -2
  17. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/templates/gitlab_full_comment.md.j2 +3 -1
  18. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/templates/gitlab_short_comment.md.j2 +3 -1
  19. {logdetective-1.6.0 → logdetective-1.7.0}/pyproject.toml +1 -1
  20. {logdetective-1.6.0 → logdetective-1.7.0}/LICENSE +0 -0
  21. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/__init__.py +0 -0
  22. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/drain3.ini +0 -0
  23. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/extractors.py +0 -0
  24. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/models.py +0 -0
  25. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/prompts-summary-first.yml +0 -0
  26. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/prompts-summary-only.yml +0 -0
  27. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/prompts.yml +0 -0
  28. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/remote_log.py +0 -0
  29. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/__init__.py +0 -0
  30. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/compressors.py +0 -0
  31. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/config.py +0 -0
  32. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/database/__init__.py +0 -0
  33. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/database/base.py +0 -0
  34. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/llm.py +0 -0
  35. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/server/plot.py +0 -0
  36. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/skip_snippets.yml +0 -0
  37. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective/utils.py +0 -0
  38. {logdetective-1.6.0 → logdetective-1.7.0}/logdetective.1.asciidoc +0 -0
--- logdetective-1.6.0/PKG-INFO
+++ logdetective-1.7.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: logdetective
-Version: 1.6.0
+Version: 1.7.0
 Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
 License: Apache-2.0
 Author: Jiri Podivin
@@ -331,7 +331,7 @@ If the variable is not set, `./models` is mounted inside by default.
 
 Model can be downloaded from [our Hugging Space](https://huggingface.co/fedora-copr) by:
 ```
-$ curl -L -o models/mistral-7b-instruct-v0.2.Q4_K_S.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/ggml-model-Q4_K_S.gguf
+$ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
 ```
 
 Generate a new database revision with alembic
--- logdetective-1.6.0/README.md
+++ logdetective-1.7.0/README.md
@@ -287,7 +287,7 @@ If the variable is not set, `./models` is mounted inside by default.
 
 Model can be downloaded from [our Hugging Space](https://huggingface.co/fedora-copr) by:
 ```
-$ curl -L -o models/mistral-7b-instruct-v0.2.Q4_K_S.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/ggml-model-Q4_K_S.gguf
+$ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
 ```
 
 Generate a new database revision with alembic
--- logdetective-1.6.0/logdetective/constants.py
+++ logdetective-1.7.0/logdetective/constants.py
@@ -4,7 +4,7 @@ in prompts.yaml instead.
 """
 
 # pylint: disable=line-too-long
-DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.2-GGUF"
+DEFAULT_ADVISOR = "fedora-copr/Mistral-7B-Instruct-v0.3-GGUF"
 
 PROMPT_TEMPLATE = """
 Given following log snippets, and nothing else, explain what failure, if any, occured during build of this package.
--- logdetective-1.6.0/logdetective/logdetective.py
+++ logdetective-1.7.0/logdetective/logdetective.py
@@ -42,7 +42,7 @@ def setup_args():
         "--filename_suffix",
         help="Suffix of the model file name to be retrieved from Hugging Face.\
             Makes sense only if the model is specified with Hugging Face name.",
-        default="Q4_K_S.gguf",
+        default="Q4_K.gguf",
     )
     parser.add_argument("-n", "--no-stream", action="store_true")
     parser.add_argument(
--- logdetective-1.6.0/logdetective/server/database/models/__init__.py
+++ logdetective-1.7.0/logdetective/server/database/models/__init__.py
@@ -5,10 +5,18 @@ from logdetective.server.database.models.merge_request_jobs import (
     Comments,
     Reactions,
 )
+from logdetective.server.database.models.koji import (
+    KojiTaskAnalysis,
+)
 from logdetective.server.database.models.metrics import (
     AnalyzeRequestMetrics,
     EndpointType,
 )
+from logdetective.server.database.models.exceptions import (
+    KojiTaskNotFoundError,
+    KojiTaskNotAnalyzedError,
+    KojiTaskAnalysisTimeoutError,
+)
 
 __all__ = [
     Base.__name__,
@@ -18,4 +26,8 @@ __all__ = [
     AnalyzeRequestMetrics.__name__,
     EndpointType.__name__,
     Forge.__name__,
+    KojiTaskAnalysis.__name__,
+    KojiTaskNotFoundError.__name__,
+    KojiTaskNotAnalyzedError.__name__,
+    KojiTaskAnalysisTimeoutError.__name__,
 ]
--- /dev/null
+++ logdetective-1.7.0/logdetective/server/database/models/exceptions.py
@@ -0,0 +1,13 @@
+"""Database model exceptions for logdetective."""
+
+
+class KojiTaskNotFoundError(Exception):
+    """Exception raised when a koji task is not found"""
+
+
+class KojiTaskNotAnalyzedError(Exception):
+    """Exception raised when a koji task analysis is still in progress"""
+
+
+class KojiTaskAnalysisTimeoutError(Exception):
+    """Exception raised when a koji task analysis has timed out"""
--- /dev/null
+++ logdetective-1.7.0/logdetective/server/database/models/koji.py
@@ -0,0 +1,126 @@
+from datetime import datetime, timedelta, timezone
+from sqlalchemy import Column, BigInteger, DateTime, ForeignKey, Integer, String
+from sqlalchemy.orm import relationship
+from sqlalchemy.exc import OperationalError
+import backoff
+
+from logdetective.server.config import SERVER_CONFIG
+from logdetective.server.compressors import LLMResponseCompressor
+from logdetective.server.database.models.metrics import AnalyzeRequestMetrics
+from logdetective.server.database.base import Base, transaction, DB_MAX_RETRIES
+from logdetective.server.database.models.exceptions import (
+    KojiTaskNotFoundError,
+    KojiTaskNotAnalyzedError,
+    KojiTaskAnalysisTimeoutError,
+)
+from logdetective.server.models import KojiStagedResponse
+
+
+class KojiTaskAnalysis(Base):
+    """Store details for the koji task analysis"""
+
+    __tablename__ = "koji_task_analysis"
+
+    id = Column(Integer, primary_key=True)
+    koji_instance = Column(String(255), nullable=False, index=True)
+    task_id = Column(BigInteger, nullable=False, index=True, unique=True)
+    log_file_name = Column(String(255), nullable=False, index=True)
+    request_received_at = Column(
+        DateTime,
+        nullable=False,
+        index=True,
+        default=datetime.now(timezone.utc),
+        comment="Timestamp when the request was received",
+    )
+    response_id = Column(
+        Integer,
+        ForeignKey("analyze_request_metrics.id"),
+        nullable=True,
+        index=False,
+        comment="The id of the analyze request metrics for this task",
+    )
+    response = relationship("AnalyzeRequestMetrics")
+
+    @classmethod
+    @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
+    def create_or_restart(cls, koji_instance: str, task_id: int, log_file_name: str):
+        """Create a new koji task analysis"""
+        with transaction(commit=True) as session:
+            # Check if the task analysis already exists
+            koji_task_analysis = (
+                session.query(cls)
+                .filter_by(koji_instance=koji_instance, task_id=task_id)
+                .first()
+            )
+            if koji_task_analysis:
+                # If it does, update the request_received_at timestamp
+                koji_task_analysis.request_received_at = datetime.now(timezone.utc)
+                session.add(koji_task_analysis)
+                session.flush()
+                return
+
+            # If it doesn't, create a new one
+            koji_task_analysis = KojiTaskAnalysis()
+            koji_task_analysis.koji_instance = koji_instance
+            koji_task_analysis.task_id = task_id
+            koji_task_analysis.log_file_name = log_file_name
+            session.add(koji_task_analysis)
+            session.flush()
+
+    @classmethod
+    @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
+    def add_response(cls, task_id: int, metric_id: int):
+        """Add a response to a koji task analysis"""
+        with transaction(commit=True) as session:
+            koji_task_analysis = session.query(cls).filter_by(task_id=task_id).first()
+            # Ensure that the task analysis doesn't already have a response
+            if koji_task_analysis.response:
+                # This is probably due to an analysis that took so long that
+                # a follow-up analysis was started before this one completed.
+                # We want to maintain consistency between the response we
+                # returned to the consumer, so we'll just drop this extra one
+                # on the floor and keep the one saved in the database.
+                return
+
+            metric = (
+                session.query(AnalyzeRequestMetrics).filter_by(id=metric_id).first()
+            )
+            koji_task_analysis.response = metric
+            session.add(koji_task_analysis)
+            session.flush()
+
+    @classmethod
+    @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
+    def get_response_by_task_id(cls, task_id: int) -> KojiStagedResponse:
+        """Get a koji task analysis by task id"""
+        with transaction(commit=False) as session:
+            koji_task_analysis = session.query(cls).filter_by(task_id=task_id).first()
+            if not koji_task_analysis:
+                raise KojiTaskNotFoundError(f"Task {task_id} not yet analyzed")
+
+            if not koji_task_analysis.response:
+                # Check if the task analysis has timed out
+                if koji_task_analysis.request_received_at.replace(
+                    tzinfo=timezone.utc
+                ) + timedelta(
+                    minutes=SERVER_CONFIG.koji.analysis_timeout
+                ) < datetime.now(timezone.utc):
+                    raise KojiTaskAnalysisTimeoutError(
+                        f"Task {task_id} analysis has timed out"
+                    )
+
+                # Task analysis is still in progress, so we need to let the
+                # consumer know
+                raise KojiTaskNotAnalyzedError(
+                    f"Task {task_id} analysis is still in progress"
+                )
+
+            # We need to decompress the response message and return it
+            response = LLMResponseCompressor.unzip(
+                koji_task_analysis.response.compressed_response
+            )
+            return KojiStagedResponse(
+                task_id=task_id,
+                log_file_name=koji_task_analysis.log_file_name,
+                response=response,
+            )
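
The polling contract encoded by `get_response_by_task_id` has three outcomes besides success: no row at all, a row without a response inside the timeout window, and a row without a response past the timeout. A minimal caller-side sketch (not part of the diff; it assumes the server package and its configuration are importable, and all names are exported via the `__init__.py` hunk above):

```python
# Sketch only: how a caller is expected to interpret the three
# failure modes of KojiTaskAnalysis.get_response_by_task_id().
from logdetective.server.database.models import (
    KojiTaskAnalysis,
    KojiTaskNotFoundError,
    KojiTaskNotAnalyzedError,
    KojiTaskAnalysisTimeoutError,
)


def poll_analysis(task_id: int):
    try:
        return KojiTaskAnalysis.get_response_by_task_id(task_id)  # finished
    except KojiTaskNotFoundError:
        return None  # no row: analysis was never requested
    except KojiTaskNotAnalyzedError:
        return None  # row without response, within timeout: still running
    except KojiTaskAnalysisTimeoutError:
        return None  # past koji.analysis_timeout: assume lost, re-request
```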
--- logdetective-1.6.0/logdetective/server/database/models/merge_request_jobs.py
+++ logdetective-1.7.0/logdetective/server/database/models/merge_request_jobs.py
@@ -1,6 +1,6 @@
 import enum
 import datetime
-from typing import Optional, List, Tuple
+from typing import Optional, List, Tuple, Self
 
 import backoff
 
@@ -15,6 +15,7 @@ from sqlalchemy import (
     desc,
 )
 from sqlalchemy.orm import relationship
+from sqlalchemy.engine import Row
 from sqlalchemy.exc import OperationalError
 from logdetective.server.database.base import Base, transaction, DB_MAX_RETRIES
 
@@ -134,7 +135,7 @@ class GitlabMergeRequestJobs(Base):
     @classmethod
     def get_by_mr_iid(
         cls, forge: Forge, project_id: int, mr_iid
-    ) -> Optional["GitlabMergeRequestJobs"]:
+    ) -> List[Self]:
         """Get all the mr jobs saved for the specified mr iid and project id."""
         with transaction(commit=False) as session:
             comments = (
@@ -262,7 +263,7 @@ class Comments(Base):
         cls,
         forge: Forge,
         comment_id: str,
-    ) -> Optional["Comments"]:
+    ) -> Optional[Self]:
         """Search for a detailed comment
         by its unique forge comment id.
 
@@ -324,7 +325,7 @@ class Comments(Base):
         forge: Forge,
         project_id: int,
         mr_iid: int,
-    ) -> Optional["Comments"]:
+    ) -> List[Self]:
         """Search for all merge request comments.
 
         Args:
@@ -358,7 +359,7 @@ class Comments(Base):
         mr_iid: int,
         job_id: int,
         comment_id: str,
-    ) -> Optional["Comments"]:
+    ) -> Self:
         """Search for a detailed comment
         or create a new one if not found.
 
@@ -372,11 +373,11 @@ class Comments(Base):
         comment = Comments.get_by_gitlab_id(forge, comment_id)
         if comment is None:
             id_ = Comments.create(forge, project_id, mr_iid, job_id, comment_id)
-            comment = GitlabMergeRequestJobs.get_by_id(id_)
+            comment = Comments.get_by_id(id_)
         return comment
 
     @classmethod
-    def get_since(cls, time: datetime.datetime) -> Optional["Comments"]:
+    def get_since(cls, time: datetime.datetime) -> List[Self]:
         """Get all the comments created after the given time."""
         with transaction(commit=False) as session:
             comments = (
@@ -485,7 +486,7 @@ class Reactions(Base):
         mr_iid: int,
         job_id: int,
         comment_id: str,
-    ) -> int:
+    ) -> List[Self]:
         """Get all reactions for a comment
 
         Args:
@@ -524,7 +525,7 @@ class Reactions(Base):
         job_id: int,
         comment_id: str,
         reaction_type: str,
-    ) -> int:
+    ) -> Self | None:
         """Get reaction, of a given type,
         for a comment
 
@@ -589,7 +590,7 @@ class Reactions(Base):
     @classmethod
     def get_since(
         cls, time: datetime.datetime
-    ) -> List[Tuple[datetime.datetime, "Comments"]]:
+    ) -> List[Row[Tuple[datetime.datetime, Self]]]:
        """Get all the reactions on comments created after the given time
         and the comment creation time."""
         with transaction(commit=False) as session:
--- logdetective-1.6.0/logdetective/server/database/models/metrics.py
+++ logdetective-1.7.0/logdetective/server/database/models/metrics.py
@@ -34,6 +34,7 @@ class EndpointType(enum.Enum):
     ANALYZE_STAGED = "analyze_log_staged"
     ANALYZE_STREAM = "analyze_log_stream"
    ANALYZE_GITLAB_JOB = "analyze_gitlab_job"
+    ANALYZE_KOJI_TASK = "analyze_koji_task"
 
 
 class AnalyzeRequestMetrics(Base):
--- logdetective-1.6.0/logdetective/server/emoji.py
+++ logdetective-1.7.0/logdetective/server/emoji.py
@@ -10,6 +10,7 @@ from logdetective.server.database.models import (
     Comments,
     Reactions,
     GitlabMergeRequestJobs,
+    Forge,
 )
 from logdetective.server.config import LOG
 
@@ -19,7 +20,7 @@ async def collect_emojis(gitlab_conn: gitlab.Gitlab, period: TimePeriod):
     Collect emoji feedback from logdetective comments saved in database.
     Check only comments created in the last given period of time.
     """
-    comments = Comments.get_since(period.get_period_start_time())
+    comments = Comments.get_since(period.get_period_start_time()) or []
     comments_for_gitlab_connection = [
         comment for comment in comments if comment.forge == gitlab_conn.url
     ]
@@ -32,7 +33,14 @@ async def collect_emojis_for_mr(
     """
     Collect emoji feedback from logdetective comments in the specified MR.
     """
-    mr_jobs = GitlabMergeRequestJobs.get_by_mr_iid(gitlab_conn.url, project_id, mr_iid)
+    comments = []
+    try:
+        url = Forge(gitlab_conn.url)
+    except ValueError as ex:
+        LOG.exception("Attempt to use unrecognized Forge `%s`", gitlab_conn.url)
+        raise ex
+    mr_jobs = GitlabMergeRequestJobs.get_by_mr_iid(url, project_id, mr_iid) or []
+
     comments = [Comments.get_by_mr_job(mr_job) for mr_job in mr_jobs]
     await collect_emojis_in_comments(comments, gitlab_conn)
 
@@ -63,38 +71,40 @@ async def collect_emojis_in_comments(  # pylint: disable=too-many-locals
     Collect emoji feedback from specified logdetective comments.
     """
     projects = {}
-    mrs = {}
+    merge_requests = {}
     for comment in comments:
         mr_job_db = GitlabMergeRequestJobs.get_by_id(comment.merge_request_job_id)
         if not mr_job_db:
             continue
         if mr_job_db.id not in projects:
-            projects[mr_job_db.id] = project = await _handle_gitlab_operation(
+            project = await _handle_gitlab_operation(
                 gitlab_conn.projects.get, mr_job_db.project_id
             )
             if not project:
                 continue
+            projects[mr_job_db.id] = project
         else:
             project = projects[mr_job_db.id]
-        mr_iid = mr_job_db.mr_iid
-        if mr_iid not in mrs:
-            mrs[mr_iid] = mr = await _handle_gitlab_operation(
-                project.mergerequests.get, mr_iid
+        merge_request_iid = mr_job_db.mr_iid
+        if merge_request_iid not in merge_requests:
+            merge_request = await _handle_gitlab_operation(
+                project.mergerequests.get, merge_request_iid
             )
-            if not mr:
+            if not merge_request:
                 continue
+            merge_requests[merge_request_iid] = merge_request
         else:
-            mr = mrs[mr_iid]
+            merge_request = merge_requests[merge_request_iid]
 
         discussion = await _handle_gitlab_operation(
-            mr.discussions.get, comment.comment_id
+            merge_request.discussions.get, comment.comment_id
         )
         if not discussion:
             continue
 
         # Get the ID of the first note
         note_id = discussion.attributes["notes"][0]["id"]
-        note = await _handle_gitlab_operation(mr.notes.get, note_id)
+        note = await _handle_gitlab_operation(merge_request.notes.get, note_id)
         if not note:
             continue
 
--- /dev/null
+++ logdetective-1.7.0/logdetective/server/exceptions.py
@@ -0,0 +1,33 @@
+"""Exception classes for Log Detective server."""
+
+
+class LogDetectiveException(Exception):
+    """Base exception for Log Detective server."""
+
+
+class LogsMissingError(LogDetectiveException):
+    """The logs are missing, possibly due to garbage-collection"""
+
+
+class LogDetectiveKojiException(LogDetectiveException):
+    """Base exception for Koji-related errors."""
+
+
+class KojiInvalidTaskID(LogDetectiveKojiException):
+    """The task ID is invalid."""
+
+
+class UnknownTaskType(LogDetectiveKojiException):
+    """The task type is not supported."""
+
+
+class NoFailedTask(LogDetectiveKojiException):
+    """The task is not in the FAILED state."""
+
+
+class LogDetectiveConnectionError(LogDetectiveKojiException):
+    """A connection error occurred."""
+
+
+class LogsTooLargeError(LogDetectiveKojiException):
+    """The log archive exceeds the configured maximum size"""
--- logdetective-1.6.0/logdetective/server/gitlab.py
+++ logdetective-1.7.0/logdetective/server/gitlab.py
@@ -13,6 +13,7 @@ import jinja2
 import aiohttp
 
 from logdetective.server.config import SERVER_CONFIG, LOG
+from logdetective.server.exceptions import LogsTooLargeError
 from logdetective.server.llm import perform_staged_analysis
 from logdetective.server.metric import add_new_metrics, update_metrics
 from logdetective.server.models import (
@@ -150,10 +151,6 @@ def is_eligible_package(project_name: str):
         return True
 
 
-class LogsTooLargeError(RuntimeError):
-    """The log archive exceeds the configured maximum size"""
-
-
 async def retrieve_and_preprocess_koji_logs(
     gitlab_cfg: GitLabInstanceConfig,
     job: gitlab.v4.objects.ProjectJob,
--- /dev/null
+++ logdetective-1.7.0/logdetective/server/koji.py
@@ -0,0 +1,167 @@
+import asyncio
+import re
+from typing import Any, Callable, Optional
+
+import backoff
+import koji
+from logdetective.server.config import LOG
+from logdetective.server.exceptions import (
+    KojiInvalidTaskID,
+    LogDetectiveConnectionError,
+    LogsMissingError,
+    LogsTooLargeError,
+    UnknownTaskType,
+)
+
+
+FAILURE_LOG_REGEX = re.compile(r"(\w*\.log)")
+
+
+def connection_error_giveup(details: backoff._typing.Details) -> None:
+    """
+    Too many connection errors, give up.
+    """
+    LOG.error("Too many connection errors, giving up. %s", details["exception"])
+    raise LogDetectiveConnectionError() from details["exception"]
+
+
+@backoff.on_exception(
+    backoff.expo,
+    koji.GenericError,
+    max_time=60,
+)
+async def call_koji(func: Callable, *args, **kwargs) -> Any:
+    """
+    Call a Koji function asynchronously.
+
+    Automatically retries on connection errors.
+    """
+    try:
+        result = await asyncio.to_thread(func, *args, **kwargs)
+    except koji.ActionNotAllowed as e:
+        # User doesn't have permission to do this, don't retry.
+        raise LogDetectiveConnectionError(e) from e
+    return result
+
+
+async def get_failed_subtask_info(
+    koji_session: koji.ClientSession, task_id: int
+) -> dict[str, Any]:
+    """
+    If the provided task ID represents a task of type "build", this function
+    will return the buildArch or rebuildSRPM subtask that failed. If there is
+    more than one, it will return the first one found from the following
+    ordered list of processor architectures. If none is found among those
+    architectures, it will return the first failed architecture after a
+    standard sort.
+    * x86_64
+    * aarch64
+    * riscv
+    * ppc64le
+    * s390x
+
+    If the provided task ID represents a task of type "buildArch" or
+    "buildSRPMFromSCM" and has a task state of "FAILED", it will be returned
+    directly.
+
+    Any other task type will rase the UnknownTaskType exception.
+
+    If no task or subtask of the provided task is in the task state "FAILED",
+    this function will raise a NoFailedSubtask exception.
+    """
+
+    # Look up the current task first and check its type.
+    taskinfo = await call_koji(koji_session.getTaskInfo, task_id)
+    if not taskinfo:
+        raise KojiInvalidTaskID(f"Task {task_id} does not exist.")
+
+    # If the parent isn't FAILED, the children probably aren't either.
+    # There's one special case where the user may have canceled the
+    # overall task when one arch failed, so we should check that situation
+    # too.
+    if (
+        taskinfo["state"] != koji.TASK_STATES["FAILED"]
+        and taskinfo["state"] != koji.TASK_STATES["CANCELED"]  # noqa: W503 flake vs lint
+    ):
+        raise UnknownTaskType(f"The primary task state was {taskinfo['state']}.")
+
+    # If the task is buildArch or buildSRPMFromSCM, we can return it directly.
+    if taskinfo["method"] in ["buildArch", "buildSRPMFromSCM"]:
+        return taskinfo
+
+    # Look up the subtasks for the task.
+    response = await asyncio.to_thread(koji_session.getTaskDescendents, task_id)
+    subtasks = response[f"{task_id}"]
+    arch_tasks = {}
+    for subtask in subtasks:
+        if (
+            subtask["method"] not in ["buildArch", "buildSRPMFromSCM"]
+            or subtask["state"] != koji.TASK_STATES["FAILED"]  # noqa: W503 flake vs lint
+        ):
+            # Skip over any completed subtasks or non-build types
+            continue
+
+        arch_tasks[subtask["arch"]] = subtask
+
+    # Return the first architecture in the order of preference.
+    for arch in ["x86_64", "aarch64", "riscv", "ppc64le", "s390x"]:
+        if arch in arch_tasks:
+            return arch_tasks[arch]
+
+    # If none of those architectures were found, return the first one
+    # alphabetically
+    return arch_tasks[sorted(arch_tasks.keys())[0]]
+
+
+async def get_failed_log_from_task(
+    koji_session: koji.ClientSession, task_id: int, max_size: int
+) -> Optional[tuple[str, str]]:
+    """
+    Get the failed log from a task.
+
+    If the log is too large, this function will raise a LogsTooLargeError.
+    If the log is missing or garbage-collected, this function will raise a
+    LogsMissingError.
+    """
+    taskinfo = await get_failed_subtask_info(koji_session, task_id)
+
+    # Read the failure reason from the task. Note that the taskinfo returned
+    # above may not be the same as passed in, so we need to use taskinfo["id"]
+    # to look up the correct failure reason.
+    result = await call_koji(
+        koji_session.getTaskResult, taskinfo["id"], raise_fault=False
+    )
+
+    # Examine the result message for the appropriate log file.
+    match = FAILURE_LOG_REGEX.search(result["faultString"])
+    if match:
+        failure_log_name = match.group(1)
+    else:
+        # The best thing we can do at this point is return the
+        # task_failed.log, since it will probably contain the most
+        # relevant information
+        return result["faultString"]
+
+    # Check that the size of the log file is not enormous
+    task_output = await call_koji(
+        koji_session.listTaskOutput, taskinfo["id"], stat=True
+    )
+    if not task_output:
+        # If the task has been garbage-collected, the task output will be empty
+        raise LogsMissingError(
+            "No logs attached to this task. Possibly garbage-collected."
+        )
+
+    if failure_log_name not in task_output:
+        # This shouldn't be possible, but we'll check anyway.
+        raise LogsMissingError(f"{failure_log_name} could not be located")
+
+    if int(task_output[failure_log_name]["st_size"]) > max_size:
+        raise LogsTooLargeError(
+            f"{task_output[failure_log_name]['st_size']} exceeds max size {max_size}"
+        )
+
+    log_contents = await call_koji(
+        koji_session.downloadTaskOutput, taskinfo["id"], failure_log_name
+    )
+    return failure_log_name, log_contents.decode("utf-8")
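
The architecture-preference rule in `get_failed_subtask_info` is worth seeing in isolation. A standalone sketch with hypothetical task data; the selection logic is copied from the function above:

```python
# Standalone illustration of the subtask selection order used above:
# preferred architectures first, then alphabetical fallback.
PREFERRED_ARCHES = ["x86_64", "aarch64", "riscv", "ppc64le", "s390x"]


def pick_failed_subtask(arch_tasks: dict) -> dict:
    for arch in PREFERRED_ARCHES:
        if arch in arch_tasks:
            return arch_tasks[arch]
    # No preferred architecture failed: take the alphabetically first one.
    return arch_tasks[sorted(arch_tasks.keys())[0]]


# Failures on s390x and i686 select the s390x subtask (preferred list wins);
# failures on only armv7hl and i686 fall back to armv7hl (alphabetical).
print(pick_failed_subtask({"s390x": {"id": 1}, "i686": {"id": 2}}))  # {'id': 1}
```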
--- logdetective-1.6.0/logdetective/server/metric.py
+++ logdetective-1.7.0/logdetective/server/metric.py
@@ -2,7 +2,7 @@ import io
 import inspect
 import datetime
 
-from typing import Union
+from typing import Optional, Union
 from functools import wraps
 
 import aiohttp
@@ -17,10 +17,10 @@ from logdetective.server.database.models import EndpointType, AnalyzeRequestMetr
 
 async def add_new_metrics(
     api_name: str,
-    url: str,
-    http_session: aiohttp.ClientSession,
-    received_at: datetime.datetime = None,
-    compressed_log_content: io.BytesIO = None,
+    url: Optional[str] = None,
+    http_session: Optional[aiohttp.ClientSession] = None,
+    received_at: Optional[datetime.datetime] = None,
+    compressed_log_content: Optional[io.BytesIO] = None,
 ) -> int:
     """Add a new database entry for a received request.
 
@@ -28,10 +28,10 @@ async def add_new_metrics(
     the endpoint from where the request was received,
     and the log (in a zip format) for which analysis is requested.
     """
-    remote_log = RemoteLog(url, http_session)
-    compressed_log_content = (
-        compressed_log_content or await RemoteLogCompressor(remote_log).zip_content()
-    )
+    if not compressed_log_content:
+        remote_log = RemoteLog(url, http_session)
+        compressed_log_content = await RemoteLogCompressor(remote_log).zip_content()
+
     return AnalyzeRequestMetrics.create(
         endpoint=EndpointType(api_name),
         compressed_log=compressed_log_content,
@@ -44,7 +44,7 @@ async def add_new_metrics(
 def update_metrics(
     metrics_id: int,
     response: Union[models.Response, models.StagedResponse, StreamingResponse],
-    sent_at: datetime.datetime = None,
+    sent_at: Optional[datetime.datetime] = None,
 ) -> None:
     """Update a database metric entry for a received request,
     filling data for the given response.
--- logdetective-1.6.0/logdetective/server/models.py
+++ logdetective-1.7.0/logdetective/server/models.py
@@ -1,4 +1,5 @@
 import asyncio
+from collections import defaultdict
 import datetime
 from logging import BASIC_FORMAT
 from typing import List, Dict, Optional
@@ -15,6 +16,7 @@ import aiohttp
 
 from aiolimiter import AsyncLimiter
 from gitlab import Gitlab
+import koji
 
 from logdetective.constants import (
     DEFAULT_TEMPERATURE,
@@ -132,6 +134,17 @@ class StagedResponse(Response):
     snippets: List[AnalyzedSnippet]
 
 
+class KojiStagedResponse(BaseModel):
+    """Model of data returned by Log Detective API when called when a Koji build
+    analysis is requested. Contains list of reponses to prompts for individual
+    snippets.
+    """
+
+    task_id: int
+    log_file_name: str
+    response: StagedResponse
+
+
 class InferenceConfig(BaseModel):  # pylint: disable=too-many-instance-attributes
     """Model for inference configuration of logdetective server."""
 
@@ -247,7 +260,7 @@ class GitLabInstanceConfig(BaseModel):  # pylint: disable=too-many-instance-attr
     _http_session: aiohttp.ClientSession = None
 
     # Maximum size of artifacts.zip in MiB. (default: 300 MiB)
-    max_artifact_size: int = 300
+    max_artifact_size: int = 300 * 1024 * 1024
 
     def __init__(self, name: str, data: Optional[dict] = None):
         super().__init__()
@@ -259,7 +272,7 @@ class GitLabInstanceConfig(BaseModel):  # pylint: disable=too-many-instance-attr
         self.api_path = data.get("api_path", "/api/v4")
         self.api_token = data.get("api_token", None)
         self.webhook_secrets = data.get("webhook_secrets", None)
-        self.max_artifact_size = int(data.get("max_artifact_size")) * 1024 * 1024
+        self.max_artifact_size = int(data.get("max_artifact_size", 300)) * 1024 * 1024
 
         self.timeout = data.get("timeout", 5.0)
         self._conn = Gitlab(
@@ -323,6 +336,80 @@ class GitLabConfig(BaseModel):
             self.instances[instance.url] = instance
 
 
+class KojiInstanceConfig(BaseModel):
+    """Model for Koji configuration of logdetective server."""
+
+    name: str = ""
+    xmlrpc_url: str = ""
+    tokens: List[str] = []
+
+    _conn: Optional[koji.ClientSession] = None
+    _callbacks: defaultdict[int, set[str]] = defaultdict(set)
+
+    def __init__(self, name: str, data: Optional[dict] = None):
+        super().__init__()
+
+        self.name = name
+        if data is None:
+            # Set some reasonable defaults
+            self.xmlrpc_url = "https://koji.fedoraproject.org/kojihub"
+            self.tokens = []
+            self.max_artifact_size = 1024 * 1024
+            return
+
+        self.xmlrpc_url = data.get(
+            "xmlrpc_url", "https://koji.fedoraproject.org/kojihub"
+        )
+        self.tokens = data.get("tokens", [])
+
+    def get_connection(self):
+        """Get the Koji connection object"""
+        if not self._conn:
+            self._conn = koji.ClientSession(self.xmlrpc_url)
+        return self._conn
+
+    def register_callback(self, task_id: int, callback: str):
+        """Register a callback for a task"""
+        self._callbacks[task_id].add(callback)
+
+    def clear_callbacks(self, task_id: int):
+        """Unregister a callback for a task"""
+        try:
+            del self._callbacks[task_id]
+        except KeyError:
+            pass
+
+    def get_callbacks(self, task_id: int) -> set[str]:
+        """Get the callbacks for a task"""
+        return self._callbacks[task_id]
+
+
+class KojiConfig(BaseModel):
+    """Model for Koji configuration of logdetective server."""
+
+    instances: Dict[str, KojiInstanceConfig] = {}
+    analysis_timeout: int = 15
+    max_artifact_size: int = 300 * 1024 * 1024
+
+    def __init__(self, data: Optional[dict] = None):
+        super().__init__()
+        if data is None:
+            return
+
+        # Handle analysis_timeout with default 15
+        self.analysis_timeout = data.get("analysis_timeout", 15)
+
+        # Handle max_artifact_size with default 300
+        self.max_artifact_size = data.get("max_artifact_size", 300) * 1024 * 1024
+
+        # Handle instances dictionary
+        instances_data = data.get("instances", {})
+        for instance_name, instance_data in instances_data.items():
+            self.instances[instance_name] = KojiInstanceConfig(
+                instance_name, instance_data
+            )
+
+
 class LogConfig(BaseModel):
     """Logging configuration"""
 
@@ -375,6 +462,7 @@ class Config(BaseModel):
     snippet_inference: InferenceConfig = InferenceConfig()
     extractor: ExtractorConfig = ExtractorConfig()
     gitlab: GitLabConfig = GitLabConfig()
+    koji: KojiConfig = KojiConfig()
     general: GeneralConfig = GeneralConfig()
 
     def __init__(self, data: Optional[dict] = None):
@@ -387,6 +475,7 @@ class Config(BaseModel):
         self.inference = InferenceConfig(data.get("inference"))
         self.extractor = ExtractorConfig(data.get("extractor"))
         self.gitlab = GitLabConfig(data.get("gitlab"))
+        self.koji = KojiConfig(data.get("koji"))
         self.general = GeneralConfig(data.get("general"))
 
         if snippet_inference := data.get("snippet_inference", None):
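
Judging by the `data.get(...)` calls in `KojiConfig.__init__` and `KojiInstanceConfig.__init__` above, the new `koji` configuration section would be shaped roughly like this. A sketch with illustrative values; only the key names and defaults come from the code:

```python
# Sketch of the dict KojiConfig expects; the token value is hypothetical.
koji_config = KojiConfig(
    {
        "analysis_timeout": 15,    # minutes before a pending analysis counts as lost
        "max_artifact_size": 300,  # MiB; converted to bytes (* 1024 * 1024) internally
        "instances": {
            "fedora": {
                "xmlrpc_url": "https://koji.fedoraproject.org/kojihub",
                "tokens": ["secret-token"],  # empty list skips the token check
            },
        },
    }
)
```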
--- logdetective-1.6.0/logdetective/server/server.py
+++ logdetective-1.7.0/logdetective/server/server.py
@@ -8,13 +8,30 @@ from io import BytesIO
 
 import matplotlib
 import matplotlib.pyplot
-from fastapi import FastAPI, HTTPException, BackgroundTasks, Depends, Header, Request
+from fastapi import (
+    FastAPI,
+    HTTPException,
+    BackgroundTasks,
+    Depends,
+    Header,
+    Path,
+    Request,
+)
 
 from fastapi.responses import StreamingResponse
 from fastapi.responses import Response as BasicResponse
 import aiohttp
 import sentry_sdk
 
+from logdetective.server.exceptions import KojiInvalidTaskID
+
+from logdetective.server.database.models.koji import KojiTaskAnalysis
+from logdetective.server.database.models.exceptions import (
+    KojiTaskAnalysisTimeoutError,
+    KojiTaskNotAnalyzedError,
+    KojiTaskNotFoundError,
+)
+
 import logdetective.server.database.base
 
 from logdetective.utils import (
@@ -24,6 +41,9 @@ from logdetective.utils import (
 )
 
 from logdetective.server.config import SERVER_CONFIG, PROMPT_CONFIG, LOG
+from logdetective.server.koji import (
+    get_failed_log_from_task as get_failed_log_from_koji_task,
+)
 from logdetective.remote_log import RemoteLog
 from logdetective.server.llm import (
     mine_logs,
@@ -31,11 +51,13 @@ from logdetective.server.llm import (
     submit_text,
 )
 from logdetective.server.gitlab import process_gitlab_job_event
-from logdetective.server.metric import track_request
+from logdetective.server.metric import track_request, add_new_metrics, update_metrics
 from logdetective.server.models import (
     BuildLog,
     EmojiHook,
     JobHook,
+    KojiInstanceConfig,
+    KojiStagedResponse,
     Response,
     StagedResponse,
     TimePeriod,
@@ -49,6 +71,7 @@ from logdetective.server.emoji import (
     collect_emojis,
     collect_emojis_for_mr,
 )
+from logdetective.server.compressors import RemoteLogCompressor
 
 
 LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
@@ -178,6 +201,173 @@ async def analyze_log_staged(
     return await perform_staged_analysis(log_text)
 
 
+@app.get(
+    "/analyze/rpmbuild/koji/{koji_instance}/{task_id}",
+    response_model=KojiStagedResponse,
+)
+async def get_koji_task_analysis(
+    koji_instance: Annotated[str, Path(title="The Koji instance to use")],
+    task_id: Annotated[int, Path(title="The task ID to analyze")],
+    x_koji_token: Annotated[str, Header()] = "",
+):
+    """Provide endpoint for retrieving log file analysis of a Koji task"""
+
+    try:
+        koji_instance_config = SERVER_CONFIG.koji.instances[koji_instance]
+    except KeyError:
+        # This Koji instance is not configured, so we will return a 404.
+        return BasicResponse(status_code=404, content="Unknown Koji instance.")
+
+    # This should always be available in a production environment.
+    # In a testing environment, the tokens list may be empty, in which case
+    # it will just proceed.
+    if koji_instance_config.tokens and x_koji_token not in koji_instance_config.tokens:
+        # (Unauthorized) error.
+        return BasicResponse(x_koji_token, status_code=401)
+
+    # Check if we have a response for this task
+    try:
+        return KojiTaskAnalysis.get_response_by_task_id(task_id)
+
+    except (KojiInvalidTaskID, KojiTaskNotFoundError):
+        # This task ID is malformed, out of range, or not found, so we will
+        # return a 404.
+        return BasicResponse(status_code=404)
+
+    except KojiTaskAnalysisTimeoutError:
+        # Task analysis has timed out, so we assume that the request was lost
+        # and that we need to start another analysis.
+        # There isn't a fully-appropriate error code for this, so we'll use
+        # 503 (Service Unavailable) as our best option.
+        return BasicResponse(
+            status_code=503, content="Task analysis timed out, please retry."
+        )
+
+    except KojiTaskNotAnalyzedError:
+        # Its still running, so we need to return a 202
+        # (Accepted) code to let the client know to keep waiting.
+        return BasicResponse(
+            status_code=202, content=f"Analysis still in progress for task {task_id}"
+        )
+
+
+@app.post(
+    "/analyze/rpmbuild/koji/{koji_instance}/{task_id}",
+    response_model=KojiStagedResponse,
+)
+async def analyze_rpmbuild_koji(
+    koji_instance: Annotated[str, Path(title="The Koji instance to use")],
+    task_id: Annotated[int, Path(title="The task ID to analyze")],
+    x_koji_token: Annotated[str, Header()] = "",
+    x_koji_callback: Annotated[str, Header()] = "",
+    background_tasks: BackgroundTasks = BackgroundTasks(),
+):
+    """Provide endpoint for retrieving log file analysis of a Koji task"""
+
+    try:
+        koji_instance_config = SERVER_CONFIG.koji.instances[koji_instance]
+    except KeyError:
+        # This Koji instance is not configured, so we will return a 404.
+        return BasicResponse(status_code=404, content="Unknown Koji instance.")
+
+    # This should always be available in a production environment.
+    # In a testing environment, the tokens list may be empty, in which case
+    # it will just proceed.
+    if koji_instance_config.tokens and x_koji_token not in koji_instance_config.tokens:
+        # (Unauthorized) error.
+        return BasicResponse(x_koji_token, status_code=401)
+
+    # Check if we already have a response for this task
+    try:
+        response = KojiTaskAnalysis.get_response_by_task_id(task_id)
+
+    except KojiInvalidTaskID:
+        # This task ID is malformed or out of range, so we will return a 400.
+        response = BasicResponse(status_code=404, content="Invalid or unknown task ID.")
+
+    except (KojiTaskNotFoundError, KojiTaskAnalysisTimeoutError):
+        # Task not yet analyzed or it timed out, so we need to start the
+        # analysis in the background and return a 202 (Accepted) error.
+
+        background_tasks.add_task(
+            analyze_koji_task,
+            task_id,
+            koji_instance_config,
+        )
+
+        # If a callback URL is provided, we need to add it to the callbacks
+        # table so that we can notify it when the analysis is complete.
+        if x_koji_callback:
+            koji_instance_config.register_callback(task_id, x_koji_callback)
+
+        response = BasicResponse(
+            status_code=202, content=f"Beginning analysis of task {task_id}"
+        )
+
+    except KojiTaskNotAnalyzedError:
+        # Its still running, so we need to return a 202
+        # (Accepted) error.
+        response = BasicResponse(
+            status_code=202, content=f"Analysis still in progress for task {task_id}"
+        )
+
+    return response
+
+
+async def analyze_koji_task(task_id: int, koji_instance_config: KojiInstanceConfig):
+    """Analyze a koji task and return the response"""
+
+    # Get the log text from the koji task
+    koji_conn = koji_instance_config.get_connection()
+    log_file_name, log_text = await get_failed_log_from_koji_task(
+        koji_conn, task_id, max_size=SERVER_CONFIG.koji.max_artifact_size
+    )
+
+    # We need to handle the metric tracking manually here, because we need
+    # to retrieve the metric ID to associate it with the koji task analysis.
+
+    metrics_id = await add_new_metrics(
+        "analyze_koji_task",
+        log_text,
+        received_at=datetime.datetime.now(datetime.timezone.utc),
+        compressed_log_content=RemoteLogCompressor.zip_text(log_text),
+    )
+
+    # We need to associate the metric ID with the koji task analysis.
+    # This will create the new row without a response, which we will use as
+    # an indicator that the analysis is in progress.
+    KojiTaskAnalysis.create_or_restart(
+        koji_instance=koji_instance_config.xmlrpc_url,
+        task_id=task_id,
+        log_file_name=log_file_name,
+    )
+    response = await perform_staged_analysis(log_text)
+
+    # Now that we have the response, we can update the metrics and mark the
+    # koji task analysis as completed.
+    update_metrics(metrics_id, response)
+    KojiTaskAnalysis.add_response(task_id, metrics_id)
+
+    # Notify any callbacks that the analysis is complete.
+    for callback in koji_instance_config.get_callbacks(task_id):
+        LOG.info("Notifying callback %s of task %d completion", callback, task_id)
+        asyncio.create_task(
+            send_koji_callback(callback, task_id)
+        )
+
+    # Now that it's sent, we can clear the callbacks for this task.
+    koji_instance_config.clear_callbacks(task_id)
+
+    return response
+
+
+async def send_koji_callback(callback: str, task_id: int):
+    """Send a callback to the specified URL with the task ID and log file name."""
+    async with aiohttp.ClientSession() as session:
+        async with session.post(callback, json={"task_id": task_id}):
+            pass
+
+
 @app.get("/queue/print")
 async def queue_print(msg: str):
     """Debug endpoint to test the LLM request queue"""
--- logdetective-1.6.0/logdetective/server/templates/gitlab_full_comment.md.j2
+++ logdetective-1.7.0/logdetective/server/templates/gitlab_full_comment.md.j2
@@ -58,7 +58,9 @@ This comment was created by [Log Detective][log-detective].
 Was the provided feedback accurate and helpful? <br>Please vote with :thumbsup:
 or :thumbsdown: to help us improve.<br>
 
-
+<i>If this Log Detective report contains harmful content, please use the
+[Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
+and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>
 
 [log-detective]: https://log-detective.com/
 [contact]: https://github.com/fedora-copr
--- logdetective-1.6.0/logdetective/server/templates/gitlab_short_comment.md.j2
+++ logdetective-1.7.0/logdetective/server/templates/gitlab_short_comment.md.j2
@@ -47,7 +47,9 @@ This comment was created by [Log Detective][log-detective].
 Was the provided feedback accurate and helpful? <br>Please vote with :thumbsup:
 or :thumbsdown: to help us improve.<br>
 
-
+<i>If this Log Detective report contains harmful content, please use the
+[Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
+and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>
 
 [log-detective]: https://log-detective.com/
 [contact]: https://github.com/fedora-copr
--- logdetective-1.6.0/pyproject.toml
+++ logdetective-1.7.0/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "logdetective"
-version = "1.6.0"
+version = "1.7.0"
 description = "Log using LLM AI to search for build/test failures and provide ideas for fixing these."
 authors = ["Jiri Podivin <jpodivin@gmail.com>"]
 license = "Apache-2.0"