logdetective 2.5.0.tar.gz → 2.7.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. {logdetective-2.5.0 → logdetective-2.7.0}/PKG-INFO +15 -1
  2. {logdetective-2.5.0 → logdetective-2.7.0}/README.md +14 -0
  3. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/logdetective.py +14 -11
  4. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/prompts-summary-first.yml +0 -2
  5. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/prompts.yml +0 -3
  6. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/base.py +5 -2
  7. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/models/__init__.py +2 -2
  8. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/models/koji.py +15 -9
  9. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/models/merge_request_jobs.py +42 -22
  10. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/models/metrics.py +25 -13
  11. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/templates/gitlab_full_comment.md.j2 +7 -7
  12. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/templates/gitlab_short_comment.md.j2 +7 -7
  13. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/utils.py +22 -6
  14. {logdetective-2.5.0 → logdetective-2.7.0}/pyproject.toml +1 -1
  15. {logdetective-2.5.0 → logdetective-2.7.0}/LICENSE +0 -0
  16. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/__init__.py +0 -0
  17. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/constants.py +0 -0
  18. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/drain3.ini +0 -0
  19. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/extractors.py +0 -0
  20. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/models.py +0 -0
  21. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/prompts-summary-only.yml +0 -0
  22. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/remote_log.py +0 -0
  23. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/__init__.py +0 -0
  24. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/compressors.py +0 -0
  25. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/config.py +0 -0
  26. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/__init__.py +0 -0
  27. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/database/models/exceptions.py +0 -0
  28. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/emoji.py +0 -0
  29. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/exceptions.py +0 -0
  30. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/gitlab.py +0 -0
  31. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/koji.py +0 -0
  32. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/llm.py +0 -0
  33. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/metric.py +0 -0
  34. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/models.py +0 -0
  35. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/plot.py +0 -0
  36. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/server.py +0 -0
  37. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/templates/base_response.html.j2 +0 -0
  38. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/server/utils.py +0 -0
  39. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective/skip_snippets.yml +0 -0
  40. {logdetective-2.5.0 → logdetective-2.7.0}/logdetective.1.asciidoc +0 -0
--- logdetective-2.5.0/PKG-INFO
+++ logdetective-2.7.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: logdetective
-Version: 2.5.0
+Version: 2.7.0
 Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
 License: Apache-2.0
 License-File: LICENSE
@@ -128,6 +128,20 @@ Note that streaming with some models (notably Meta-Llama-3) is broken and can be
 
     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_M.gguf --no-stream
 
+Choice of LLM
+-------------
+
+While Log Detective is compatible with a wide range of LLMs, it requires an instruction-tuned model to function properly.
+
+Whether a model has been trained to follow instructions can be determined by examining its model card, or simply by checking whether it has `instruct` in its name.
+
+When deployed as a server, Log Detective uses the `/chat/completions` API as defined by OpenAI. The API must support both the `system` and `user` roles in order to work properly with a system prompt.
+
+The configuration fields `system_role` and `user_role` can be used to set role names for APIs with non-standard roles.
+
+> **Note:**
+> When no system role is available, both fields can be set to the same value. This concatenates the system prompt with the standard prompt,
+> which may have a negative impact on the coherence of the response.
 
 Real Example
 ------------
--- logdetective-2.5.0/README.md
+++ logdetective-2.7.0/README.md
@@ -73,6 +73,20 @@ Note that streaming with some models (notably Meta-Llama-3) is broken and can be
 
     logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_M.gguf --no-stream
 
+Choice of LLM
+-------------
+
+While Log Detective is compatible with a wide range of LLMs, it requires an instruction-tuned model to function properly.
+
+Whether a model has been trained to follow instructions can be determined by examining its model card, or simply by checking whether it has `instruct` in its name.
+
+When deployed as a server, Log Detective uses the `/chat/completions` API as defined by OpenAI. The API must support both the `system` and `user` roles in order to work properly with a system prompt.
+
+The configuration fields `system_role` and `user_role` can be used to set role names for APIs with non-standard roles.
+
+> **Note:**
+> When no system role is available, both fields can be set to the same value. This concatenates the system prompt with the standard prompt,
+> which may have a negative impact on the coherence of the response.
 
 Real Example
 ------------
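The new "Choice of LLM" section above describes how the server builds its chat request from configurable role names. Below is a minimal sketch of that message-building logic, assuming only what the README states; the `build_messages` helper, its defaults, and the sample prompts are illustrative, not part of the package.

```python
# Illustrative sketch of the system_role/user_role configuration described
# in the new README section. Only the field semantics come from the docs;
# this helper and its defaults are assumptions.
def build_messages(system_prompt: str, user_prompt: str,
                   system_role: str = "system", user_role: str = "user"):
    if system_role == user_role:
        # No distinct system role: concatenate the system prompt with the
        # standard prompt in a single message (may reduce coherence).
        return [{"role": user_role,
                 "content": f"{system_prompt}\n{user_prompt}"}]
    return [
        {"role": system_role, "content": system_prompt},
        {"role": user_role, "content": user_prompt},
    ]


print(build_messages("You analyse build logs.", "Explain this failure: ..."))
```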
--- logdetective-2.5.0/logdetective/logdetective.py
+++ logdetective-2.7.0/logdetective/logdetective.py
@@ -174,11 +174,6 @@ async def run():  # pylint: disable=too-many-statements,too-many-locals,too-many
     log_summary = format_snippets(log_summary)
     LOG.info("Log summary: \n %s", log_summary)
 
-    prompt = (
-        f"{prompts_configuration.default_system_prompt}\n"
-        f"{prompts_configuration.prompt_template}"
-    )
-
     stream = True
     if args.no_stream:
         stream = False
@@ -186,30 +181,38 @@ async def run():  # pylint: disable=too-many-statements,too-many-locals,too-many
         log_summary,
         model,
         stream,
-        prompt_template=prompt,
+        prompt_templates=prompts_configuration,
         temperature=args.temperature,
     )
     probs = []
     print("Explanation:")
     # We need to extract top token probability from the response
-    # CreateCompletionResponse structure of llama-cpp-python.
+    # CreateChatCompletionResponse structure of llama-cpp-python.
     # `compute_certainty` function expects list of dictionaries with form
     # { 'logprob': <float> } as expected from the OpenAI API.
 
     if args.no_stream:
-        print(response["choices"][0]["text"])
+        print(response["choices"][0]["message"]["content"])
         probs = [
-            {"logprob": e} for e in response["choices"][0]["logprobs"]["token_logprobs"]
+            {"logprob": e["logprob"]} for e in response["choices"][0]["logprobs"]["content"]
         ]
 
     else:
         # Stream the output
        for chunk in response:
+            # The first (or any other) chunk may not contain the fields
+            # choices[0].delta.content or choices[0].logprobs; if so, just skip it.
+            if any([
+                'content' not in chunk["choices"][0]["delta"],
+                'logprobs' not in chunk["choices"][0]
+            ]):
+                continue
+
             if isinstance(chunk["choices"][0]["logprobs"], dict):
                 probs.append(
-                    {"logprob": chunk["choices"][0]["logprobs"]["token_logprobs"][0]}
+                    {"logprob": chunk["choices"][0]["logprobs"]["content"][0]["logprob"]}
                 )
-            delta = chunk["choices"][0]["text"]
+            delta = chunk["choices"][0]["delta"]["content"]
             print(delta, end="", flush=True)
     certainty = compute_certainty(probs)
 
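The rewritten loop above collects entries of the form `{"logprob": <float>}` and hands them to `compute_certainty`. The actual formula lives in `logdetective/utils.py` and is not shown in this diff; the sketch below assumes certainty is the mean top-token probability, which is only one plausible choice.

```python
# Hedged sketch of a certainty computation over the collected `probs` list.
# The mean-probability formula here is an assumption, not the package's code.
import numpy as np


def compute_certainty_sketch(probs: list[dict]) -> float:
    """Average the top-token probabilities and scale to a percentage."""
    if not probs:
        return 0.0
    logprobs = np.array([p["logprob"] for p in probs])
    # exp(logprob) recovers each token's probability in [0, 1].
    return float(np.mean(np.exp(logprobs)) * 100)


print(compute_certainty_sketch([{"logprob": -0.1}, {"logprob": -2.3}]))
```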
--- logdetective-2.5.0/logdetective/prompts-summary-first.yml
+++ logdetective-2.7.0/logdetective/prompts-summary-first.yml
@@ -18,5 +18,3 @@ prompt_template: |
   Snippets:
 
   {}
-
-  Analysis:
--- logdetective-2.5.0/logdetective/prompts.yml
+++ logdetective-2.7.0/logdetective/prompts.yml
@@ -19,7 +19,6 @@ prompt_template: |
 
   {}
 
-  Analysis:
 
 snippet_prompt_template: |
   Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution
@@ -30,7 +29,6 @@ snippet_prompt_template: |
 
   {}
 
-  Analysis:
 
 prompt_template_staged: |
   Given following log snippets, their explanation, and nothing else, explain what failure, if any, occurred during build of this package.
@@ -47,7 +45,6 @@ prompt_template_staged: |
 
   {}
 
-  Analysis:
 
 # System prompts
 # System prompts are meant to serve as general guide for model behavior,
--- logdetective-2.5.0/logdetective/server/database/base.py
+++ logdetective-2.7.0/logdetective/server/database/base.py
@@ -1,6 +1,6 @@
 from os import getenv
 from contextlib import asynccontextmanager
-from sqlalchemy.orm import declarative_base
+from sqlalchemy.orm import DeclarativeBase
 from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker
 from logdetective import logger
 
@@ -24,7 +24,10 @@ sqlalchemy_echo = getenv("SQLALCHEMY_ECHO", "False").lower() in (
 )
 engine = create_async_engine(get_pg_url(), echo=sqlalchemy_echo)
 SessionFactory = async_sessionmaker(autoflush=True, bind=engine)  # pylint: disable=invalid-name
-Base = declarative_base()
+
+
+class Base(DeclarativeBase):
+    """Declarative base class for all ORM models."""
 
 
 @asynccontextmanager
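This is the standard SQLAlchemy 2.0 migration away from the `declarative_base()` factory. A minimal standalone sketch (not package code) of why the class-statement form is preferred:

```python
# Contrast between the legacy and SQLAlchemy 2.0 declarative styles.
from sqlalchemy.orm import DeclarativeBase, declarative_base

# Legacy 1.x style: the base class comes out of a factory function,
# which static type checkers cannot see through.
LegacyBase = declarative_base()


# 2.0 style: the base is an ordinary class statement, so mypy/pyright
# understand it and Mapped[...] annotations on subclasses are checked.
class Base(DeclarativeBase):
    """Shared declarative base for ORM models."""
```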
--- logdetective-2.5.0/logdetective/server/database/models/__init__.py
+++ logdetective-2.7.0/logdetective/server/database/models/__init__.py
@@ -1,4 +1,3 @@
-from logdetective.server.database.base import Base
 from logdetective.server.database.models.merge_request_jobs import (
     Forge,
     GitlabMergeRequestJobs,
@@ -18,8 +17,9 @@ from logdetective.server.database.models.exceptions import (
     KojiTaskAnalysisTimeoutError,
 )
 
+# pylint: disable=undefined-all-variable
+
 __all__ = [
-    Base.__name__,
     GitlabMergeRequestJobs.__name__,
     Comments.__name__,
     Reactions.__name__,
--- logdetective-2.5.0/logdetective/server/database/models/koji.py
+++ logdetective-2.7.0/logdetective/server/database/models/koji.py
@@ -1,6 +1,9 @@
+from __future__ import annotations
+from typing import Optional
 from datetime import datetime, timedelta, timezone
-from sqlalchemy import Column, BigInteger, DateTime, ForeignKey, Integer, String, select
-from sqlalchemy.orm import relationship
+from sqlalchemy import BigInteger, DateTime, ForeignKey, Integer, String, select
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
 from sqlalchemy.exc import OperationalError
 import backoff
 
@@ -21,25 +24,28 @@ class KojiTaskAnalysis(Base):
 
     __tablename__ = "koji_task_analysis"
 
-    id = Column(Integer, primary_key=True)
-    koji_instance = Column(String(255), nullable=False, index=True)
-    task_id = Column(BigInteger, nullable=False, index=True, unique=True)
-    log_file_name = Column(String(255), nullable=False, index=True)
-    request_received_at = Column(
+    id: Mapped[int] = mapped_column(Integer, primary_key=True)
+    koji_instance: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
+    task_id: Mapped[int] = mapped_column(BigInteger, nullable=False, index=True, unique=True)
+    log_file_name: Mapped[str] = mapped_column(String(255), nullable=False, index=True)
+    request_received_at: Mapped[datetime] = mapped_column(
         DateTime(timezone=True),
         nullable=False,
         index=True,
         default=datetime.now(timezone.utc),
         comment="Timestamp when the request was received",
     )
-    response_id = Column(
+    response_id: Mapped[Optional[int]] = mapped_column(
         Integer,
         ForeignKey("analyze_request_metrics.id"),
         nullable=True,
         index=False,
         comment="The id of the analyze request metrics for this task",
     )
-    response = relationship("AnalyzeRequestMetrics")
+    response: Mapped[Optional["AnalyzeRequestMetrics"]] = relationship(
+        "AnalyzeRequestMetrics",
+        back_populates="koji_tasks"
+    )
 
     @classmethod
     @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
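The column definitions above move from `Column(...)` attributes to the typed `Mapped[...]`/`mapped_column(...)` style of SQLAlchemy 2.0. A small self-contained sketch of the conventions involved (toy model, not from the package):

```python
# Standalone sketch of the SQLAlchemy 2.0 typed mapping style.
from typing import Optional

from sqlalchemy import BigInteger, String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class TaskRecord(Base):
    __tablename__ = "task_record"

    # Mapped[T] carries the Python type; mapped_column() the SQL options.
    id: Mapped[int] = mapped_column(primary_key=True)
    task_id: Mapped[int] = mapped_column(BigInteger, unique=True, index=True)
    # A non-Optional annotation implies NOT NULL ...
    log_file_name: Mapped[str] = mapped_column(String(255))
    # ... while Optional[...] implies a nullable column.
    note: Mapped[Optional[str]] = mapped_column(String(255))
```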
--- logdetective-2.5.0/logdetective/server/database/models/merge_request_jobs.py
+++ logdetective-2.7.0/logdetective/server/database/models/merge_request_jobs.py
@@ -1,12 +1,12 @@
+from __future__ import annotations
 import enum
 import datetime
-from typing import Optional, List, Tuple, Self
+from typing import Optional, List, Tuple, Self, TYPE_CHECKING
 
 import backoff
 
 from sqlalchemy import (
     Enum,
-    Column,
     BigInteger,
     DateTime,
     String,
@@ -15,12 +15,16 @@ from sqlalchemy import (
     desc,
     select,
 )
-from sqlalchemy.orm import relationship
+from sqlalchemy.orm import Mapped, mapped_column, relationship
 from sqlalchemy.engine import Row
 from sqlalchemy.exc import OperationalError
 from logdetective.server.database.base import Base, transaction, DB_MAX_RETRIES
 
 
+if TYPE_CHECKING:
+    from .metrics import AnalyzeRequestMetrics
+
+
 class Forge(str, enum.Enum):
     """List of forges managed by logdetective"""
 
@@ -35,21 +39,26 @@ class GitlabMergeRequestJobs(Base):
 
     __tablename__ = "gitlab_merge_request_jobs"
 
-    id = Column(BigInteger, primary_key=True)
-    forge = Column(Enum(Forge), nullable=False, index=True, comment="The forge name")
-    project_id = Column(
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    forge: Mapped[Forge] = mapped_column(
+        Enum(Forge),
+        nullable=False,
+        index=True,
+        comment="The forge name"
+    )
+    project_id: Mapped[int] = mapped_column(
         BigInteger,
         nullable=False,
         index=True,
         comment="The project gitlab id",
     )
-    mr_iid = Column(
+    mr_iid: Mapped[int] = mapped_column(
         BigInteger,
         nullable=False,
         index=False,
         comment="The merge request gitlab iid",
     )
-    job_id = Column(
+    job_id: Mapped[int] = mapped_column(
         BigInteger,
         nullable=False,
         index=True,
@@ -63,11 +72,14 @@ class GitlabMergeRequestJobs(Base):
         ),
     )
 
-    comment = relationship(
+    comment: Mapped[List["Comments"]] = relationship(
         "Comments", back_populates="merge_request_job", uselist=False
     )  # 1 comment for 1 job
 
-    request_metrics = relationship("AnalyzeRequestMetrics", back_populates="mr_job")
+    request_metrics: Mapped[List["AnalyzeRequestMetrics"]] = relationship(
+        "AnalyzeRequestMetrics",
+        back_populates="mr_job"
+    )
 
     @classmethod
     @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
@@ -183,8 +195,8 @@ class Comments(Base):
 
     __tablename__ = "comments"
 
-    id = Column(BigInteger, primary_key=True)
-    merge_request_job_id = Column(
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    merge_request_job_id: Mapped[int] = mapped_column(
         BigInteger,
         ForeignKey("gitlab_merge_request_jobs.id"),
         nullable=False,
@@ -192,14 +204,19 @@ class Comments(Base):
         index=True,
         comment="The associated merge request job (db) id",
     )
-    forge = Column(Enum(Forge), nullable=False, index=True, comment="The forge name")
-    comment_id = Column(
+    forge: Mapped[Forge] = mapped_column(
+        Enum(Forge),
+        nullable=False,
+        index=True,
+        comment="The forge name"
+    )
+    comment_id: Mapped[str] = mapped_column(
         String(50),  # e.g. 'd5a3ff139356ce33e37e73add446f16869741b50'
         nullable=False,
         index=True,
         comment="The comment gitlab id",
     )
-    created_at = Column(
+    created_at: Mapped[datetime.datetime] = mapped_column(
         DateTime(timezone=True),
         nullable=False,
         comment="Timestamp when the comment was created",
@@ -209,8 +226,11 @@ class Comments(Base):
         UniqueConstraint("forge", "comment_id", name="uix_forge_comment_id"),
     )
 
-    merge_request_job = relationship("GitlabMergeRequestJobs", back_populates="comment")
-    reactions = relationship("Reactions", back_populates="comment")
+    merge_request_job: Mapped["GitlabMergeRequestJobs"] = relationship(
+        "GitlabMergeRequestJobs",
+        back_populates="comment"
+    )
+    reactions: Mapped[list["Reactions"]] = relationship("Reactions", back_populates="comment")
 
     @classmethod
     @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
@@ -408,20 +428,20 @@ class Reactions(Base):
 
     __tablename__ = "reactions"
 
-    id = Column(BigInteger, primary_key=True)
-    comment_id = Column(
+    id: Mapped[int] = mapped_column(BigInteger, primary_key=True)
+    comment_id: Mapped[int] = mapped_column(
         BigInteger,
         ForeignKey("comments.id"),
         nullable=False,
         index=True,
         comment="The associated comment (db) id",
     )
-    reaction_type = Column(
+    reaction_type: Mapped[str] = mapped_column(
         String(127),  # e.g. 'thumbs-up'
         nullable=False,
         comment="The type of reaction",
     )
-    count = Column(
+    count: Mapped[int] = mapped_column(
         BigInteger,
         nullable=False,
         comment="The number of reactions, of this type, given in the comment",
@@ -431,7 +451,7 @@ class Reactions(Base):
         UniqueConstraint("comment_id", "reaction_type", name="uix_comment_reaction"),
     )
 
-    comment = relationship("Comments", back_populates="reactions")
+    comment: Mapped["Comments"] = relationship("Comments", back_populates="reactions")
 
     @classmethod
     @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
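The `from __future__ import annotations` and `TYPE_CHECKING` additions above let the two model modules annotate each other's classes without importing each other at runtime, which would be a circular import. A generic sketch of the pattern (module and class names are illustrative, not the package's):

```python
# job.py -- needs the Metrics class from metrics.py for annotations only.
from __future__ import annotations  # annotations become strings, evaluated lazily

from typing import TYPE_CHECKING

if TYPE_CHECKING:  # True only for static type checkers, never at runtime
    from metrics import Metrics  # so no circular import when job.py executes


class Job:
    def latest_metrics(self) -> Metrics | None:
        # Fine: the annotation is only resolved by the type checker.
        ...
```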
--- logdetective-2.5.0/logdetective/server/database/models/metrics.py
+++ logdetective-2.7.0/logdetective/server/database/models/metrics.py
@@ -1,12 +1,12 @@
+from __future__ import annotations
 import io
 import enum
 import datetime
-from typing import Optional, List, Self, Tuple
+from typing import Optional, List, Self, Tuple, TYPE_CHECKING
 
 import backoff
 
 from sqlalchemy import (
-    Column,
     Integer,
     Float,
     DateTime,
@@ -17,7 +17,7 @@ from sqlalchemy import (
     ForeignKey,
     LargeBinary,
 )
-from sqlalchemy.orm import relationship, aliased
+from sqlalchemy.orm import Mapped, mapped_column, relationship, aliased
 from sqlalchemy.exc import OperationalError
 
 from logdetective.server.database.base import Base, transaction, DB_MAX_RETRIES
@@ -27,6 +27,10 @@ from logdetective.server.database.models.merge_request_jobs import (
 )
 
 
+if TYPE_CHECKING:
+    from .koji import KojiTaskAnalysis
+
+
 class EndpointType(enum.Enum):
     """Different analyze endpoints"""
 
@@ -42,45 +46,45 @@ class AnalyzeRequestMetrics(Base):
 
     __tablename__ = "analyze_request_metrics"
 
-    id = Column(Integer, primary_key=True)
-    endpoint = Column(
+    id: Mapped[int] = mapped_column(Integer, primary_key=True)
+    endpoint: Mapped[EndpointType] = mapped_column(
         Enum(EndpointType),
         nullable=False,
         index=True,
         comment="The service endpoint that was called",
     )
-    request_received_at = Column(
+    request_received_at: Mapped[datetime.datetime] = mapped_column(
         DateTime(timezone=True),
         nullable=False,
         index=True,
         default=datetime.datetime.now(datetime.timezone.utc),
         comment="Timestamp when the request was received",
     )
-    compressed_log = Column(
+    compressed_log: Mapped[bytes] = mapped_column(
         LargeBinary(length=314572800),  # 300MB limit (300 * 1024 * 1024)
         nullable=False,
         index=False,
         comment="Log processed, saved in a zip format",
     )
-    compressed_response = Column(
+    compressed_response: Mapped[Optional[bytes]] = mapped_column(
         LargeBinary(length=314572800),  # 300MB limit (300 * 1024 * 1024)
         nullable=True,
         index=False,
         comment="Given response (with explanation and snippets) saved in a zip format",
     )
-    response_sent_at = Column(
+    response_sent_at: Mapped[Optional[datetime.datetime]] = mapped_column(
         DateTime(timezone=True),
         nullable=True,
         comment="Timestamp when the response was sent back",
     )
-    response_length = Column(
+    response_length: Mapped[Optional[int]] = mapped_column(
         Integer, nullable=True, comment="Length of the response in chars"
     )
-    response_certainty = Column(
+    response_certainty: Mapped[Optional[float]] = mapped_column(
         Float, nullable=True, comment="Certainty for generated response"
     )
 
-    merge_request_job_id = Column(
+    merge_request_job_id: Mapped[Optional[int]] = mapped_column(
         Integer,
         ForeignKey("gitlab_merge_request_jobs.id"),
         nullable=True,
@@ -88,7 +92,15 @@ class AnalyzeRequestMetrics(Base):
         comment="Is this an analyze request coming from a merge request?",
     )
 
-    mr_job = relationship("GitlabMergeRequestJobs", back_populates="request_metrics")
+    mr_job: Mapped[Optional["GitlabMergeRequestJobs"]] = relationship(
+        "GitlabMergeRequestJobs",
+        back_populates="request_metrics"
+    )
+
+    koji_tasks: Mapped[List["KojiTaskAnalysis"]] = relationship(
+        "KojiTaskAnalysis",
+        back_populates="response"
+    )
 
     @classmethod
     @backoff.on_exception(backoff.expo, OperationalError, max_tries=DB_MAX_RETRIES)
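With `koji_tasks` added on `AnalyzeRequestMetrics` and `back_populates="koji_tasks"` on the `KojiTaskAnalysis.response` side, the relationship becomes bidirectional. A toy sketch (not package code) of how such a `back_populates` pair behaves:

```python
# Bidirectional one-to-many pair: both sides name each other via
# back_populates, so assigning one side keeps the other in sync.
from typing import List, Optional

from sqlalchemy import ForeignKey
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship


class Base(DeclarativeBase):
    pass


class Metrics(Base):
    __tablename__ = "metrics"
    id: Mapped[int] = mapped_column(primary_key=True)
    tasks: Mapped[List["TaskAnalysis"]] = relationship(back_populates="response")


class TaskAnalysis(Base):
    __tablename__ = "task_analysis"
    id: Mapped[int] = mapped_column(primary_key=True)
    response_id: Mapped[Optional[int]] = mapped_column(ForeignKey("metrics.id"))
    response: Mapped[Optional[Metrics]] = relationship(back_populates="tasks")


# In a session, `task.response = metrics` also appends task to metrics.tasks.
```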
--- logdetective-2.5.0/logdetective/server/templates/gitlab_full_comment.md.j2
+++ logdetective-2.7.0/logdetective/server/templates/gitlab_full_comment.md.j2
@@ -57,15 +57,15 @@ Please know that the explanation was provided by AI and may be incorrect.
     </li>
   </ul>
 </details>
----
-This comment was created by [Log Detective][log-detective].
+
+<hr>
+
+This comment was created by <a href="https://logdetective.com">Log Detective</a>.
 Was the provided feedback accurate and helpful?
 <br>
 Please vote with :thumbsup:
 or :thumbsdown: to help us improve.
 <br>
-<i>If this Log Detective report contains harmful content, please use the
-[Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
-and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>
-[log-detective]: https://log-detective.com/
-[contact]: https://github.com/fedora-copr
+<i>If this Log Detective report contains harmful content,
+please use the <a href="https://docs.gitlab.com/user/report_abuse/">Gitlab reporting feature for harmful content</a>
+and contact the <a href="https://github.com/fedora-copr/logdetective/issues">Log Detective developers</a>.</i>
--- logdetective-2.5.0/logdetective/server/templates/gitlab_short_comment.md.j2
+++ logdetective-2.7.0/logdetective/server/templates/gitlab_short_comment.md.j2
@@ -46,15 +46,15 @@ Please know that the explanation was provided by AI and may be incorrect.
     </li>
   </ul>
 </details>
----
-This comment was created by [Log Detective][log-detective].
+
+<hr>
+
+This comment was created by <a href="https://logdetective.com">Log Detective</a>.
 Was the provided feedback accurate and helpful?
 <br>
 Please vote with :thumbsup:
 or :thumbsdown: to help us improve.
 <br>
-<i>If this Log Detective report contains harmful content, please use the
-[Gitlab reporting feature for harmful content](https://docs.gitlab.com/user/report_abuse/)
-and contact the [Log Detective developers](https://github.com/fedora-copr/logdetective/issues).</i>
-[log-detective]: https://log-detective.com/
-[contact]: https://github.com/fedora-copr
+<i>If this Log Detective report contains harmful content,
+please use the <a href="https://docs.gitlab.com/user/report_abuse/">Gitlab reporting feature for harmful content</a>
+and contact the <a href="https://github.com/fedora-copr/logdetective/issues">Log Detective developers</a>.</i>
--- logdetective-2.5.0/logdetective/utils.py
+++ logdetective-2.7.0/logdetective/utils.py
@@ -8,7 +8,11 @@ import aiohttp
 import numpy as np
 import yaml
 
-from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
+from llama_cpp import (
+    Llama,
+    CreateChatCompletionResponse,
+    CreateChatCompletionStreamResponse,
+)
 from logdetective.constants import SNIPPET_DELIMITER
 from logdetective.models import PromptConfig, SkipSnippets
 from logdetective.remote_log import RemoteLog
@@ -123,8 +127,8 @@ def compute_certainty(probs: List[Dict]) -> float:
 
 
 def process_log(
-    log: str, model: Llama, stream: bool, prompt_template: str, temperature: float
-) -> CreateCompletionResponse | Iterator[CreateCompletionStreamResponse]:
+    log: str, model: Llama, stream: bool, prompt_templates: PromptConfig, temperature: float
+) -> CreateChatCompletionResponse | Iterator[CreateChatCompletionStreamResponse]:
     """Processes a given log using the provided language model and returns its summary.
 
     Args:
@@ -136,11 +140,23 @@ def process_log(
     Returns:
         str: The summary of the given log generated by the language model.
     """
-    response = model(
-        prompt=prompt_template.format(log),
+    messages = [
+        {
+            "role": "system",
+            "content": prompt_templates.default_system_prompt
+        },
+        {
+            "role": "user",
+            "content": prompt_templates.prompt_template.format(log)
+        },
+    ]
+
+    response = model.create_chat_completion(
+        messages=messages,
         stream=stream,
         max_tokens=0,
-        logprobs=1,
+        logprobs=True,
+        top_logprobs=1,
         temperature=temperature,
     )
 
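`process_log` now calls llama-cpp-python's chat API instead of invoking the model as a plain completion. Below is a hedged usage sketch of `create_chat_completion` with the same parameters the diff uses; the model path and prompt strings are placeholders, not from the package.

```python
# Usage sketch of llama-cpp-python's chat API as adopted above.
from llama_cpp import Llama

llm = Llama(model_path="./model-instruct.Q5_K_M.gguf")  # hypothetical file

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You explain build log failures."},
        {"role": "user", "content": "Explain this snippet: ..."},
    ],
    stream=False,
    logprobs=True,    # include per-token log probabilities in choices[0].logprobs
    top_logprobs=1,   # keep only the top candidate per position
    temperature=0.8,
)

print(response["choices"][0]["message"]["content"])
```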
--- logdetective-2.5.0/pyproject.toml
+++ logdetective-2.7.0/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "logdetective"
-version = "2.5.0"
+version = "2.7.0"
 description = "Log using LLM AI to search for build/test failures and provide ideas for fixing these."
 authors = ["Jiri Podivin <jpodivin@gmail.com>"]
 license = "Apache-2.0"
The remaining 26 files (entries 15-40 in the list above) are unchanged between the two versions.