logdetective 2.6.0__tar.gz → 2.7.0__tar.gz
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- {logdetective-2.6.0 → logdetective-2.7.0}/PKG-INFO +1 -1
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/logdetective.py +14 -11
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/utils.py +22 -6
- {logdetective-2.6.0 → logdetective-2.7.0}/pyproject.toml +1 -1
- {logdetective-2.6.0 → logdetective-2.7.0}/LICENSE +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/README.md +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/__init__.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/constants.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/drain3.ini +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/extractors.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/models.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/prompts-summary-first.yml +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/prompts-summary-only.yml +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/prompts.yml +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/remote_log.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/__init__.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/compressors.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/config.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/__init__.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/base.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/models/__init__.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/models/exceptions.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/models/koji.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/models/merge_request_jobs.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/database/models/metrics.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/emoji.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/exceptions.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/gitlab.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/koji.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/llm.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/metric.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/models.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/plot.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/server.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/templates/base_response.html.j2 +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/templates/gitlab_full_comment.md.j2 +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/templates/gitlab_short_comment.md.j2 +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/server/utils.py +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective/skip_snippets.yml +0 -0
- {logdetective-2.6.0 → logdetective-2.7.0}/logdetective.1.asciidoc +0 -0
{logdetective-2.6.0 → logdetective-2.7.0}/logdetective/logdetective.py

@@ -174,11 +174,6 @@ async def run():  # pylint: disable=too-many-statements,too-many-locals,too-many
     log_summary = format_snippets(log_summary)
     LOG.info("Log summary: \n %s", log_summary)

-    prompt = (
-        f"{prompts_configuration.default_system_prompt}\n"
-        f"{prompts_configuration.prompt_template}"
-    )
-
     stream = True
     if args.no_stream:
         stream = False
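The removed block concatenated the system prompt and template into one flat string; as of 2.7.0, prompt assembly moves into process_log, which builds role-tagged chat messages instead (see the utils.py hunks below). A minimal sketch of the two shapes, with hypothetical placeholder values standing in for prompts_configuration:

# Hypothetical placeholder values; the real ones come from prompts_configuration.
system_prompt = "You are a log analyst."
prompt_template = "Explain the following log:\n{}"
log = "error: linker failed"

# 2.6.0 style: one flat prompt string (the block removed above)
flat_prompt = f"{system_prompt}\n{prompt_template}"

# 2.7.0 style: role-tagged chat messages, assembled inside process_log
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": prompt_template.format(log)},
]
print(messages)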
@@ -186,30 +181,38 @@ async def run():  # pylint: disable=too-many-statements,too-many-locals,too-many
         log_summary,
         model,
         stream,
-
+        prompt_templates=prompts_configuration,
         temperature=args.temperature,
     )
     probs = []
     print("Explanation:")
     # We need to extract top token probability from the response
-    #
+    # CreateChatCompletionResponse structure of llama-cpp-python.
     # `compute_certainty` function expects list of dictionaries with form
     # { 'logprob': <float> } as expected from the OpenAI API.

     if args.no_stream:
-        print(response["choices"][0]["
+        print(response["choices"][0]["message"]["content"])
         probs = [
-            {"logprob": e} for e in response["choices"][0]["logprobs"]["
+            {"logprob": e["logprob"]} for e in response["choices"][0]["logprobs"]["content"]
         ]

     else:
         # Stream the output
         for chunk in response:
+            # What might happen, is that first (or possibly any other) chunk may not contain
+            # fields choices[0].delta.content or choices[0].logprobs -> if so, we just skip it
+            if any([
+                'content' not in chunk["choices"][0]["delta"],
+                'logprobs' not in chunk["choices"][0]
+            ]):
+                continue
+
             if isinstance(chunk["choices"][0]["logprobs"], dict):
                 probs.append(
-                    {"logprob": chunk["choices"][0]["logprobs"]["
+                    {"logprob": chunk["choices"][0]["logprobs"]["content"][0]["logprob"]}
                 )
-            delta = chunk["choices"][0]["
+            delta = chunk["choices"][0]["delta"]["content"]
             print(delta, end="", flush=True)
     certainty = compute_certainty(probs)

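Both branches now normalize llama-cpp-python's logprobs payload into the `[{'logprob': <float>}, ...]` shape that compute_certainty expects. A minimal sketch of a consumer of that shape; estimate_certainty here is a hypothetical stand-in for illustration, not the package's actual compute_certainty:

import numpy as np

def estimate_certainty(probs: list[dict]) -> float:
    # Convert each top-token log-probability back to a probability
    # and average them, expressed as a percentage.
    return float(np.mean([np.exp(p["logprob"]) for p in probs]) * 100)

# Two fairly confident tokens -> certainty a bit under 100%
print(estimate_certainty([{"logprob": -0.05}, {"logprob": -0.20}]))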
{logdetective-2.6.0 → logdetective-2.7.0}/logdetective/utils.py

@@ -8,7 +8,11 @@ import aiohttp
 import numpy as np
 import yaml

-from llama_cpp import
+from llama_cpp import (
+    Llama,
+    CreateChatCompletionResponse,
+    CreateChatCompletionStreamResponse,
+)
 from logdetective.constants import SNIPPET_DELIMITER
 from logdetective.models import PromptConfig, SkipSnippets
 from logdetective.remote_log import RemoteLog
@@ -123,8 +127,8 @@ def compute_certainty(probs: List[Dict]) -> float:


 def process_log(
-    log: str, model: Llama, stream: bool,
-) ->
+    log: str, model: Llama, stream: bool, prompt_templates: PromptConfig, temperature: float
+) -> CreateChatCompletionResponse | Iterator[CreateChatCompletionStreamResponse]:
     """Processes a given log using the provided language model and returns its summary.

     Args:
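The widened return annotation means callers receive either a single response or an iterator of stream chunks, depending on the `stream` flag. A hedged sketch of how a caller can branch on that union; both llama-cpp-python response types are TypedDicts, i.e. plain dicts at runtime:

from typing import Iterator, Union
from llama_cpp import CreateChatCompletionResponse, CreateChatCompletionStreamResponse

def print_response(
    response: Union[CreateChatCompletionResponse, Iterator[CreateChatCompletionStreamResponse]],
) -> None:
    if isinstance(response, dict):
        # Non-streaming: the full message arrives at once.
        print(response["choices"][0]["message"]["content"])
    else:
        # Streaming: chunks carry incremental deltas; 'content' may be absent.
        for chunk in response:
            print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)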
@@ -136,11 +140,23 @@ def process_log(
     Returns:
         str: The summary of the given log generated by the language model.
     """
-
-
+    messages = [
+        {
+            "role": "system",
+            "content": prompt_templates.default_system_prompt
+        },
+        {
+            "role": "user",
+            "content": prompt_templates.prompt_template.format(log)
+        },
+    ]
+
+    response = model.create_chat_completion(
+        messages=messages,
         stream=stream,
         max_tokens=0,
-        logprobs=
+        logprobs=True,
+        top_logprobs=1,
         temperature=temperature,
     )

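For context, a runnable sketch of the new call shape. The model path and prompt text are hypothetical, and note that llama-cpp-python typically requires the Llama instance to be created with logits_all=True for logprobs to be returned:

from llama_cpp import Llama

# Hypothetical model path and prompts, for illustration only.
model = Llama(model_path="model.gguf", logits_all=True)

response = model.create_chat_completion(
    messages=[
        {"role": "system", "content": "You analyze build logs."},
        {"role": "user", "content": "Explain this log:\nerror: linker failed"},
    ],
    stream=False,
    logprobs=True,   # ask for per-token log-probabilities
    top_logprobs=1,  # only the top token is needed for the certainty estimate
    temperature=0.8,
)
print(response["choices"][0]["message"]["content"])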