logdetective-2.6.0-py3-none-any.whl → logdetective-2.7.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
logdetective/logdetective.py CHANGED
@@ -174,11 +174,6 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals,too-many
     log_summary = format_snippets(log_summary)
     LOG.info("Log summary: \n %s", log_summary)

-    prompt = (
-        f"{prompts_configuration.default_system_prompt}\n"
-        f"{prompts_configuration.prompt_template}"
-    )
-
     stream = True
     if args.no_stream:
         stream = False
@@ -186,30 +181,38 @@ async def run(): # pylint: disable=too-many-statements,too-many-locals,too-many
         log_summary,
         model,
         stream,
-        prompt_template=prompt,
+        prompt_templates=prompts_configuration,
         temperature=args.temperature,
     )
     probs = []
     print("Explanation:")
     # We need to extract top token probability from the response
-    # CreateCompletionResponse structure of llama-cpp-python.
+    # CreateChatCompletionResponse structure of llama-cpp-python.
     # `compute_certainty` function expects list of dictionaries with form
     # { 'logprob': <float> } as expected from the OpenAI API.

     if args.no_stream:
-        print(response["choices"][0]["text"])
+        print(response["choices"][0]["message"]["content"])
         probs = [
-            {"logprob": e} for e in response["choices"][0]["logprobs"]["token_logprobs"]
+            {"logprob": e["logprob"]} for e in response["choices"][0]["logprobs"]["content"]
         ]

     else:
         # Stream the output
         for chunk in response:
+            # The first (or any other) chunk may not contain the fields
+            # choices[0].delta.content or choices[0].logprobs; if so, skip it.
+            if any([
+                'content' not in chunk["choices"][0]["delta"],
+                'logprobs' not in chunk["choices"][0]
+            ]):
+                continue
+
             if isinstance(chunk["choices"][0]["logprobs"], dict):
                 probs.append(
-                    {"logprob": chunk["choices"][0]["logprobs"]["token_logprobs"][0]}
+                    {"logprob": chunk["choices"][0]["logprobs"]["content"][0]["logprob"]}
                 )
-            delta = chunk["choices"][0]["text"]
+            delta = chunk["choices"][0]["delta"]["content"]
             print(delta, end="", flush=True)
     certainty = compute_certainty(probs)
 
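Note: the call-site changes above track llama-cpp-python's move to the OpenAI-compatible chat-completion schema: the generated text moves from choices[0].text to choices[0].message.content (or choices[0].delta.content when streaming), and per-token log-probabilities move from logprobs.token_logprobs to logprobs.content[*].logprob. A minimal sketch of parsing both shapes; field names follow the structures named in the diff, but the literal payloads below are illustrative, not taken from the package:

# Illustrative payloads only; shapes follow the OpenAI-style chat-completion
# structures referenced in the diff (CreateChatCompletionResponse and its
# streaming counterpart).

# Non-streaming response: full message plus per-token logprobs.
response = {
    "choices": [{
        "message": {"role": "assistant", "content": "The build failed because ..."},
        "logprobs": {"content": [{"token": "The", "logprob": -0.12}]},
    }]
}
text = response["choices"][0]["message"]["content"]
probs = [{"logprob": e["logprob"]}
         for e in response["choices"][0]["logprobs"]["content"]]

# Streaming: the first chunk often carries only the role, with no
# delta.content and no logprobs, hence the new skip-guard in the diff.
chunks = [
    {"choices": [{"delta": {"role": "assistant"}}]},
    {"choices": [{"delta": {"content": "The"},
                  "logprobs": {"content": [{"token": "The", "logprob": -0.12}]}}]},
]
for chunk in chunks:
    choice = chunk["choices"][0]
    if "content" not in choice["delta"] or "logprobs" not in choice:
        continue  # same condition the diff expresses with any([...])
    print(choice["delta"]["content"], end="", flush=True)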
logdetective/utils.py CHANGED
@@ -8,7 +8,11 @@ import aiohttp
 import numpy as np
 import yaml

-from llama_cpp import Llama, CreateCompletionResponse, CreateCompletionStreamResponse
+from llama_cpp import (
+    Llama,
+    CreateChatCompletionResponse,
+    CreateChatCompletionStreamResponse,
+)
 from logdetective.constants import SNIPPET_DELIMITER
 from logdetective.models import PromptConfig, SkipSnippets
 from logdetective.remote_log import RemoteLog
@@ -123,8 +127,8 @@ def compute_certainty(probs: List[Dict]) -> float:


 def process_log(
-    log: str, model: Llama, stream: bool, prompt_template: str, temperature: float
-) -> CreateCompletionResponse | Iterator[CreateCompletionStreamResponse]:
+    log: str, model: Llama, stream: bool, prompt_templates: PromptConfig, temperature: float
+) -> CreateChatCompletionResponse | Iterator[CreateChatCompletionStreamResponse]:
     """Processes a given log using the provided language model and returns its summary.

     Args:
@@ -136,11 +140,23 @@ def process_log(
     Returns:
         str: The summary of the given log generated by the language model.
     """
-    response = model(
-        prompt=prompt_template.format(log),
+    messages = [
+        {
+            "role": "system",
+            "content": prompt_templates.default_system_prompt
+        },
+        {
+            "role": "user",
+            "content": prompt_templates.prompt_template.format(log)
+        },
+    ]
+
+    response = model.create_chat_completion(
+        messages=messages,
         stream=stream,
         max_tokens=0,
-        logprobs=1,
+        logprobs=True,
+        top_logprobs=1,
         temperature=temperature,
     )
 
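Note: in the chat-completion API, logprobs becomes a boolean toggle and top_logprobs selects how many alternatives to return per token (matching the OpenAI parameters), replacing the single integer the old completion call used for both. A hedged usage sketch of the reworked process_log; the attribute names default_system_prompt and prompt_template come from the diff, while the PromptConfig construction, model path, and argument values below are assumptions:

# Sketch only: construction details of PromptConfig and the model path are
# assumptions; only process_log's signature and the two PromptConfig
# attribute names are taken from the diff.
from llama_cpp import Llama
from logdetective.models import PromptConfig
from logdetective.utils import process_log

model = Llama(model_path="mistral-7b-instruct.Q4_K_M.gguf")  # illustrative path
prompts = PromptConfig()  # assumed to default to the packaged prompt templates

response = process_log(
    "collect2: error: ld returned 1 exit status ...",  # illustrative log text
    model,
    stream=False,
    prompt_templates=prompts,
    temperature=0.8,  # illustrative value
)
print(response["choices"][0]["message"]["content"])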
logdetective-2.6.0.dist-info/METADATA → logdetective-2.7.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: logdetective
-Version: 2.6.0
+Version: 2.7.0
 Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
 License: Apache-2.0
 License-File: LICENSE
logdetective-2.6.0.dist-info/RECORD → logdetective-2.7.0.dist-info/RECORD RENAMED
@@ -2,7 +2,7 @@ logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
 logdetective/constants.py,sha256=aCwrkBrDdS_kbNESK-Z-ewg--DSzodV2OMgwEq3UE38,2456
 logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
 logdetective/extractors.py,sha256=vT-je4NkDgSj9rRtSeLpqBU52gIUnnVgJPHFbVihpCw,5993
-logdetective/logdetective.py,sha256=Ck7TL3YvdQG8zniudM8bM51LfTyVW6Ea3BarTjzjWHo,6606
+logdetective/logdetective.py,sha256=S0abGrAQH2oi0MRisCV64Sa1UXdQLIfXFBA4tYAYqhM,6896
 logdetective/models.py,sha256=uczmQtWFgSp_ZGssngdTM4qzPF1o64dCy0469GoSbjQ,2937
 logdetective/prompts-summary-first.yml,sha256=kmyMFQmqFXpojkz7p3CyCWCPxMpFLpfDdMGisB4YwL0,808
 logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
@@ -32,9 +32,9 @@ logdetective/server/templates/gitlab_full_comment.md.j2,sha256=hSWEj_a7KZpzfbgoP
 logdetective/server/templates/gitlab_short_comment.md.j2,sha256=d396HR2DJuS8XLu2FNgVAg1CrOW8_LySQX2f6opOjp8,1957
 logdetective/server/utils.py,sha256=KiyzzUIVssBc61LhGS0QNC5EY29In3NsG9j58ZRtoNI,4104
 logdetective/skip_snippets.yml,sha256=reGlhPPCo06nNUJWiC2LY-OJOoPdcyOB7QBTSMeh0eg,487
-logdetective/utils.py,sha256=4B9wwaM4tyxLFtRnnTRDcGULJDonp6VoUS8HvUpIeSI,9388
-logdetective-2.6.0.dist-info/METADATA,sha256=hLRUiBvOeyWn6t9qkRVEx3nLuNmysSoReXL6aFBECsg,22598
-logdetective-2.6.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-logdetective-2.6.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
-logdetective-2.6.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
-logdetective-2.6.0.dist-info/RECORD,,
+logdetective/utils.py,sha256=_hiRCqFv-CSTTlMKha2uzQ0TX7LZhDcx4ITPqnvGMHo,9718
+logdetective-2.7.0.dist-info/METADATA,sha256=v3XvUztZlLeO2_rKC-VP9g1wej2tvROBiRE8wUaAeSM,22598
+logdetective-2.7.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+logdetective-2.7.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
+logdetective-2.7.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+logdetective-2.7.0.dist-info/RECORD,,