logdetective 0.2.9__tar.gz → 0.2.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
 Name: logdetective
-Version: 0.2.9
+Version: 0.2.10
 Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
 License: Apache-2.0
 Author: Jiri Podivin
@@ -22,7 +22,7 @@ Provides-Extra: server
 Requires-Dist: drain3 (>=0.9.11,<0.10.0)
 Requires-Dist: huggingface-hub (>0.23.2)
 Requires-Dist: llama-cpp-python (>0.2.56,!=0.2.86)
-Requires-Dist: numpy (>=1.26.0,<2.0.0)
+Requires-Dist: numpy (>=1.26.0)
 Requires-Dist: requests (>0.2.31)
 Project-URL: homepage, https://github.com/fedora-copr/logdetective
 Project-URL: issues, https://github.com/fedora-copr/logdetective/issues
@@ -16,7 +16,7 @@ class LLMExtractor:
     A class that extracts relevant information from logs using a language model.
     """
     def __init__(self, model: Llama, n_lines: int = 2):
-        self.model = model
+        self.model = model
         self.n_lines = n_lines
         self.grammar = LlamaGrammar.from_string(
             "root ::= (\"Yes\" | \"No\")", verbose=False)
@@ -9,6 +9,7 @@ from logdetective.extractors import LLMExtractor, DrainExtractor
 
 LOG = logging.getLogger("logdetective")
 
+
 def setup_args():
     """ Setup argument parser and return arguments. """
     parser = argparse.ArgumentParser("logdetective")
@@ -16,6 +16,7 @@ from logdetective.constants import (
 from logdetective.extractors import DrainExtractor
 from logdetective.utils import validate_url, compute_certainty
 
+
 class BuildLog(BaseModel):
     """Model of data submitted to API.
     """
@@ -47,7 +48,6 @@ class StagedResponse(Response):
 
 LOG = logging.getLogger("logdetective")
 
-
 LLM_CPP_HOST = os.environ.get("LLAMA_CPP_HOST", "localhost")
 LLM_CPP_SERVER_ADDRESS = f"http://{LLM_CPP_HOST}"
 LLM_CPP_SERVER_PORT = os.environ.get("LLAMA_CPP_SERVER_PORT", 8000)
@@ -55,6 +55,7 @@ LLM_CPP_SERVER_TIMEOUT = os.environ.get("LLAMA_CPP_SERVER_TIMEOUT", 600)
 LOG_SOURCE_REQUEST_TIMEOUT = os.environ.get("LOG_SOURCE_REQUEST_TIMEOUT", 60)
 API_TOKEN = os.environ.get("LOGDETECTIVE_TOKEN", None)
 
+
 def requires_token_when_set(authentication: Annotated[str | None, Header()] = None):
     """
     FastAPI Depend function that expects a header named Authentication
@@ -82,6 +83,7 @@ def requires_token_when_set(authentication: Annotated[str | None, Header()] = No
             API_TOKEN, token)
     raise HTTPException(status_code=401, detail=f"Token {token} not valid.")
 
+
 app = FastAPI(dependencies=[Depends(requires_token_when_set)])
 
 
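The two hunks above only add PEP 8 blank lines, but they show the server's token-gating pattern: a FastAPI dependency reads an Authentication header and, when LOGDETECTIVE_TOKEN is set, rejects requests whose token does not match. A minimal sketch of that pattern follows; the hard-coded API_TOKEN value and the "Bearer <token>" header parsing are assumptions, not the project's exact code.

    from typing import Annotated

    from fastapi import Depends, FastAPI, Header, HTTPException

    # Stand-in for the LOGDETECTIVE_TOKEN environment variable; hypothetical value.
    API_TOKEN = "example-token"


    def requires_token_when_set(authentication: Annotated[str | None, Header()] = None):
        """Reject the request when a token is configured and the header does not match."""
        if not API_TOKEN:
            return  # no token configured, all requests pass through
        # Assumed header format: "Bearer <token>"; the project's parsing may differ.
        token = (authentication or "").split(" ")[-1]
        if token != API_TOKEN:
            raise HTTPException(status_code=401, detail=f"Token {token} not valid.")


    app = FastAPI(dependencies=[Depends(requires_token_when_set)])

Registering the dependency on the FastAPI app, as in the last line, applies the check to every route without repeating it per endpoint.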
@@ -99,7 +101,7 @@ def process_url(url: str) -> str:
         if not log_request.ok:
             raise HTTPException(status_code=400,
                                 detail="Something went wrong while getting the logs: "
-                                f"[{log_request.status_code}] {log_request.text}")
+                                f"[{log_request.status_code}] {log_request.text}")
     else:
         LOG.error("Invalid URL received ")
         raise HTTPException(status_code=400,
@@ -120,9 +122,9 @@ def mine_logs(log: str) -> List[str]:
     LOG.debug("Log summary: \n %s", log_summary)
     LOG.info("Compression ratio: %s", ratio)
 
-
     return log_summary
 
+
 async def submit_text(text: str, max_tokens: int = 0, log_probs: int = 1, stream: bool = False,
                       model: str = "default-model"):
     """Submit prompt to LLM.
@@ -131,17 +133,17 @@ async def submit_text(text: str, max_tokens: int = 0, log_probs: int = 1, stream
     """
     LOG.info("Analyzing the text")
     data = {
-        "prompt": text,
-        "max_tokens": str(max_tokens),
-        "logprobs": str(log_probs),
-        "stream": stream,
-        "model": model}
+        "prompt": text,
+        "max_tokens": str(max_tokens),
+        "logprobs": str(log_probs),
+        "stream": stream,
+        "model": model}
 
     try:
         # Expects llama-cpp server to run on LLM_CPP_SERVER_ADDRESS:LLM_CPP_SERVER_PORT
         response = requests.post(
             f"{LLM_CPP_SERVER_ADDRESS}:{LLM_CPP_SERVER_PORT}/v1/completions",
-            headers={"Content-Type":"application/json"},
+            headers={"Content-Type": "application/json"},
             data=json.dumps(data),
             timeout=int(LLM_CPP_SERVER_TIMEOUT),
             stream=stream)
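Apart from the Content-Type header gaining a space, this hunk is formatting only, but it documents the contract with the llama-cpp server: submit_text POSTs a JSON payload with prompt, max_tokens, logprobs, stream, and model to the OpenAI-style /v1/completions endpoint. A standalone request along those lines might look like the sketch below; the host, port, field values, and the response fields read at the end are assumptions based on the OpenAI-compatible completions format, not taken from the project.

    import json

    import requests

    # Assumed local llama-cpp server; matches the defaults in the hunk above.
    SERVER = "http://localhost:8000"

    payload = {
        "prompt": "Explain the following build failure: ...",
        "max_tokens": "500",   # illustrative; logdetective passes str(max_tokens)
        "logprobs": "1",       # request per-token log-probabilities
        "stream": False,
        "model": "default-model",
    }

    response = requests.post(
        f"{SERVER}/v1/completions",
        headers={"Content-Type": "application/json"},
        data=json.dumps(payload),
        timeout=600,
    )
    response.raise_for_status()
    result = response.json()
    # OpenAI-style completion responses carry the generated text (and logprobs,
    # when requested) in the first choice; the exact shape depends on the server.
    print(result["choices"][0]["text"])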
@@ -154,7 +156,7 @@ async def submit_text(text: str, max_tokens: int = 0, log_probs: int = 1, stream
         raise HTTPException(
             status_code=400,
             detail="Something went wrong while getting a response from the llama server: "
-            f"[{response.status_code}] {response.text}")
+            f"[{response.status_code}] {response.text}")
     try:
         response = json.loads(response.text)
     except UnicodeDecodeError as ex:
@@ -80,6 +80,8 @@ def compute_certainty(probs: List[Dict[str, float] | None]) -> float:
     In this case it's just a matter of applying inverse operation exp.
     Of course that leaves you with a value in range <0, 1> so it needs to be multiplied by 100.
     Simply put, this is the most straightforward way to get the numbers out.
+
+    This function is used in the server codebase.
     """
 
     top_logprobs = [
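The extended docstring describes how certainty is derived from the log-probabilities returned with a completion: apply exp to undo the log, which yields values in the range <0, 1>, then multiply by 100 to get a percentage. A minimal sketch of that calculation is below; only the exp step and the scaling come from the docstring, while the aggregation over tokens (a plain mean here) and the function name are assumptions.

    from typing import List

    import numpy as np


    def certainty_sketch(top_logprobs: List[float]) -> float:
        """Turn per-token log-probabilities into a 0-100 certainty score."""
        # exp() inverts the natural log, giving probabilities in <0, 1>.
        probabilities = np.exp(np.asarray(top_logprobs, dtype=float))
        # Scale to a percentage; aggregating with a mean is an assumption.
        return float(np.mean(probabilities) * 100)


    # Example: fairly confident tokens give a certainty of roughly 79%.
    print(certainty_sketch([-0.1, -0.3, -0.05, -0.6]))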
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "logdetective"
-version = "0.2.9"
+version = "0.2.10"
 description = "Log using LLM AI to search for build/test failures and provide ideas for fixing these."
 authors = ["Jiri Podivin <jpodivin@gmail.com>"]
 license = "Apache-2.0"
@@ -31,7 +31,9 @@ requests = ">0.2.31"
 llama-cpp-python = ">0.2.56,!=0.2.86"
 drain3 = "^0.9.11"
 huggingface-hub = ">0.23.2"
-numpy = "^1.26.0"
+# rawhide has numpy 2, F40 and F41 are still on 1.26
+# we need to support both versions
+numpy = ">=1.26.0"
 
 [build-system]
 requires = ["poetry-core"]