logdetective 0.4.0__py3-none-any.whl → 2.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. logdetective/constants.py +33 -12
  2. logdetective/extractors.py +137 -68
  3. logdetective/logdetective.py +102 -33
  4. logdetective/models.py +99 -0
  5. logdetective/prompts-summary-first.yml +20 -0
  6. logdetective/prompts-summary-only.yml +13 -0
  7. logdetective/prompts.yml +90 -0
  8. logdetective/remote_log.py +67 -0
  9. logdetective/server/compressors.py +186 -0
  10. logdetective/server/config.py +78 -0
  11. logdetective/server/database/base.py +34 -26
  12. logdetective/server/database/models/__init__.py +33 -0
  13. logdetective/server/database/models/exceptions.py +17 -0
  14. logdetective/server/database/models/koji.py +143 -0
  15. logdetective/server/database/models/merge_request_jobs.py +623 -0
  16. logdetective/server/database/models/metrics.py +427 -0
  17. logdetective/server/emoji.py +148 -0
  18. logdetective/server/exceptions.py +37 -0
  19. logdetective/server/gitlab.py +451 -0
  20. logdetective/server/koji.py +159 -0
  21. logdetective/server/llm.py +309 -0
  22. logdetective/server/metric.py +75 -30
  23. logdetective/server/models.py +426 -23
  24. logdetective/server/plot.py +432 -0
  25. logdetective/server/server.py +580 -468
  26. logdetective/server/templates/base_response.html.j2 +59 -0
  27. logdetective/server/templates/gitlab_full_comment.md.j2 +73 -0
  28. logdetective/server/templates/gitlab_short_comment.md.j2 +62 -0
  29. logdetective/server/utils.py +98 -32
  30. logdetective/skip_snippets.yml +12 -0
  31. logdetective/utils.py +187 -73
  32. logdetective-2.11.0.dist-info/METADATA +568 -0
  33. logdetective-2.11.0.dist-info/RECORD +40 -0
  34. {logdetective-0.4.0.dist-info → logdetective-2.11.0.dist-info}/WHEEL +1 -1
  35. logdetective/server/database/models.py +0 -88
  36. logdetective-0.4.0.dist-info/METADATA +0 -333
  37. logdetective-0.4.0.dist-info/RECORD +0 -19
  38. {logdetective-0.4.0.dist-info → logdetective-2.11.0.dist-info}/entry_points.txt +0 -0
  39. {logdetective-0.4.0.dist-info → logdetective-2.11.0.dist-info/licenses}/LICENSE +0 -0
logdetective-2.11.0.dist-info/METADATA
@@ -0,0 +1,568 @@
1
+ Metadata-Version: 2.4
2
+ Name: logdetective
3
+ Version: 2.11.0
4
+ Summary: Log using LLM AI to search for build/test failures and provide ideas for fixing these.
5
+ License: Apache-2.0
6
+ License-File: LICENSE
7
+ Author: Jiri Podivin
8
+ Author-email: jpodivin@gmail.com
9
+ Requires-Python: >=3.11,<4.0
10
+ Classifier: Development Status :: 5 - Production/Stable
11
+ Classifier: Environment :: Console
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: Apache Software License
14
+ Classifier: Natural Language :: English
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Programming Language :: Python :: 3.14
20
+ Classifier: Topic :: Internet :: Log Analysis
21
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
22
+ Classifier: Topic :: Software Development :: Debuggers
23
+ Provides-Extra: server
24
+ Provides-Extra: server-testing
25
+ Provides-Extra: testing
26
+ Requires-Dist: aiohttp (>=3.7.4,<4.0.0)
27
+ Requires-Dist: aiolimiter (>=1.0.0,<2.0.0) ; extra == "server"
28
+ Requires-Dist: aioresponses (>=0.7.8,<0.8.0) ; extra == "testing"
29
+ Requires-Dist: alembic (>=1.13.3,<2.0.0) ; extra == "server" or extra == "server-testing"
30
+ Requires-Dist: asciidoc[testing] (>=10.2.1,<11.0.0) ; extra == "testing"
31
+ Requires-Dist: asyncpg (>=0.30.0,<1.0.0) ; extra == "server" or extra == "server-testing"
32
+ Requires-Dist: backoff (==2.2.1) ; extra == "server" or extra == "server-testing"
33
+ Requires-Dist: drain3 (>=0.9.11,<0.10.0)
34
+ Requires-Dist: fastapi (>=0.111.1,<1.0.0) ; extra == "server" or extra == "server-testing"
35
+ Requires-Dist: flexmock (>=0.12.2,<0.13.0) ; extra == "testing"
36
+ Requires-Dist: huggingface-hub (>=0.23.0,<1.4.0)
37
+ Requires-Dist: koji (>=1.35.0,<2.0.0) ; extra == "server" or extra == "server-testing"
38
+ Requires-Dist: llama-cpp-python (>0.2.56,!=0.2.86,<1.0.0)
39
+ Requires-Dist: matplotlib (>=3.8.4,<4.0.0) ; extra == "server" or extra == "server-testing"
40
+ Requires-Dist: numpy (>=1.26.0)
41
+ Requires-Dist: openai (>=1.82.1,<2.0.0) ; extra == "server" or extra == "server-testing"
42
+ Requires-Dist: pydantic (>=2.8.2,<3.0.0)
43
+ Requires-Dist: pytest (>=8.4.1,<9.0.0) ; extra == "testing"
44
+ Requires-Dist: pytest-asyncio (>=1.1.0,<2.0.0) ; extra == "testing"
45
+ Requires-Dist: pytest-cov[testing] (>=7.0.0,<8.0.0) ; extra == "testing"
46
+ Requires-Dist: pytest-mock (>=3.14.1,<4.0.0) ; extra == "server-testing"
47
+ Requires-Dist: python-gitlab (>=4.4.0)
48
+ Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
49
+ Requires-Dist: responses (>=0.25.7,<0.26.0) ; extra == "server-testing"
50
+ Requires-Dist: sentry-sdk[fastapi] (>=2.17.0,<3.0.0) ; extra == "server" or extra == "server-testing"
51
+ Requires-Dist: sqlalchemy (>=2.0.36,<3.0.0) ; extra == "server" or extra == "server-testing"
52
+ Project-URL: homepage, https://github.com/fedora-copr/logdetective
53
+ Project-URL: issues, https://github.com/fedora-copr/logdetective/issues
54
+ Description-Content-Type: text/markdown
55
+
56
+ Log Detective
57
+ =============
58
+
59
+ [![PyPI - Version](https://img.shields.io/pypi/v/logdetective?color=blue)][PyPI Releases]
60
+
61
+ [PyPI Releases]: https://pypi.org/project/logdetective/#history
62
+
63
+ A tool, service and RHEL process integration to analyze logs using a Large Language Model (LLM) and a [Drain template miner](https://github.com/logpai/Drain3).
64
+
65
+ The service that explains logs is available here: https://logdetective.com/explain
66
+
67
+ Note: if you are looking for the code of the logdetective.com website, it is in [github.com/fedora-copr/logdetective-website](https://github.com/fedora-copr/logdetective-website).
68
+
69
+ Installation
70
+ ------------
71
+
72
+ **Fedora 41+**
73
+
74
+ dnf install logdetective
75
+
76
+ **From the PyPI repository**
77
+
78
+ The logdetective project is published on [PyPI](https://pypi.org/project/logdetective/). The `pip` tool can be used for installation.
79
+
80
+ First, ensure that the necessary dependencies for the `llama-cpp-python` project are installed. For Fedora, install `gcc-c++`:
81
+
82
+ # for Fedora it will be:
83
+ dnf install gcc-c++
84
+
85
+ Then, install the `logdetective` project using pip:
86
+
87
+ pip install logdetective
88
+
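+ The package also declares optional extras (`server`, `testing`, and `server-testing`; see the `Provides-Extra` fields in the metadata above). For example, to pull in the server dependencies as well:
+
+ pip install 'logdetective[server]'
+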
89
+ **Local repository install**
90
+
91
+ Clone this repository and install with pip:
92
+
93
+ pip install .
94
+
95
+ Usage
96
+ -----
97
+
98
+ To analyze a log file, run the script with the following command line arguments:
99
+ - `file` (required): The path or URL of the log file to be analyzed.
100
+ - `--model` (optional, default: "Mistral-7B-Instruct-v0.3-GGUF"): The path or Hugging Face name of the language model to use for analysis. For models from Hugging Face, write them as `namespace/repo_name`. Since we use llama.cpp, the model must be in the `gguf` format. If the model is already on your machine, the download is skipped.
101
+ - `--filename_suffix` (optional, default: "Q4_K.gguf"): The suffix of the model file to use. This option applies when specifying a model by its Hugging Face repository.
102
+ - `--summarizer` (optional, default: "drain"): DISABLED. The LLM summarization option was removed; the argument is kept for backward compatibility only. It used to choose between an LLM and the Drain template miner as the log summarizer (a path to an existing language model file could also be given instead of a URL).
103
+ - `--n_lines` (optional, default: 8): DISABLED. The LLM summarization option was removed; the argument is kept for backward compatibility only. It used to set the number of lines per chunk for LLM analysis.
104
+ - `--n_clusters` (optional, default: 8): The number of clusters for Drain to organize log chunks into. This only applies when summarizing with Drain.
105
+ - `--skip_snippets` (optional): Path to a YAML file with patterns for skipping snippets (see the Skip Snippets section below).
106
+
107
+ Example usage:
108
+
109
+ logdetective https://example.com/logs.txt
110
+
111
+ Or if the log file is stored locally:
112
+
113
+ logdetective ./data/logs.txt
114
+
115
+ Examples of using different models. Note the use of the `--filename_suffix` (or `-F`) option, which is useful for quantized models:
116
+
117
+ logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_S.gguf
118
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --model 'fedora-copr/granite-3.2-8b-instruct-GGUF' -F Q4_K_M.gguf
119
+
120
+ Example of altered prompts:
121
+
122
+ cp ~/.local/lib/python3.13/site-packages/logdetective/prompts.yml ~/my-prompts.yml
123
+ vi ~/my-prompts.yml # edit the prompts there to better fit your needs
124
+ logdetective https://kojipkgs.fedoraproject.org//work/tasks/3367/131313367/build.log --prompts ~/my-prompts.yml
125
+
126
+
127
+ Note that streaming with some models (notably Meta-Llama-3) is broken and can be worked around with the `--no-stream` option:
128
+
129
+ logdetective https://example.com/logs.txt --model QuantFactory/Meta-Llama-3-8B-Instruct-GGUF --filename_suffix Q5_K_M.gguf --no-stream
130
+
131
+ Choice of LLM
132
+ -------------
133
+
134
+ While Log Detective is compatible with a wide range of LLMs, it does require an instruction-tuned model to function properly.
135
+
136
+ Whether or not the model has been trained to work with instructions can be determined by examining the model card, or simply by checking if it has `instruct` in its name.
137
+
138
+ When deployed as a server, Log Detective uses the `/chat/completions` API as defined by OpenAI. The API must support both the `system` and `user` roles in order to work properly with a system prompt.
139
+
140
+ Configuration fields `system_role` and `user_role` can be used to set role names for APIs with non-standard roles.
141
+
142
+ > **Note:**
143
+ > In cases where no system role is available, it is possible to set both fields to the same value. This will concatenate the system and standard prompts.
144
+ > This may have a negative impact on the coherence of the response.
145
+
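+ As an illustrative sketch only (the `inference` section name is an assumption here; check the bundled server configuration for where these fields actually live), such an override could look like:
+
+ ```
+ inference:
+   system_role: system
+   user_role: user
+ ```
+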
146
+ Real Example
147
+ ------------
148
+
149
+ Let's have a look at a real-world example. Log Detective can work with any logs, though we optimize it for RPM build logs.
150
+
151
+ We're going to analyze a failed build of a Python-based library that happened in the Fedora Koji build system:
152
+ ```
153
+ $ logdetective https://kojipkgs.fedoraproject.org//work/tasks/8157/117788157/build.log
154
+ Explanation:
155
+ [Child return code was: 0] : The rpm build process executed successfully without any errors until the 'check' phase.
156
+
157
+ [wamp/test/test_wamp_component_aio.py::test_asyncio_component] : Pytest found
158
+ two tests marked with '@pytest.mark.asyncio' but they are not async functions.
159
+ This warning can be ignored unless the tests are intended to be run
160
+ asynchronously.
161
+
162
+ [wamp/test/test_wamp_component_aio.py::test_asyncio_component_404] : Another
163
+ Pytest warning for the same issue as test_asyncio_component.
164
+
165
+ [-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html] :
166
+ This line is not related to the error, but it is a reminder to refer to Pytest
167
+ documentation for handling warnings.
168
+
169
+ [=========================== short test summary info
170
+ ============================] : This section shows the summary of tests that
171
+ were executed. It shows the number of tests passed, failed, skipped,
172
+ deselected, and warnings.
173
+
174
+ [FAILED wamp/test/test_wamp_cryptosign.py::TestSigVectors::test_vectors] : A
175
+ failed test is reported with the name of the test file, the name of the test
176
+ method, and the name of the test case that failed. In this case,
177
+ TestSigVectors::test_vectors failed.
178
+
179
+ [FAILED
180
+ websocket/test/test_websocket_protocol.py::WebSocketClientProtocolTests::test_auto_ping]
181
+ : Another failed test is reported with the same format as the previous test. In
182
+ this case, it is WebSocketClientProtocolTests::test_auto_ping that failed.
183
+
184
+ [FAILED websocket/test/test_websocket_protocol.py::WebSocketServerProtocolTests::test_interpolate_server_status_template]
185
+ : A third failed test is reported with the same format as the previous tests.
186
+ In this case, it is
187
+ WebSocketServerProtocolTests::test_interpolate_server_status_template that
188
+ failed.
189
+
190
+ [FAILED websocket/test/test_websocket_protocol.py::WebSocketServerProtocolTests::test_sendClose_reason_with_no_code]
191
+ : Another failed test is reported. This time it is
192
+ WebSocketServerProtocolTests::test_sendClose_reason_with_no_code.
193
+
194
+ [FAILED websocket/test/test_websocket_protocol.py::WebSocketServerProtocolTests::test_sendClose_str_reason]
195
+ : Another failed test is reported with the same test file and test method name,
196
+ but a different test case name: test_sendClose_str_reason.
197
+
198
+ [==== 13 failed, 195 passed, 64 skipped, 13 deselected, 2 warnings in 6.55s
199
+ =====] : This is the summary of all tests that were executed, including the
200
+ number of tests that passed, failed, were skipped, deselected, or produced
201
+ warnings. In this case, there were 13 failed tests among a total of 211 tests.
202
+
203
+ [error: Bad exit status from /var/tmp/rpm-tmp.8C0L25 (%check)] : An error
204
+ message is reported indicating that the 'check' phase of the rpm build process
205
+ failed with a bad exit status.
206
+ ```
207
+
208
+ It looks like a wall of text, similar to any log. The main difference is that here the most significant lines of the log file are wrapped in `[ ] : ` and followed by a textual explanation generated by Mistral 7B.
209
+
210
+
211
+ Contributing
212
+ ============
213
+
214
+ Contributions are welcome! Please submit a pull request if you have any improvements or new features to add. Make sure your changes pass all existing tests before submitting.
215
+ For bigger code changes, please consult us first by creating an issue.
216
+
217
+ We are always looking for more annotated snippets that will increase the quality of Log Detective's results. These contributions happen on our website: https://logdetective.com/
218
+
219
+ Log Detective performs several inference queries while evaluating a log file. Prompts are stored in a separate file (more info below: https://github.com/fedora-copr/logdetective?tab=readme-ov-file#system-prompts). If you have an idea for improvements to our prompts, please open a PR and we'd be happy to test it out.
220
+
221
+ To develop Log Detective, you should fork this repository, clone your fork, and install dependencies using pip:
222
+
223
+ git clone https://github.com/yourusername/logdetective.git
224
+ cd logdetective
225
+ pip install .
226
+
227
+ Make changes to the code as needed and run `pre-commit`.
228
+
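+ For example, assuming `pre-commit` is not yet installed on your system:
+
+ pip install pre-commit
+ pre-commit run --all-files
+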
229
+ Tests
230
+ -----
231
+
232
+ Tests for code used by the server must be placed in the `./tests/server/` path, while tests for general
233
+ code must be in the `./tests/base/` path.
234
+
235
+ [tox](https://github.com/tox-dev/tox) is used to manage tests. Please install the `tox` package from your distribution and run:
236
+
237
+ tox
238
+
239
+ This will create a virtual environment with dependencies and run all the tests. For more information, see the tox help.
240
+
241
+ To run only a specific tox environment, execute:
242
+
243
+ tox run -e style # to run flake8
244
+
245
+ or
246
+
247
+ tox run -e lint # to run pylint
248
+
249
+ Tox environments for base and server tests are separate; each installs different dependencies.
250
+
251
+ Running base tests:
252
+
253
+ tox run -e pytest_base
254
+
255
+ Running server tests:
256
+
257
+ tox run -e pytest_server
258
+
259
+ To run the server test suite you will need the PostgreSQL client utilities.
260
+
261
+ dnf install postgresql
262
+
263
+ Visual Studio Code testing with podman/docker-compose
264
+ -----------------------------------------------------
265
+
266
+ - In `Containerfile`, add `debugpy` as a dependency
267
+
268
+ ```diff
269
+ -RUN pip3 install llama_cpp_python==0.2.85 sse-starlette starlette-context \
270
+ +RUN pip3 install llama_cpp_python==0.2.85 sse-starlette starlette-context debugpy\
271
+ ```
272
+
273
+ - Rebuild server image with new dependencies
274
+
275
+ ```
276
+ make rebuild-server
277
+ ```
278
+
279
+ - Forward the debugging port in `docker-compose.yaml` for the `server` service.
280
+
281
+ ```diff
282
+ ports:
283
+ - "${LOGDETECTIVE_SERVER_PORT:-8080}:${LOGDETECTIVE_SERVER_PORT:-8080}"
284
+ + - "${VSCODE_DEBUG_PORT:-5678}:${VSCODE_DEBUG_PORT:-5678}"
285
+ ```
286
+
287
+ - Add the `debugpy` code to the logdetective file where you want the debugger to stop first.
288
+
289
+ ```diff
290
+ +import debugpy
291
+ +debugpy.listen(("0.0.0.0", 5678))
292
+ +debugpy.wait_for_client()
293
+ ```
294
+
295
+ - Prepare the `.vscode/launch.json` configuration for Visual Studio Code (at least the following configuration is needed)
296
+
297
+ ```json
298
+ {
299
+ "version": "0.2.0",
300
+ "configurations": [
301
+ {
302
+ "name": "Python Debugger: Remote Attach",
303
+ "type": "debugpy",
304
+ "request": "attach",
305
+ "connect": {
306
+ "host": "localhost",
307
+ "port": 5678
308
+ },
309
+ "pathMappings": [
310
+ {
311
+ "localRoot": "${workspaceFolder}",
312
+ "remoteRoot": "/src"
313
+ }
314
+ ]
315
+ }
316
+ ]
317
+ }
318
+ ```
319
+
320
+ - Run the server
321
+
322
+ ```
323
+ podman-compose up server
324
+ ```
325
+
326
+ - Run the Visual Studio Code debug configuration named *Python Debugger: Remote Attach*
327
+
328
+ Visual Studio Code CLI debugging
329
+ --------------------------------
330
+
331
+ When debugging the CLI application, the `./scripts/debug_runner.py` script can be used
332
+ as a stand-in for the stub script created during package installation.
333
+
334
+ Using `launch.json`, or a similar alternative, arguments can be specified for testing.
335
+
336
+ Example:
337
+
338
+ ```
339
+ {
340
+ "version": "0.2.0",
341
+ "configurations": [
342
+ {
343
+ "name": "Python: Debug Installed Module",
344
+ "type": "debugpy",
345
+ "request": "launch",
346
+ "console": "integratedTerminal",
347
+ "program": "${workspaceFolder}/scripts/debug_runner.py",
348
+ "args": [<URL_OF_A_LOG>]
349
+ }
350
+ ]
351
+ }
352
+ ```
353
+
354
+ Server
355
+ ======
356
+
357
+ A FastAPI-based server is implemented in `logdetective/server.py`. To run it in development mode,
358
+ first start the llama-cpp-python server with your chosen model as described in the llama-cpp-python [docs](https://llama-cpp-python.readthedocs.io/en/latest/server/#running-the-server).
359
+
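+ For example, a minimal local setup could look like this (the model file name is illustrative):
+
+ ```
+ pip install 'llama-cpp-python[server]'
+ python -m llama_cpp.server --model ./models/mistral-7b-instruct-v0.3.Q4_K.gguf --port 8000
+ ```
+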
360
+ Afterwards, start the logdetective server with `fastapi dev logdetective/server.py --port 8080`.
361
+ Requests can then be made with POST requests, for example:
362
+
363
+ curl --header "Content-Type: application/json" --request POST --data '{"url":"<YOUR_URL_HERE>"}' http://localhost:8080/analyze
364
+
365
+ For more accurate responses, you can use the `/analyze/staged` endpoint. This will submit snippets to the model for individual analysis first.
366
+ Afterwards, the model outputs are used to construct the final prompt. This will take substantially longer compared to plain `/analyze`:
367
+
368
+ curl --header "Content-Type: application/json" --request POST --data '{"url":"<YOUR_URL_HERE>"}' http://localhost:8080/analyze/staged
369
+
370
+ We also have a Containerfile and a compose file to run the logdetective server and llama server in containers.
371
+
372
+ Before doing `podman-compose up`, make sure to set the `MODELS_PATH` environment variable to point to a directory with your local model files:
373
+ ```
374
+ $ export MODELS_PATH=/path/to/models/
375
+ $ ll $MODELS_PATH
376
+ -rw-r--r--. 1 tt tt 3.9G apr 10 17:18 mistral-7b-instruct-v0.2.Q4_K_S.gguf
377
+ ```
378
+
379
+ If the variable is not set, `./models` is mounted inside by default.
380
+
381
+ A model can be downloaded from [our Hugging Face space](https://huggingface.co/fedora-copr) with:
382
+ ```
383
+ $ curl -L -o models/mistral-7b-instruct-v0.3.Q4_K.gguf https://huggingface.co/fedora-copr/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/ggml-model-Q4_K.gguf
384
+ ```
385
+
386
+ Filtering snippet analysis by relevance
387
+ ---------------------------------------
388
+
389
+ When using the `/analyze/staged` API, it is possible to filter analyzed snippets by their estimated relevance, submitting only those with the highest measure of relevance for final analysis.
390
+
391
+ **Note**: This feature requires an LLM provider with support for JSON structured output. Smaller models, even though technically capable of providing structured output, may not be able to appropriately estimate snippet relevance.
392
+
393
+ Filtering is disabled by default and must be enabled by setting the `top_k_snippets` field in the `general` section of the server configuration. The value indicates the number of snippets with the highest estimated relevance that will be submitted for final analysis.
394
+
395
+ Example:
396
+
397
+ ```
398
+ general:
399
+ devmode: False
400
+ packages:
401
+ - .*
402
+ excluded_packages:
403
+ - ^redhat-internal-.*
404
+ top_k_snippets: 10
405
+ ```
406
+
407
+ If all snippets are rated the same, the filtering is skipped and a warning is raised in the logs.
408
+ Values higher than the total number of snippets, as set by `max_clusters` in the `extractor` section of the config, also result in filtering being skipped.
409
+
410
+ Generate a new database revision with alembic
411
+ ---------------------------------------------
412
+
413
+ Modify the database models (`logdetective/server/database/model.py`).
414
+
415
+ Generate a new database revision with the command:
416
+
417
+ **Warning**: this command will start up a new server
418
+ and shut it down when the operation completes.
419
+
420
+ ```
421
+ CHANGE="A change comment" make alembic-generate-revision
422
+ ```
423
+
424
+ Our production instance
425
+ -----------------------
426
+
427
+ Our FastAPI server and model inference server run through `podman-compose` on an
428
+ Amazon AWS instance. The VM is provisioned by an
429
+ [ansible playbook](https://pagure.io/fedora-infra/ansible/blob/main/f/roles/logdetective/tasks/main.yml).
430
+
431
+ You can control the server through:
432
+
433
+ ```
434
+ cd /root/logdetective
435
+ podman-compose -f docker-compose-prod.yaml ...
436
+ ```
437
+
438
+ The `/root` directory contains valuable data. If moving to a new instance,
439
+ please back up the whole directory and transfer it to the new instance.
440
+
441
+ For some reason, we need to manually run this command after every reboot:
442
+
443
+ ```
444
+ nvidia-ctk cdi generate --output=/etc/cdi/nvidia.yaml
445
+ ```
446
+
447
+ The HTTPS certificate is generated with:
448
+
449
+ ```
450
+ certbot certonly --standalone -d logdetective01.fedorainfracloud.org
451
+ ```
452
+
453
+ Certificates need to be placed into the location specified by the `LOGDETECTIVE_CERTDIR`
454
+ env var, and the service should be restarted.
455
+
456
+ Querying statistics
457
+ -------------------
458
+
459
+ You can retrieve statistics about server requests and responses over a specified time period
460
+ using a browser, the `curl` command, or the `http` command (provided by the `httpie` package).
461
+
462
+ When no time period is specified, the query defaults to the last 2 days.
463
+
464
+ You can view request, response, and emoji statistics
465
+ - for the `/analyze` endpoint at http://localhost:8080/metrics/analyze
466
+ - for the `/analyze-staged` endpoint at http://localhost:8080/metrics/analyze-staged.
467
+ - for requests coming from GitLab at http://localhost:8080/metrics/analyze-gitlab.
468
+
469
+ You can retrieve single svg images at the following endpoints:
470
+ - http://localhost:8080/metrics/analyze/requests
471
+ - http://localhost:8080/metrics/analyze/responses
472
+ - http://localhost:8080/metrics/analyze-staged/requests
473
+ - http://localhost:8080/metrics/analyze-staged/responses
474
+ - http://localhost:8080/metrics/analyze-gitlab/requests
475
+ - http://localhost:8080/metrics/analyze-gitlab/responses
476
+ - http://localhost:8080/metrics/analyze-gitlab/emojis
477
+
478
+ Examples:
479
+
480
+ ```
481
+ http GET "localhost:8080/metrics/analyze/requests" > /tmp/plot.svg
482
+ curl "localhost:8080/metrics/analyze/staged/requests" > /tmp/plot.svg
483
+ ```
484
+
485
+ You can specify the time period in hours, days, or weeks.
486
+ The time period:
487
+ - cannot be less than one hour
488
+ - cannot be negative
489
+ - ends at the current time (when the query is made)
490
+ - starts at the specified time interval before the current time.
491
+
492
+ Examples:
493
+
494
+ ```
495
+ http GET "localhost:8080/metrics/analyze/requests?hours=5" > /tmp/plot_hours.svg
496
+ http GET "localhost:8080/metrics/analyze/requests?days=5" > /tmp/plot_days.svg
497
+ http GET "localhost:8080/metrics/analyze/requests?weeks=5" > /tmp/plot_weeks.svg
498
+ ```
499
+
500
+ System Prompts
501
+ --------------
502
+
503
+ Prompt templates used by Log Detective are stored in the `prompts.yml` file.
504
+ It is possible to modify the file in place, or provide your own.
505
+ In the CLI you can override the prompt templates location using the `--prompts` option,
506
+ while in the container service deployment the `LOGDETECTIVE_PROMPTS` environment variable
507
+ is used instead.
508
+
509
+ Prompts need to have a form compatible with Python [format string syntax](https://docs.python.org/3/library/string.html#format-string-syntax),
510
+ with replacement fields marked with curly braces (`{}`) left in place for the insertion of snippets.
511
+
512
+ The number of replacement fields in new prompts must be the same as in the originals,
513
+ although their position may be different.
514
+
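+ As an illustrative sketch only (the key name below is hypothetical; keep the keys used by the bundled `prompts.yml` when overriding it), a template with a single replacement field could look like:
+
+ ```
+ snippet_prompt: |
+   Explain the following log snippet and suggest a possible fix:
+   {}
+ ```
+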
515
+
516
+ Skip Snippets
517
+ -------------
518
+
519
+ Certain log chunks may not contribute to the analysis of the problem under any circumstances.
520
+ Users can specify regular expressions matching such log chunks, along with a simple description,
521
+ using the Skip Snippets feature.
522
+
523
+ Patterns to be skipped must be defined in a YAML file as a dictionary, where the key is a description
524
+ and the value is a regular expression. For example:
525
+
526
+ ```
527
+ child_exit_code_zero: "Child return code was: 0"
528
+ ```
529
+
530
+ Special care must be taken not to write a regular expression which may match
531
+ too many chunks, or which may be evaluated as a data structure by the YAML parser.
532
+
533
+ An example of a valid pattern definition file, `logdetective/skip_snippets.yml`,
534
+ can be used as a starting point and is used as the default if no other definition is provided.
535
+
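+ For example, a custom definitions file can be passed to the CLI (the file name below is illustrative):
+
+ logdetective ./data/logs.txt --skip_snippets ~/my-skip-patterns.yml
+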
536
+
537
+ Extracting snippets with csgrep
538
+ -------------------------------
539
+
540
+ When working with logs containing messages from GCC, it can be beneficial to employ
541
+ an additional extractor based on the `csgrep` tool, to ensure that the messages are kept intact.
542
+ Since `csgrep` is not available as a Python package, it must be installed separately,
543
+ with a package manager or from [source](https://github.com/csutils/csdiff).
544
+
545
+ The binary is available as part of the `csdiff` package on Fedora.
546
+
547
+ ```
548
+ dnf install csdiff
549
+ ```
550
+
551
+ When working with the Log Detective CLI, the csgrep extractor can be activated using the `--csgrep` option.
552
+ In server mode, the `csgrep` field in the `extractor` config needs to be set to `true`:
553
+
554
+ ```
555
+ csgrep: true
556
+ ```
557
+
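+ And on the command line, for example:
+
+ logdetective https://example.com/logs.txt --csgrep
+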
558
+ Both options are disabled by default, and an error will be produced if the option is used
559
+ but `csgrep` is not present in the `$PATH`.
560
+
561
+ The container images are built with `csdiff` installed.
562
+
563
+
564
+ License
565
+ -------
566
+
567
+ This project is licensed under the Apache-2.0 License - see the LICENSE file for details.
568
+
logdetective-2.11.0.dist-info/RECORD
@@ -0,0 +1,40 @@
1
+ logdetective/__init__.py,sha256=VqRngDcuFT7JWms8Qc_MsOvajoXVOKPr-S1kqY3Pqhc,59
2
+ logdetective/constants.py,sha256=aCwrkBrDdS_kbNESK-Z-ewg--DSzodV2OMgwEq3UE38,2456
3
+ logdetective/drain3.ini,sha256=ni91eCT1TwTznZwcqWoOVMQcGEnWhEDNCoTPF7cfGfY,1360
4
+ logdetective/extractors.py,sha256=vT-je4NkDgSj9rRtSeLpqBU52gIUnnVgJPHFbVihpCw,5993
5
+ logdetective/logdetective.py,sha256=S0abGrAQH2oi0MRisCV64Sa1UXdQLIfXFBA4tYAYqhM,6896
6
+ logdetective/models.py,sha256=uczmQtWFgSp_ZGssngdTM4qzPF1o64dCy0469GoSbjQ,2937
7
+ logdetective/prompts-summary-first.yml,sha256=kmyMFQmqFXpojkz7p3CyCWCPxMpFLpfDdMGisB4YwL0,808
8
+ logdetective/prompts-summary-only.yml,sha256=8U9AMJV8ePW-0CoXOXlQoO92DAJDeutIT8ntSkkm6W0,470
9
+ logdetective/prompts.yml,sha256=i3z6Jcb4ScVi7LsxOpDlKiXrcvql3qO_JnLzkAKMn1c,3870
10
+ logdetective/remote_log.py,sha256=28QvdQiy7RBnd86EKCq_A75P21gSNlCbgxJe5XAe9MA,2258
11
+ logdetective/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ logdetective/server/compressors.py,sha256=y4aFYJ_9CbYdKuAI39Kc9GQSdPN8cSJ2c_VAz3T47EE,5249
13
+ logdetective/server/config.py,sha256=cKUmNCJyNyEid0bPTiUjr8CQuBYBab5bC79Axk2h0z8,2525
14
+ logdetective/server/database/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
15
+ logdetective/server/database/base.py,sha256=HSV2tgye7iYTDzJD1Q5X7_nlLuTMIFP-hRVQMYxngHQ,2073
16
+ logdetective/server/database/models/__init__.py,sha256=zoZMCt1_7tewDa6eEIIX_xrdN-tLegSiPNg5NiYaV3o,850
17
+ logdetective/server/database/models/exceptions.py,sha256=4ED7FSSA1liV9-7VIN2BwUiz6XlmP97Y1loKnsoNdD8,507
18
+ logdetective/server/database/models/koji.py,sha256=HNWxHYDxf4JN9K2ue8-V8dH-0XY5ZmxqH7Y9lAIbILA,6436
19
+ logdetective/server/database/models/merge_request_jobs.py,sha256=MxiAVKQIsQMbFylBsmYBmVXYvid-4_5mwwXLfWdp6_w,19965
20
+ logdetective/server/database/models/metrics.py,sha256=4xsUdbtlp5PI1-iJQc5Dd8EPDgVVplD9hJRWeRDn43k,15443
21
+ logdetective/server/emoji.py,sha256=zSaYtLpSkpRCXpjMWnHR1bYwkmobMJASZ7YNalrd85U,5274
22
+ logdetective/server/exceptions.py,sha256=WN715KLL3ya6FiZ95v70VSbNuVhGuHFzxm2OeEPWQCw,981
23
+ logdetective/server/gitlab.py,sha256=putpnf8PfGsCZJsqWZA1rMovRGnyagoQmgpKLqtA-aQ,16743
24
+ logdetective/server/koji.py,sha256=LG1pRiKUFvYFRKzgQoUG3pUHfcEwMoaMNjUSMKw_pBA,5640
25
+ logdetective/server/llm.py,sha256=bmA6LsV80OdO60q4WLoKuehuVDEYq-HhBAYcZeLfrv8,10150
26
+ logdetective/server/metric.py,sha256=wLOpgcAch3rwhPA5P2YWUeMNAPsvRGseRjH5HlTb7JM,4529
27
+ logdetective/server/models.py,sha256=AJyycAEEl2o6TH4eAqVMlt5woqAB5M8ze2L575leA_I,19835
28
+ logdetective/server/plot.py,sha256=8LERgY3vQckaHZV2PZfOrZT8CjCAiji57QCmRW24Rfo,14697
29
+ logdetective/server/server.py,sha256=JueU-5c8t9h1CZy4gtoEeT8VSEirpeS0K3wrfqTPvAc,25381
30
+ logdetective/server/templates/base_response.html.j2,sha256=BJGGV_Xb0Lnue8kq32oG9lI5CQDf9vce7HMYsP-Pvb4,2040
31
+ logdetective/server/templates/gitlab_full_comment.md.j2,sha256=4UujUzl3lmdbNEADsxn3HVrjfUiUu2FvUlp9MDFGXQI,2321
32
+ logdetective/server/templates/gitlab_short_comment.md.j2,sha256=2krnMlGqqju2V_6pE0UqUR1P674OFaeX5BMyY5htTOQ,2022
33
+ logdetective/server/utils.py,sha256=0BZ8WmzXNEtkUty1kOyFbBxDZWL0Icc8BUrxuHw9uvs,4015
34
+ logdetective/skip_snippets.yml,sha256=reGlhPPCo06nNUJWiC2LY-OJOoPdcyOB7QBTSMeh0eg,487
35
+ logdetective/utils.py,sha256=yalhySOF_Gzmqx_Ft9qad3TplAfZ6LOmauGXEJfKWiE,9803
36
+ logdetective-2.11.0.dist-info/METADATA,sha256=SdXBkYlSoiVXhgPiM23luYQa0Y_BCX_el_mxTdJc0Zw,23273
37
+ logdetective-2.11.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
38
+ logdetective-2.11.0.dist-info/entry_points.txt,sha256=3K_vXja6PmcA8sNdUi63WdImeiNhVZcEGPTaoJmltfA,63
39
+ logdetective-2.11.0.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
40
+ logdetective-2.11.0.dist-info/RECORD,,
{logdetective-0.4.0.dist-info → logdetective-2.11.0.dist-info}/WHEEL
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 2.1.1
2
+ Generator: poetry-core 2.2.1
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any