logdetective 1.5.0-py3-none-any.whl → 1.7.0-py3-none-any.whl
- logdetective/constants.py +1 -1
- logdetective/extractors.py +23 -10
- logdetective/logdetective.py +18 -2
- logdetective/models.py +32 -1
- logdetective/remote_log.py +1 -1
- logdetective/server/config.py +9 -1
- logdetective/server/database/models/__init__.py +12 -0
- logdetective/server/database/models/exceptions.py +13 -0
- logdetective/server/database/models/koji.py +126 -0
- logdetective/server/database/models/merge_request_jobs.py +11 -10
- logdetective/server/database/models/metrics.py +1 -0
- logdetective/server/emoji.py +22 -12
- logdetective/server/exceptions.py +33 -0
- logdetective/server/gitlab.py +1 -4
- logdetective/server/koji.py +167 -0
- logdetective/server/llm.py +11 -2
- logdetective/server/metric.py +10 -10
- logdetective/server/models.py +91 -2
- logdetective/server/plot.py +36 -35
- logdetective/server/server.py +192 -2
- logdetective/server/templates/gitlab_full_comment.md.j2 +3 -1
- logdetective/server/templates/gitlab_short_comment.md.j2 +3 -1
- logdetective/skip_snippets.yml +12 -0
- logdetective/utils.py +25 -1
- {logdetective-1.5.0.dist-info → logdetective-1.7.0.dist-info}/METADATA +25 -2
- logdetective-1.7.0.dist-info/RECORD +38 -0
- logdetective-1.5.0.dist-info/RECORD +0 -33
- {logdetective-1.5.0.dist-info → logdetective-1.7.0.dist-info}/LICENSE +0 -0
- {logdetective-1.5.0.dist-info → logdetective-1.7.0.dist-info}/WHEEL +0 -0
- {logdetective-1.5.0.dist-info → logdetective-1.7.0.dist-info}/entry_points.txt +0 -0
logdetective/server/koji.py ADDED
@@ -0,0 +1,167 @@
+import asyncio
+import re
+from typing import Any, Callable, Optional
+
+import backoff
+import koji
+from logdetective.server.config import LOG
+from logdetective.server.exceptions import (
+    KojiInvalidTaskID,
+    LogDetectiveConnectionError,
+    LogsMissingError,
+    LogsTooLargeError,
+    UnknownTaskType,
+)
+
+
+FAILURE_LOG_REGEX = re.compile(r"(\w*\.log)")
+
+
+def connection_error_giveup(details: backoff._typing.Details) -> None:
+    """
+    Too many connection errors, give up.
+    """
+    LOG.error("Too many connection errors, giving up. %s", details["exception"])
+    raise LogDetectiveConnectionError() from details["exception"]
+
+
+@backoff.on_exception(
+    backoff.expo,
+    koji.GenericError,
+    max_time=60,
+)
+async def call_koji(func: Callable, *args, **kwargs) -> Any:
+    """
+    Call a Koji function asynchronously.
+
+    Automatically retries on connection errors.
+    """
+    try:
+        result = await asyncio.to_thread(func, *args, **kwargs)
+    except koji.ActionNotAllowed as e:
+        # User doesn't have permission to do this, don't retry.
+        raise LogDetectiveConnectionError(e) from e
+    return result
+
+
+async def get_failed_subtask_info(
+    koji_session: koji.ClientSession, task_id: int
+) -> dict[str, Any]:
+    """
+    If the provided task ID represents a task of type "build", this function
+    will return the buildArch or rebuildSRPM subtask that failed. If there is
+    more than one, it will return the first one found from the following
+    ordered list of processor architectures. If none is found among those
+    architectures, it will return the first failed architecture after a
+    standard sort.
+    * x86_64
+    * aarch64
+    * riscv
+    * ppc64le
+    * s390x
+
+    If the provided task ID represents a task of type "buildArch" or
+    "buildSRPMFromSCM" and has a task state of "FAILED", it will be returned
+    directly.
+
+    Any other task type will raise the UnknownTaskType exception.
+
+    If no task or subtask of the provided task is in the task state "FAILED",
+    this function will raise a NoFailedSubtask exception.
+    """
+
+    # Look up the current task first and check its type.
+    taskinfo = await call_koji(koji_session.getTaskInfo, task_id)
+    if not taskinfo:
+        raise KojiInvalidTaskID(f"Task {task_id} does not exist.")
+
+    # If the parent isn't FAILED, the children probably aren't either.
+    # There's one special case where the user may have canceled the
+    # overall task when one arch failed, so we should check that situation
+    # too.
+    if (
+        taskinfo["state"] != koji.TASK_STATES["FAILED"]
+        and taskinfo["state"] != koji.TASK_STATES["CANCELED"]  # noqa: W503 flake vs lint
+    ):
+        raise UnknownTaskType(f"The primary task state was {taskinfo['state']}.")
+
+    # If the task is buildArch or buildSRPMFromSCM, we can return it directly.
+    if taskinfo["method"] in ["buildArch", "buildSRPMFromSCM"]:
+        return taskinfo
+
+    # Look up the subtasks for the task.
+    response = await asyncio.to_thread(koji_session.getTaskDescendents, task_id)
+    subtasks = response[f"{task_id}"]
+    arch_tasks = {}
+    for subtask in subtasks:
+        if (
+            subtask["method"] not in ["buildArch", "buildSRPMFromSCM"]
+            or subtask["state"] != koji.TASK_STATES["FAILED"]  # noqa: W503 flake vs lint
+        ):
+            # Skip over any completed subtasks or non-build types
+            continue
+
+        arch_tasks[subtask["arch"]] = subtask
+
+    # Return the first architecture in the order of preference.
+    for arch in ["x86_64", "aarch64", "riscv", "ppc64le", "s390x"]:
+        if arch in arch_tasks:
+            return arch_tasks[arch]
+
+    # If none of those architectures were found, return the first one
+    # alphabetically
+    return arch_tasks[sorted(arch_tasks.keys())[0]]
+
+
+async def get_failed_log_from_task(
+    koji_session: koji.ClientSession, task_id: int, max_size: int
+) -> Optional[tuple[str, str]]:
+    """
+    Get the failed log from a task.
+
+    If the log is too large, this function will raise a LogsTooLargeError.
+    If the log is missing or garbage-collected, this function will raise a
+    LogsMissingError.
+    """
+    taskinfo = await get_failed_subtask_info(koji_session, task_id)
+
+    # Read the failure reason from the task. Note that the taskinfo returned
+    # above may not be the same as passed in, so we need to use taskinfo["id"]
+    # to look up the correct failure reason.
+    result = await call_koji(
+        koji_session.getTaskResult, taskinfo["id"], raise_fault=False
+    )
+
+    # Examine the result message for the appropriate log file.
+    match = FAILURE_LOG_REGEX.search(result["faultString"])
+    if match:
+        failure_log_name = match.group(1)
+    else:
+        # The best thing we can do at this point is return the
+        # task_failed.log, since it will probably contain the most
+        # relevant information
+        return result["faultString"]
+
+    # Check that the size of the log file is not enormous
+    task_output = await call_koji(
+        koji_session.listTaskOutput, taskinfo["id"], stat=True
+    )
+    if not task_output:
+        # If the task has been garbage-collected, the task output will be empty
+        raise LogsMissingError(
+            "No logs attached to this task. Possibly garbage-collected."
+        )
+
+    if failure_log_name not in task_output:
+        # This shouldn't be possible, but we'll check anyway.
+        raise LogsMissingError(f"{failure_log_name} could not be located")
+
+    if int(task_output[failure_log_name]["st_size"]) > max_size:
+        raise LogsTooLargeError(
+            f"{task_output[failure_log_name]['st_size']} exceeds max size {max_size}"
+        )
+
+    log_contents = await call_koji(
+        koji_session.downloadTaskOutput, taskinfo["id"], failure_log_name
+    )
+    return failure_log_name, log_contents.decode("utf-8")
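For orientation, a minimal usage sketch of the new module, assuming an anonymous koji.ClientSession against the Fedora hub; the task ID and the 1 MiB size cap are invented for illustration. Note that when no *.log name can be extracted from the fault string, get_failed_log_from_task returns the bare fault string rather than a (name, contents) tuple, so the sketch checks for that.

import asyncio

import koji

from logdetective.server.exceptions import LogsMissingError, LogsTooLargeError
from logdetective.server.koji import get_failed_log_from_task


async def main() -> None:
    # Anonymous, read-only session; lookups need no authentication.
    session = koji.ClientSession("https://koji.fedoraproject.org/kojihub")
    try:
        result = await get_failed_log_from_task(
            session, task_id=123456789, max_size=1024 * 1024  # hypothetical values
        )
    except (LogsMissingError, LogsTooLargeError) as e:
        print(f"Could not fetch log: {e}")
        return
    if isinstance(result, tuple):
        name, contents = result
        print(f"{name}: {len(contents)} characters")
    else:
        # Fallback branch: only the fault string was available.
        print(result)


asyncio.run(main())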
logdetective/server/llm.py CHANGED
@@ -16,7 +16,13 @@ from logdetective.utils import (
     compute_certainty,
     prompt_to_messages,
 )
-from logdetective.server.config import
+from logdetective.server.config import (
+    LOG,
+    SERVER_CONFIG,
+    PROMPT_CONFIG,
+    CLIENT,
+    SKIP_SNIPPETS_CONFIG,
+)
 from logdetective.server.models import (
     AnalyzedSnippet,
     InferenceConfig,
@@ -42,7 +48,10 @@ def format_analyzed_snippets(snippets: list[AnalyzedSnippet]) -> str:
 def mine_logs(log: str) -> List[Tuple[int, str]]:
     """Extract snippets from log text"""
     extractor = DrainExtractor(
-        verbose=True,
+        verbose=True,
+        context=True,
+        max_clusters=SERVER_CONFIG.extractor.max_clusters,
+        skip_snippets=SKIP_SNIPPETS_CONFIG,
     )

     LOG.info("Getting summary")
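For context on the new extractor arguments, a sketch of building the extractor the way mine_logs() now does, outside the server. The max_clusters value and the empty skip_snippets mapping are illustrative stand-ins for SERVER_CONFIG.extractor.max_clusters and SKIP_SNIPPETS_CONFIG (loaded from logdetective/skip_snippets.yml); applying the extractor as a callable is assumed from mine_logs()'s return type.

from logdetective.extractors import DrainExtractor

# Illustrative values; the server pulls these from its config instead.
extractor = DrainExtractor(
    verbose=True,
    context=True,
    max_clusters=8,
    skip_snippets={},
)

# Assumed usage: the extractor is applied to raw log text and yields
# (line_number, snippet) tuples, matching List[Tuple[int, str]].
snippets = extractor("error: linker command failed with exit code 1\n")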
logdetective/server/metric.py CHANGED
@@ -2,7 +2,7 @@ import io
 import inspect
 import datetime

-from typing import Union
+from typing import Optional, Union
 from functools import wraps

 import aiohttp
@@ -17,10 +17,10 @@ from logdetective.server.database.models import EndpointType, AnalyzeRequestMetr

 async def add_new_metrics(
     api_name: str,
-    url: str,
-    http_session: aiohttp.ClientSession,
-    received_at: datetime.datetime = None,
-    compressed_log_content: io.BytesIO = None,
+    url: Optional[str] = None,
+    http_session: Optional[aiohttp.ClientSession] = None,
+    received_at: Optional[datetime.datetime] = None,
+    compressed_log_content: Optional[io.BytesIO] = None,
 ) -> int:
     """Add a new database entry for a received request.

@@ -28,10 +28,10 @@ async def add_new_metrics(
     the endpoint from where the request was received,
     and the log (in a zip format) for which analysis is requested.
     """
-
-
-    compressed_log_content
-
+    if not compressed_log_content:
+        remote_log = RemoteLog(url, http_session)
+        compressed_log_content = await RemoteLogCompressor(remote_log).zip_content()
+
     return AnalyzeRequestMetrics.create(
         endpoint=EndpointType(api_name),
         compressed_log=compressed_log_content,
@@ -44,7 +44,7 @@ async def add_new_metrics(
 def update_metrics(
     metrics_id: int,
     response: Union[models.Response, models.StagedResponse, StreamingResponse],
-    sent_at: datetime.datetime = None,
+    sent_at: Optional[datetime.datetime] = None,
 ) -> None:
     """Update a database metric entry for a received request,
     filling data for the given response.
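A sketch of the two call shapes the relaxed signature now permits; the endpoint name "analyze" and the payloads are hypothetical:

import io

from logdetective.server.metric import add_new_metrics


async def record(http_session) -> None:
    # Shape 1: pass a URL and session; the log is downloaded and
    # compressed via RemoteLog/RemoteLogCompressor.
    await add_new_metrics(
        api_name="analyze",  # hypothetical; must map to an EndpointType value
        url="https://example.com/build.log",
        http_session=http_session,
    )

    # Shape 2: supply pre-compressed content and skip the download branch.
    await add_new_metrics(
        api_name="analyze",
        compressed_log_content=io.BytesIO(b"...zipped log bytes..."),
    )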
logdetective/server/models.py CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+from collections import defaultdict
 import datetime
 from logging import BASIC_FORMAT
 from typing import List, Dict, Optional
@@ -15,6 +16,7 @@ import aiohttp

 from aiolimiter import AsyncLimiter
 from gitlab import Gitlab
+import koji

 from logdetective.constants import (
     DEFAULT_TEMPERATURE,
@@ -132,6 +134,17 @@ class StagedResponse(Response):
     snippets: List[AnalyzedSnippet]


+class KojiStagedResponse(BaseModel):
+    """Model of data returned by the Log Detective API when a Koji build
+    analysis is requested. Contains a list of responses to prompts for
+    individual snippets.
+    """
+
+    task_id: int
+    log_file_name: str
+    response: StagedResponse
+
+
 class InferenceConfig(BaseModel):  # pylint: disable=too-many-instance-attributes
     """Model for inference configuration of logdetective server."""

@@ -247,7 +260,7 @@ class GitLabInstanceConfig(BaseModel):  # pylint: disable=too-many-instance-attr
     _http_session: aiohttp.ClientSession = None

     # Maximum size of artifacts.zip in MiB. (default: 300 MiB)
-    max_artifact_size: int = 300
+    max_artifact_size: int = 300 * 1024 * 1024

     def __init__(self, name: str, data: Optional[dict] = None):
         super().__init__()
@@ -259,7 +272,7 @@ class GitLabInstanceConfig(BaseModel):  # pylint: disable=too-many-instance-attr
         self.api_path = data.get("api_path", "/api/v4")
         self.api_token = data.get("api_token", None)
         self.webhook_secrets = data.get("webhook_secrets", None)
-        self.max_artifact_size = int(data.get("max_artifact_size")) * 1024 * 1024
+        self.max_artifact_size = int(data.get("max_artifact_size", 300)) * 1024 * 1024

         self.timeout = data.get("timeout", 5.0)
         self._conn = Gitlab(
@@ -323,6 +336,80 @@ class GitLabConfig(BaseModel):
         self.instances[instance.url] = instance


+class KojiInstanceConfig(BaseModel):
+    """Model for Koji configuration of logdetective server."""
+
+    name: str = ""
+    xmlrpc_url: str = ""
+    tokens: List[str] = []
+
+    _conn: Optional[koji.ClientSession] = None
+    _callbacks: defaultdict[int, set[str]] = defaultdict(set)
+
+    def __init__(self, name: str, data: Optional[dict] = None):
+        super().__init__()
+
+        self.name = name
+        if data is None:
+            # Set some reasonable defaults
+            self.xmlrpc_url = "https://koji.fedoraproject.org/kojihub"
+            self.tokens = []
+            self.max_artifact_size = 1024 * 1024
+            return
+
+        self.xmlrpc_url = data.get(
+            "xmlrpc_url", "https://koji.fedoraproject.org/kojihub"
+        )
+        self.tokens = data.get("tokens", [])
+
+    def get_connection(self):
+        """Get the Koji connection object"""
+        if not self._conn:
+            self._conn = koji.ClientSession(self.xmlrpc_url)
+        return self._conn
+
+    def register_callback(self, task_id: int, callback: str):
+        """Register a callback for a task"""
+        self._callbacks[task_id].add(callback)
+
+    def clear_callbacks(self, task_id: int):
+        """Unregister all callbacks for a task"""
+        try:
+            del self._callbacks[task_id]
+        except KeyError:
+            pass
+
+    def get_callbacks(self, task_id: int) -> set[str]:
+        """Get the callbacks for a task"""
+        return self._callbacks[task_id]
+
+
+class KojiConfig(BaseModel):
+    """Model for Koji configuration of logdetective server."""
+
+    instances: Dict[str, KojiInstanceConfig] = {}
+    analysis_timeout: int = 15
+    max_artifact_size: int = 300 * 1024 * 1024
+
+    def __init__(self, data: Optional[dict] = None):
+        super().__init__()
+        if data is None:
+            return
+
+        # Handle analysis_timeout with default 15
+        self.analysis_timeout = data.get("analysis_timeout", 15)
+
+        # Handle max_artifact_size with default 300 MiB
+        self.max_artifact_size = data.get("max_artifact_size", 300) * 1024 * 1024
+
+        # Handle instances dictionary
+        instances_data = data.get("instances", {})
+        for instance_name, instance_data in instances_data.items():
+            self.instances[instance_name] = KojiInstanceConfig(
+                instance_name, instance_data
+            )
+
+
 class LogConfig(BaseModel):
     """Logging configuration"""

@@ -375,6 +462,7 @@ class Config(BaseModel):
     snippet_inference: InferenceConfig = InferenceConfig()
     extractor: ExtractorConfig = ExtractorConfig()
     gitlab: GitLabConfig = GitLabConfig()
+    koji: KojiConfig = KojiConfig()
     general: GeneralConfig = GeneralConfig()

     def __init__(self, data: Optional[dict] = None):
@@ -387,6 +475,7 @@
         self.inference = InferenceConfig(data.get("inference"))
         self.extractor = ExtractorConfig(data.get("extractor"))
         self.gitlab = GitLabConfig(data.get("gitlab"))
+        self.koji = KojiConfig(data.get("koji"))
         self.general = GeneralConfig(data.get("general"))

         if snippet_inference := data.get("snippet_inference", None):
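A sketch of how the new koji section of the server config might be fed to these models; the dict below mirrors the keys read in __init__ and is illustrative, not a documented schema:

from logdetective.server.models import KojiConfig

koji_config = KojiConfig(
    {
        "analysis_timeout": 15,
        "max_artifact_size": 300,  # MiB; converted to bytes internally
        "instances": {
            "fedora": {
                "xmlrpc_url": "https://koji.fedoraproject.org/kojihub",
                "tokens": ["example-token"],  # hypothetical value
            }
        },
    }
)

# Lazily opens (and caches) a koji.ClientSession for the named instance.
session = koji_config.instances["fedora"].get_connection()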
logdetective/server/plot.py CHANGED
@@ -2,12 +2,10 @@ import datetime
 from typing import Optional, Union, Dict

 import numpy
-import
-import
-import matplotlib.pyplot
+from numpy.typing import ArrayLike
+from matplotlib import dates, colormaps, axes, pyplot, figure

-from
-from logdetective.server import models
+from logdetective.server.models import TimePeriod
 from logdetective.server.database.models import (
     AnalyzeRequestMetrics,
     EndpointType,
@@ -18,25 +16,25 @@ from logdetective.server.database.models import (
 class Definition:
     """Define plot details, given a time period."""

-    def __init__(self, time_period:
+    def __init__(self, time_period: TimePeriod):
         self.time_period = time_period
         self.days_diff = time_period.get_time_period().days
         if self.time_period.hours:
             self._freq = "H"
             self._time_format = "%Y-%m-%d %H"
-            self._locator =
+            self._locator = dates.HourLocator(interval=2)
             self._time_unit = "hour"
             self._time_delta = datetime.timedelta(hours=1)
         elif self.time_period.days:
             self._freq = "D"
             self._time_format = "%Y-%m-%d"
-            self._locator =
+            self._locator = dates.DayLocator(interval=1)
             self._time_unit = "day"
             self._time_delta = datetime.timedelta(days=1)
         elif self.time_period.weeks:
             self._freq = "W"
             self._time_format = "%Y-%m-%d"
-            self._locator =
+            self._locator = dates.WeekdayLocator(interval=1)
             self._time_unit = "week"
             self._time_delta = datetime.timedelta(weeks=1)

@@ -120,10 +118,10 @@ def create_time_series_arrays(


 def _add_bar_chart(
-    ax:
+    ax: axes.Axes,
     plot_def: Definition,
-    timestamps:
-    values:
+    timestamps: ArrayLike,
+    values: ArrayLike,
     label: str,
 ) -> None:
     """Add a blue bar chart"""
@@ -142,18 +140,18 @@ def _add_bar_chart(
     ax.set_ylabel(label, color="blue")
     ax.tick_params(axis="y", labelcolor="blue")

-    ax.xaxis.set_major_formatter(
+    ax.xaxis.set_major_formatter(dates.DateFormatter(plot_def.time_format))
     ax.xaxis.set_major_locator(plot_def.locator)

-
+    pyplot.xticks(rotation=45)

     ax.grid(True, alpha=0.3)


 def _add_line_chart(  # pylint: disable=too-many-arguments disable=too-many-positional-arguments
-    ax:
-    timestamps:
-    values:
+    ax: axes.Axes,
+    timestamps: ArrayLike,
+    values: ArrayLike,
     label: str,
     color: str = "red",
     set_label: bool = True,
@@ -166,10 +164,10 @@ def _add_line_chart(  # pylint: disable=too-many-arguments disable=too-many-posi


 def requests_per_time(
-    period_of_time:
+    period_of_time: TimePeriod,
     endpoint: EndpointType = EndpointType.ANALYZE,
     end_time: Optional[datetime.datetime] = None,
-) ->
+) -> figure.Figure:
     """
     Generate a visualization of request counts over a specified time period.

@@ -200,13 +198,13 @@ def requests_per_time(
         requests_counts, plot_def, start_time, end_time
     )

-    fig, ax1 =
+    fig, ax1 = pyplot.subplots(figsize=(12, 6))
     _add_bar_chart(ax1, plot_def, timestamps, counts, "Requests")

     ax2 = ax1.twinx()
     _add_line_chart(ax2, timestamps, numpy.cumsum(counts), "Cumulative Requests")

-
+    pyplot.title(
         f"Requests received for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
         f"to {end_time.strftime(plot_def.time_format)})"
     )
@@ -215,16 +213,16 @@ def requests_per_time(
     lines2, labels2 = ax2.get_legend_handles_labels()
     ax1.legend(lines1 + lines2, labels1 + labels2, loc="center")

-
+    pyplot.tight_layout()

     return fig


 def average_time_per_responses(  # pylint: disable=too-many-locals
-    period_of_time:
+    period_of_time: TimePeriod,
     endpoint: EndpointType = EndpointType.ANALYZE,
     end_time: Optional[datetime.datetime] = None,
-) ->
+) -> figure.Figure:
     """
     Generate a visualization of average response time and length over a specified time period.

@@ -259,7 +257,7 @@ def average_time_per_responses(  # pylint: disable=too-many-locals
         float,
     )

-    fig, ax1 =
+    fig, ax1 = pyplot.subplots(figsize=(12, 6))
     _add_bar_chart(
         ax1, plot_def, timestamps, average_time, "average response time (seconds)"
     )
@@ -280,7 +278,7 @@ def average_time_per_responses(  # pylint: disable=too-many-locals
     ax2 = ax1.twinx()
     _add_line_chart(ax2, timestamps, average_length, "average response length (chars)")

-
+    pyplot.title(
         f"average response time for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
         f"to {end_time.strftime(plot_def.time_format)})"
     )
@@ -289,7 +287,7 @@ def average_time_per_responses(  # pylint: disable=too-many-locals
     lines2, labels2 = ax2.get_legend_handles_labels()
     ax1.legend(lines1 + lines2, labels1 + labels2, loc="center")

-
+    pyplot.tight_layout()

     return fig

@@ -322,7 +320,7 @@ def _collect_emoji_data(


 def _plot_emoji_data(  # pylint: disable=too-many-locals
-    ax:
+    ax: axes.Axes,
     reactions_values_dict: Dict[str, Dict[datetime.datetime, int]],
     plot_def: Definition,
     start_time: datetime.datetime,
@@ -340,7 +338,10 @@ def _plot_emoji_data(  # pylint: disable=too-many-locals
     )
     all_counts.extend(counts)

-    colors = [
+    colors = [
+        colormaps["viridis"](i)
+        for i in numpy.linspace(0, 1, len(reactions_values_dict))
+    ]

     first_emoji = True
     for i, (emoji, dict_counts) in enumerate(reactions_values_dict.items()):
@@ -369,9 +370,9 @@ def _plot_emoji_data(  # pylint: disable=too-many-locals


 def emojis_per_time(
-    period_of_time:
+    period_of_time: TimePeriod,
     end_time: Optional[datetime.datetime] = None,
-) ->
+) -> figure.Figure:
     """
     Generate a visualization of overall emoji feedback
     over a specified time period.
@@ -396,13 +397,13 @@ def emojis_per_time(
     start_time = period_of_time.get_period_start_time(end_time)
     reactions_values_dict = _collect_emoji_data(start_time, plot_def)

-    fig, ax =
+    fig, ax = pyplot.subplots(figsize=(12, 6))

     emoji_lines, emoji_labels = _plot_emoji_data(
         ax, reactions_values_dict, plot_def, start_time, end_time
     )

-
+    pyplot.title(
         f"Emoji feedback ({start_time.strftime(plot_def.time_format)} "
         f"to {end_time.strftime(plot_def.time_format)})"
     )
@@ -419,11 +420,11 @@
     ax.set_ylabel("Count")

     # Format x-axis
-    ax.xaxis.set_major_formatter(
+    ax.xaxis.set_major_formatter(dates.DateFormatter(plot_def.time_format))
     ax.xaxis.set_major_locator(plot_def.locator)
     ax.tick_params(axis="x", labelrotation=45)
     ax.grid(True, alpha=0.3)

-
+    pyplot.tight_layout()

     return fig
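With the signatures now fully typed, a closing usage sketch: TimePeriod(weeks=2) is assumed from the hours/days/weeks branches in Definition, and a reachable metrics database is required for the query behind the plot.

from logdetective.server.models import TimePeriod
from logdetective.server.plot import requests_per_time

# Two-week window ending now; returns a matplotlib.figure.Figure.
fig = requests_per_time(TimePeriod(weeks=2))
fig.savefig("requests.png")  # standard matplotlib Figure API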