logdetective 0.5.10__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
logdetective/constants.py CHANGED
@@ -16,6 +16,8 @@ Snippets are delimited with '================'.
16
16
 
17
17
  Finally, drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
18
18
 
19
+ Explanation of the issue, and recommended solution, should take handful of sentences.
20
+
19
21
  Snippets:
20
22
 
21
23
  {}
@@ -38,6 +40,8 @@ Answer:
38
40
  SNIPPET_PROMPT_TEMPLATE = """
39
41
  Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
40
42
 
43
+ Your analysis must be as concise as possible, while keeping relevant information intact.
44
+
41
45
  Snippet:
42
46
 
43
47
  {}
@@ -55,6 +59,8 @@ Snippets are delimited with '================'.
55
59
 
56
60
  Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
57
61
 
62
+ Explanation of the issue, and recommended solution, should take handful of sentences.
63
+
58
64
  Snippets:
59
65
 
60
66
  {}
@@ -64,3 +70,5 @@ Analysis:
64
70
  """
65
71
 
66
72
  SNIPPET_DELIMITER = "================"
73
+
74
+ DEFAULT_TEMPERATURE = 0.8
@@ -1,9 +1,12 @@
1
1
  import argparse
2
+ import asyncio
2
3
  import logging
3
4
  import sys
4
5
  import os
5
6
 
6
- from logdetective.constants import DEFAULT_ADVISOR
7
+ import aiohttp
8
+
9
+ from logdetective.constants import DEFAULT_ADVISOR, DEFAULT_TEMPERATURE
7
10
  from logdetective.utils import (
8
11
  process_log,
9
12
  initialize_model,
@@ -73,10 +76,16 @@ def setup_args():
73
76
  default=f"{os.path.dirname(__file__)}/prompts.yml",
74
77
  help="Path to prompt configuration file."
75
78
  )
79
+ parser.add_argument(
80
+ "--temperature",
81
+ type=float,
82
+ default=DEFAULT_TEMPERATURE,
83
+ help="Temperature for inference."
84
+ )
76
85
  return parser.parse_args()
77
86
 
78
87
 
79
- def main(): # pylint: disable=too-many-statements,too-many-locals
88
+ async def run(): # pylint: disable=too-many-statements,too-many-locals
80
89
  """Main execution function."""
81
90
  args = setup_args()
82
91
 
@@ -122,13 +131,14 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
122
131
 
123
132
  LOG.info("Getting summary")
124
133
 
125
- try:
126
- log = retrieve_log_content(args.file)
127
- except ValueError as e:
128
- # file does not exists
129
- LOG.error(e)
130
- sys.exit(4)
131
- log_summary = extractor(log)
134
+ async with aiohttp.ClientSession() as http:
135
+ try:
136
+ log = await retrieve_log_content(http, args.file)
137
+ except ValueError as e:
138
+ # file does not exist
139
+ LOG.error(e)
140
+ sys.exit(4)
141
+ log_summary = extractor(log)
132
142
 
133
143
  ratio = len(log_summary) / len(log.split("\n"))
134
144
 
@@ -147,6 +157,7 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
147
157
  model,
148
158
  stream,
149
159
  prompt_template=prompts_configuration.prompt_template,
160
+ temperature=args.temperature,
150
161
  )
151
162
  probs = []
152
163
  print("Explanation:")
@@ -175,5 +186,10 @@ def main(): # pylint: disable=too-many-statements,too-many-locals
175
186
  print(f"\nResponse certainty: {certainty:.2f}%\n")
176
187
 
177
188
 
189
+ def main():
190
+ """ Evaluate logdetective program and wait for it to finish """
191
+ asyncio.run(run())
192
+
193
+
178
194
  if __name__ == "__main__":
179
195
  main()
logdetective/prompts.yml CHANGED
@@ -13,6 +13,8 @@ prompt_template: |
13
13
 
14
14
  Finally, drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
15
15
 
16
+ Explanation of the issue, and recommended solution, should take handful of sentences.
17
+
16
18
  Snippets:
17
19
 
18
20
  {}
@@ -33,6 +35,8 @@ summarization_prompt_template: |
33
35
  snippet_prompt_template: |
34
36
  Analyse following RPM build log snippet. Describe contents accurately, without speculation or suggestions for resolution.
35
37
 
38
+ Your analysis must be as concise as possible, while keeping relevant information intact.
39
+
36
40
  Snippet:
37
41
 
38
42
  {}
@@ -48,6 +52,8 @@ prompt_template_staged: |
48
52
 
49
53
  Drawing on information from all snippets, provide complete explanation of the issue and recommend solution.
50
54
 
55
+ Explanation of the issue, and recommended solution, should take handful of sentences.
56
+
51
57
  Snippets:
52
58
 
53
59
  {}
@@ -97,17 +97,35 @@ class AnalyzeRequestMetrics(Base):
97
97
  metrics.response_certainty = response_certainty
98
98
  session.add(metrics)
99
99
 
100
+ @classmethod
101
+ def get_postgres_time_format(cls, time_format):
102
+ """Map a Python time format to the PostgreSQL format."""
103
+ if time_format == "%Y-%m-%d":
104
+ pgsql_time_format = "YYYY-MM-DD"
105
+ else:
106
+ pgsql_time_format = "YYYY-MM-DD HH24"
107
+ return pgsql_time_format
108
+
109
+ @classmethod
110
+ def get_dictionary_with_datetime_keys(
111
+ cls, time_format: str, values_dict: dict[str, any]
112
+ ) -> dict[datetime.datetime, any]:
113
+ """Convert from a dictionary with str keys to a dictionary with datetime keys"""
114
+ new_dict = {
115
+ datetime.datetime.strptime(r[0], time_format): r[1] for r in values_dict
116
+ }
117
+ return new_dict
118
+
100
119
  @classmethod
101
120
  def _get_requests_by_time_for_postgres(
102
121
  cls, start_time, end_time, time_format, endpoint
103
122
  ):
104
- """func.to_char is PostgreSQL specific.
123
+ """Get total requests number in time period.
124
+
125
+ func.to_char is PostgreSQL specific.
105
126
  Let's unit tests replace this function with the SQLite version.
106
127
  """
107
- if time_format == "%Y-%m-%d":
108
- pgsql_time_format = "YYYY-MM-DD"
109
- else:
110
- pgsql_time_format = "YYYY-MM-DD HH24"
128
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
111
129
 
112
130
  requests_by_time_format = (
113
131
  select(
@@ -123,10 +141,12 @@ class AnalyzeRequestMetrics(Base):
123
141
  return requests_by_time_format
124
142
 
125
143
  @classmethod
126
- def _get_requests_by_time_for_sqllite(
144
+ def _get_requests_by_time_for_sqlite(
127
145
  cls, start_time, end_time, time_format, endpoint
128
146
  ):
129
- """func.strftime is SQLite specific.
147
+ """Get total requests number in time period.
148
+
149
+ func.strftime is SQLite specific.
130
150
  Use this function in unit test using flexmock:
131
151
 
132
152
  flexmock(AnalyzeRequestMetrics).should_receive("_get_requests_by_time_for_postgres")
@@ -178,9 +198,193 @@ class AnalyzeRequestMetrics(Base):
178
198
  counts = session.execute(count_requests_by_time_format)
179
199
  results = counts.fetchall()
180
200
 
181
- # Convert results to a dictionary with proper datetime keys
182
- counts_dict = {
183
- datetime.datetime.strptime(r[0], time_format): r[1] for r in results
184
- }
201
+ return cls.get_dictionary_with_datetime_keys(time_format, results)
202
+
203
+ @classmethod
204
+ def _get_average_responses_times_for_postgres(
205
+ cls, start_time, end_time, time_format, endpoint
206
+ ):
207
+ """Get average responses time.
208
+
209
+ func.to_char is PostgreSQL specific.
210
+ Let's unit tests replace this function with the SQLite version.
211
+ """
212
+ with transaction(commit=False) as session:
213
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
214
+
215
+ average_responses_times = (
216
+ select(
217
+ func.to_char(cls.request_received_at, pgsql_time_format).label(
218
+ "time_range"
219
+ ),
220
+ (
221
+ func.avg(
222
+ func.extract( # pylint: disable=not-callable
223
+ "epoch", cls.response_sent_at - cls.request_received_at
224
+ )
225
+ )
226
+ ).label("average_response_seconds"),
227
+ )
228
+ .filter(cls.request_received_at.between(start_time, end_time))
229
+ .filter(cls.endpoint == endpoint)
230
+ .group_by("time_range")
231
+ .order_by("time_range")
232
+ )
233
+
234
+ results = session.execute(average_responses_times).fetchall()
235
+ return results
236
+
237
+ @classmethod
238
+ def _get_average_responses_times_for_sqlite(
239
+ cls, start_time, end_time, time_format, endpoint
240
+ ):
241
+ """Get average responses time.
242
+
243
+ func.strftime is SQLite specific.
244
+ Use this function in unit test using flexmock:
245
+
246
+ flexmock(AnalyzeRequestMetrics).should_receive("_get_average_responses_times_for_postgres")
247
+ .replace_with(AnalyzeRequestMetrics._get_average_responses_times_for_sqlite)
248
+ """
249
+ with transaction(commit=False) as session:
250
+ average_responses_times = (
251
+ select(
252
+ func.strftime(time_format, cls.request_received_at).label(
253
+ "time_range"
254
+ ),
255
+ (
256
+ func.avg(
257
+ func.julianday(cls.response_sent_at)
258
+ - func.julianday(cls.request_received_at) # noqa: W503 flake8 vs ruff
259
+ )
260
+ * 86400 # noqa: W503 flake8 vs ruff
261
+ ).label("average_response_seconds"),
262
+ )
263
+ .filter(cls.request_received_at.between(start_time, end_time))
264
+ .filter(cls.endpoint == endpoint)
265
+ .group_by("time_range")
266
+ .order_by("time_range")
267
+ )
268
+
269
+ results = session.execute(average_responses_times).fetchall()
270
+ return results
271
+
272
+ @classmethod
273
+ def get_responses_average_time_in_period(
274
+ cls,
275
+ start_time: datetime.datetime,
276
+ end_time: datetime.datetime,
277
+ time_format: str,
278
+ endpoint: Optional[EndpointType] = EndpointType.ANALYZE,
279
+ ) -> dict[datetime.datetime, int]:
280
+ """
281
+ Get a dictionary with average responses times
282
+ grouped by time units within a specified period.
283
+
284
+ Args:
285
+ start_time (datetime): The start of the time period to query
286
+ end_time (datetime): The end of the time period to query
287
+ time_format (str): The strftime format string to format timestamps (e.g., '%Y-%m-%d')
288
+ endpoint (EndpointType): The analyze API endpoint to query
185
289
 
186
- return counts_dict
290
+ Returns:
291
+ dict[datetime, int]: A dictionary mapping datetime objects
292
+ to average responses times
293
+ """
294
+ with transaction(commit=False) as _:
295
+ average_responses_times = cls._get_average_responses_times_for_postgres(
296
+ start_time, end_time, time_format, endpoint
297
+ )
298
+
299
+ return cls.get_dictionary_with_datetime_keys(
300
+ time_format, average_responses_times
301
+ )
302
+
303
+ @classmethod
304
+ def _get_average_responses_lengths_for_postgres(
305
+ cls, start_time, end_time, time_format, endpoint
306
+ ):
307
+ """Get average responses length.
308
+
309
+ func.to_char is PostgreSQL specific.
310
+ Let's unit tests replace this function with the SQLite version.
311
+ """
312
+ with transaction(commit=False) as session:
313
+ pgsql_time_format = cls.get_postgres_time_format(time_format)
314
+
315
+ average_responses_lengths = (
316
+ select(
317
+ func.to_char(cls.request_received_at, pgsql_time_format).label(
318
+ "time_range"
319
+ ),
320
+ (func.avg(cls.response_length)).label("average_responses_length"),
321
+ )
322
+ .filter(cls.request_received_at.between(start_time, end_time))
323
+ .filter(cls.endpoint == endpoint)
324
+ .group_by("time_range")
325
+ .order_by("time_range")
326
+ )
327
+
328
+ results = session.execute(average_responses_lengths).fetchall()
329
+ return results
330
+
331
+ @classmethod
332
+ def _get_average_responses_lengths_for_sqlite(
333
+ cls, start_time, end_time, time_format, endpoint
334
+ ):
335
+ """Get average responses length.
336
+
337
+ func.strftime is SQLite specific.
338
+ Use this function in unit test using flexmock:
339
+
340
+ flexmock(AnalyzeRequestMetrics)
341
+ .should_receive("_get_average_responses_lengths_for_postgres")
342
+ .replace_with(AnalyzeRequestMetrics._get_average_responses_lengths_for_sqlite)
343
+ """
344
+ with transaction(commit=False) as session:
345
+ average_responses_lengths = (
346
+ select(
347
+ func.strftime(time_format, cls.request_received_at).label(
348
+ "time_range"
349
+ ),
350
+ (func.avg(cls.response_length)).label("average_responses_length"),
351
+ )
352
+ .filter(cls.request_received_at.between(start_time, end_time))
353
+ .filter(cls.endpoint == endpoint)
354
+ .group_by("time_range")
355
+ .order_by("time_range")
356
+ )
357
+
358
+ results = session.execute(average_responses_lengths).fetchall()
359
+ return results
360
+
361
+ @classmethod
362
+ def get_responses_average_length_in_period(
363
+ cls,
364
+ start_time: datetime.datetime,
365
+ end_time: datetime.datetime,
366
+ time_format: str,
367
+ endpoint: Optional[EndpointType] = EndpointType.ANALYZE,
368
+ ) -> dict[datetime.datetime, int]:
369
+ """
370
+ Get a dictionary with average responses length
371
+ grouped by time units within a specified period.
372
+
373
+ Args:
374
+ start_time (datetime): The start of the time period to query
375
+ end_time (datetime): The end of the time period to query
376
+ time_format (str): The strftime format string to format timestamps (e.g., '%Y-%m-%d')
377
+ endpoint (EndpointType): The analyze API endpoint to query
378
+
379
+ Returns:
380
+ dict[datetime, int]: A dictionary mapping datetime objects
381
+ to average responses lengths
382
+ """
383
+ with transaction(commit=False) as _:
384
+ average_responses_lengths = cls._get_average_responses_lengths_for_postgres(
385
+ start_time, end_time, time_format, endpoint
386
+ )
387
+
388
+ return cls.get_dictionary_with_datetime_keys(
389
+ time_format, average_responses_lengths
390
+ )
@@ -41,12 +41,10 @@ def update_metrics(
41
41
  sent_at if sent_at else datetime.datetime.now(datetime.timezone.utc)
42
42
  )
43
43
  response_length = None
44
- if hasattr(response, "explanation") and "choices" in response.explanation:
45
- response_length = sum(
46
- len(choice["text"])
47
- for choice in response.explanation["choices"]
48
- if "text" in choice
49
- )
44
+ if hasattr(response, "explanation") and isinstance(
45
+ response.explanation, models.Explanation
46
+ ):
47
+ response_length = len(response.explanation.text)
50
48
  response_certainty = (
51
49
  response.response_certainty if hasattr(response, "response_certainty") else None
52
50
  )
@@ -2,7 +2,9 @@ import datetime
2
2
  from logging import BASIC_FORMAT
3
3
  from typing import List, Dict, Optional, Literal
4
4
 
5
- from pydantic import BaseModel, Field, model_validator, field_validator
5
+ from pydantic import BaseModel, Field, model_validator, field_validator, NonNegativeFloat
6
+
7
+ from logdetective.constants import DEFAULT_TEMPERATURE
6
8
 
7
9
 
8
10
  class BuildLog(BaseModel):
@@ -95,6 +97,8 @@ class InferenceConfig(BaseModel):
95
97
  )
96
98
  url: str = ""
97
99
  api_token: str = ""
100
+ model: str = ""
101
+ temperature: NonNegativeFloat = DEFAULT_TEMPERATURE
98
102
 
99
103
  def __init__(self, data: Optional[dict] = None):
100
104
  super().__init__()
@@ -106,6 +110,8 @@ class InferenceConfig(BaseModel):
106
110
  self.api_endpoint = data.get("api_endpoint", "/chat/completions")
107
111
  self.url = data.get("url", "")
108
112
  self.api_token = data.get("api_token", "")
113
+ self.model = data.get("model", "default-model")
114
+ self.temperature = data.get("temperature", DEFAULT_TEMPERATURE)
109
115
 
110
116
 
111
117
  class ExtractorConfig(BaseModel):
@@ -150,7 +156,8 @@ class LogConfig(BaseModel):
150
156
  """Logging configuration"""
151
157
 
152
158
  name: str = "logdetective"
153
- level: str | int = "INFO"
159
+ level_stream: str | int = "INFO"
160
+ level_file: str | int = "INFO"
154
161
  path: str | None = None
155
162
  format: str = BASIC_FORMAT
156
163
 
@@ -160,7 +167,8 @@ class LogConfig(BaseModel):
160
167
  return
161
168
 
162
169
  self.name = data.get("name", "logdetective")
163
- self.level = data.get("level", "INFO").upper()
170
+ self.level_stream = data.get("level_stream", "INFO").upper()
171
+ self.level_file = data.get("level_file", "INFO").upper()
164
172
  self.path = data.get("path")
165
173
  self.format = data.get("format", BASIC_FORMAT)
166
174
 
@@ -1,5 +1,5 @@
1
1
  import datetime
2
- from typing import Optional
2
+ from typing import Optional, Union
3
3
 
4
4
  import numpy
5
5
  import matplotlib
@@ -62,24 +62,24 @@ class Definition:
62
62
 
63
63
 
64
64
  def create_time_series_arrays(
65
- counts_dict: dict[datetime.datetime, int],
65
+ values_dict: dict[datetime.datetime, int],
66
+ plot_def: Definition,
66
67
  start_time: datetime.datetime,
67
68
  end_time: datetime.datetime,
68
- time_delta: datetime.timedelta,
69
- time_format: str,
69
+ value_type: Optional[Union[int, float]] = int,
70
70
  ) -> tuple[numpy.ndarray, numpy.ndarray]:
71
- """Create time series arrays from a dictionary of counts.
71
+ """Create time series arrays from a dictionary of values.
72
72
 
73
73
  This function generates two aligned numpy arrays:
74
74
  1. An array of timestamps from start_time to end_time
75
- 2. A corresponding array of counts for each timestamp
75
+ 2. A corresponding array of values for each timestamp
76
76
 
77
77
  The timestamps are truncated to the precision specified by time_format.
78
- If a timestamp in counts_dict matches a generated timestamp, its count is used;
79
- otherwise, the count defaults to zero.
78
+ If a timestamp in values_dict matches a generated timestamp, its value is used;
79
+ otherwise, the value defaults to zero.
80
80
 
81
81
  Args:
82
- counts_dict: Dictionary mapping timestamps to their respective counts
82
+ values_dict: Dictionary mapping timestamps to their respective values
83
83
  start_time: The starting timestamp of the time series
84
84
  end_time: The ending timestamp of the time series
85
85
  time_delta: The time interval between consecutive timestamps
@@ -88,67 +88,70 @@ def create_time_series_arrays(
88
88
  Returns:
89
89
  A tuple containing:
90
90
  - numpy.ndarray: Array of timestamps
91
- - numpy.ndarray: Array of corresponding counts
91
+ - numpy.ndarray: Array of corresponding values
92
92
  """
93
- num_intervals = int((end_time - start_time) / time_delta) + 1
93
+ num_intervals = int((end_time - start_time) / plot_def.time_delta) + 1
94
94
 
95
95
  timestamps = numpy.array(
96
96
  [
97
97
  datetime.datetime.strptime(
98
- (start_time + i * time_delta).strftime(format=time_format), time_format
98
+ (start_time + i * plot_def.time_delta).strftime(
99
+ format=plot_def.time_format
100
+ ),
101
+ plot_def.time_format,
99
102
  )
100
103
  for i in range(num_intervals)
101
104
  ]
102
105
  )
103
- counts = numpy.zeros(num_intervals, dtype=int)
106
+ values = numpy.zeros(num_intervals, dtype=value_type)
104
107
 
105
108
  timestamp_to_index = {timestamp: i for i, timestamp in enumerate(timestamps)}
106
109
 
107
- for timestamp, count in counts_dict.items():
110
+ for timestamp, count in values_dict.items():
108
111
  if timestamp in timestamp_to_index:
109
- counts[timestamp_to_index[timestamp]] = count
112
+ values[timestamp_to_index[timestamp]] = count
110
113
 
111
- return timestamps, counts
114
+ return timestamps, values
112
115
 
113
116
 
114
- def _add_bar_chart_for_requests_count(
115
- ax1: matplotlib.figure.Axes,
117
+ def _add_bar_chart(
118
+ ax: matplotlib.figure.Axes,
116
119
  plot_def: Definition,
117
120
  timestamps: numpy.array,
118
- counts: numpy.array,
121
+ values: numpy.array,
122
+ label: str,
119
123
  ) -> None:
120
- """Add a bar chart for requests count (axes 1)"""
124
+ """Add a blue bar chart"""
121
125
  bar_width = (
122
126
  0.8 * plot_def.time_delta.total_seconds() / 86400
123
127
  ) # Convert to days for matplotlib
124
- ax1.bar(
128
+ ax.bar(
125
129
  timestamps,
126
- counts,
130
+ values,
127
131
  width=bar_width,
128
132
  alpha=0.7,
129
133
  color="skyblue",
130
- label="Requests",
134
+ label=label,
131
135
  )
132
- ax1.set_xlabel("Time")
133
- ax1.set_ylabel("Requests", color="blue")
134
- ax1.tick_params(axis="y", labelcolor="blue")
136
+ ax.set_xlabel("Time")
137
+ ax.set_ylabel(label, color="blue")
138
+ ax.tick_params(axis="y", labelcolor="blue")
135
139
 
136
- ax1.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(plot_def.time_format))
137
- ax1.xaxis.set_major_locator(plot_def.locator)
140
+ ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(plot_def.time_format))
141
+ ax.xaxis.set_major_locator(plot_def.locator)
138
142
 
139
143
  matplotlib.pyplot.xticks(rotation=45)
140
144
 
141
- ax1.grid(True, alpha=0.3)
145
+ ax.grid(True, alpha=0.3)
142
146
 
143
147
 
144
- def _add_cumulative_line_for_requests_count(
145
- ax2: matplotlib.figure.Axes, timestamps: numpy.array, counts: numpy.array
148
+ def _add_line_chart(
149
+ ax: matplotlib.figure.Axes, timestamps: numpy.array, values: numpy.array, label: str
146
150
  ) -> None:
147
- """Add cumulative line on secondary y-axis"""
148
- cumulative = numpy.cumsum(counts)
149
- ax2.plot(timestamps, cumulative, "r-", linewidth=2, label="Cumulative")
150
- ax2.set_ylabel("Cumulative Requests", color="red")
151
- ax2.tick_params(axis="y", labelcolor="red")
151
+ """Add a red line chart"""
152
+ ax.plot(timestamps, values, "r-", linewidth=2, label=label)
153
+ ax.set_ylabel(label, color="red")
154
+ ax.tick_params(axis="y", labelcolor="red")
152
155
 
153
156
 
154
157
  def requests_per_time(
@@ -183,14 +186,14 @@ def requests_per_time(
183
186
  start_time, end_time, plot_def.time_format, endpoint
184
187
  )
185
188
  timestamps, counts = create_time_series_arrays(
186
- requests_counts, start_time, end_time, plot_def.time_delta, plot_def.time_format
189
+ requests_counts, plot_def, start_time, end_time
187
190
  )
188
191
 
189
192
  fig, ax1 = matplotlib.pyplot.subplots(figsize=(12, 6))
190
- _add_bar_chart_for_requests_count(ax1, plot_def, timestamps, counts)
193
+ _add_bar_chart(ax1, plot_def, timestamps, counts, "Requests")
191
194
 
192
195
  ax2 = ax1.twinx()
193
- _add_cumulative_line_for_requests_count(ax2, timestamps, counts)
196
+ _add_line_chart(ax2, timestamps, numpy.cumsum(counts), "Cumulative Requests")
194
197
 
195
198
  matplotlib.pyplot.title(
196
199
  f"Requests received for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
@@ -204,3 +207,75 @@ def requests_per_time(
204
207
  matplotlib.pyplot.tight_layout()
205
208
 
206
209
  return fig
210
+
211
+
212
+ def average_time_per_responses( # pylint: disable=too-many-locals
213
+ period_of_time: models.TimePeriod,
214
+ endpoint: EndpointType = EndpointType.ANALYZE,
215
+ end_time: Optional[datetime.datetime] = None,
216
+ ) -> matplotlib.figure.Figure:
217
+ """
218
+ Generate a visualization of average response time and length over a specified time period.
219
+
220
+ This function creates a dual-axis plot showing:
221
+ 1. A bar chart of average response time per time interval
222
+ 2. A line chart of average response length per time interval
223
+
224
+ The time intervals are determined by the provided TimePeriod object, which defines
225
+ the granularity and formatting of the time axis.
226
+
227
+ Args:
228
+ period_of_time: A TimePeriod object that defines the time period and interval
229
+ for the analysis (e.g., hourly, daily, weekly)
230
+ endpoint: One of the API endpoints
231
+ end_time: The end time for the analysis period. If None, defaults to the current
232
+ UTC time
233
+
234
+ Returns:
235
+ A matplotlib Figure object containing the generated visualization
236
+ """
237
+ end_time = end_time or datetime.datetime.now(datetime.timezone.utc)
238
+ start_time = period_of_time.get_period_start_time(end_time)
239
+ plot_def = Definition(period_of_time)
240
+ responses_average_time = AnalyzeRequestMetrics.get_responses_average_time_in_period(
241
+ start_time, end_time, plot_def.time_format, endpoint
242
+ )
243
+ timestamps, average_time = create_time_series_arrays(
244
+ responses_average_time,
245
+ plot_def,
246
+ start_time,
247
+ end_time,
248
+ float,
249
+ )
250
+
251
+ fig, ax1 = matplotlib.pyplot.subplots(figsize=(12, 6))
252
+ _add_bar_chart(ax1, plot_def, timestamps, average_time, "average response time (seconds)")
253
+
254
+ responses_average_length = (
255
+ AnalyzeRequestMetrics.get_responses_average_length_in_period(
256
+ start_time, end_time, plot_def.time_format, endpoint
257
+ )
258
+ )
259
+ timestamps, average_length = create_time_series_arrays(
260
+ responses_average_length,
261
+ plot_def,
262
+ start_time,
263
+ end_time,
264
+ float,
265
+ )
266
+
267
+ ax2 = ax1.twinx()
268
+ _add_line_chart(ax2, timestamps, average_length, "average response length (chars)")
269
+
270
+ matplotlib.pyplot.title(
271
+ f"average response time for API {endpoint} ({start_time.strftime(plot_def.time_format)} "
272
+ f"to {end_time.strftime(plot_def.time_format)})"
273
+ )
274
+
275
+ lines1, labels1 = ax1.get_legend_handles_labels()
276
+ lines2, labels2 = ax2.get_legend_handles_labels()
277
+ ax1.legend(lines1 + lines2, labels1 + labels2, loc="center")
278
+
279
+ matplotlib.pyplot.tight_layout()
280
+
281
+ return fig