aiqa-client 0.4.1__py3-none-any.whl → 0.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
aiqa/tracing.py CHANGED
@@ -7,6 +7,7 @@ import json
7
7
  import logging
8
8
  import inspect
9
9
  import os
10
+ import copy
10
11
  from typing import Any, Callable, Optional, List
11
12
  from functools import wraps
12
13
  from opentelemetry import trace
@@ -15,10 +16,12 @@ from opentelemetry.trace import Status, StatusCode, SpanContext, TraceFlags
15
16
  from opentelemetry.propagate import inject, extract
16
17
  from .aiqa_exporter import AIQASpanExporter
17
18
  from .client import get_aiqa_client, get_component_tag, set_component_tag as _set_component_tag, get_aiqa_tracer
18
- from .constants import AIQA_TRACER_NAME
19
+ from .constants import AIQA_TRACER_NAME, LOG_TAG
19
20
  from .object_serialiser import serialize_for_span
21
+ from .http_utils import build_headers, get_server_url, get_api_key
22
+ from .tracing_llm_utils import _extract_and_set_token_usage, _extract_and_set_provider_and_model
20
23
 
21
- logger = logging.getLogger("AIQA")
24
+ logger = logging.getLogger(LOG_TAG)
22
25
 
23
26
 
24
27
  async def flush_tracing() -> None:
@@ -114,10 +117,12 @@ class TracingOptions:
114
117
  self.filter_output = filter_output
115
118
 
116
119
 
117
-
118
-
119
120
  def _prepare_input(args: tuple, kwargs: dict) -> Any:
120
- """Prepare input for span attributes.
121
+ """Prepare input for span attributes.
122
+ Aims to produce nice span attributes for the input, since {args, kwargs} is not a natural way to read function input.
123
+ So can "unwrap" the args, kwargs.
124
+
125
+ For single-arg-dicts or kwargs-only, returns a shallow copy of the input data.
121
126
 
122
127
  Note: This function does NOT serialize values - it just structures the data.
123
128
  Serialization happens later via serialize_for_span() to avoid double-encoding
@@ -125,14 +130,18 @@ def _prepare_input(args: tuple, kwargs: dict) -> Any:
125
130
  """
126
131
  if not args and not kwargs:
127
132
  return None
128
- if len(args) == 1 and not kwargs:
129
- return args[0] # Don't serialize here - will be serialized later
130
- # Multiple args or kwargs - combine into dict
131
- result = {}
132
- if args:
133
- result["args"] = list(args) # Keep as-is, will be serialized later
134
- if kwargs:
135
- result["kwargs"] = dict(kwargs) # Keep as-is, will be serialized later
133
+ if not kwargs:
134
+ if len(args) == 1:
135
+ arg0 = args[0]
136
+ if isinstance(arg0, dict): # shallow copy to protect against mutating the input
137
+ return arg0.copy()
138
+ return arg0
139
+ return list(args)
140
+ if kwargs and len(args) == 0:
141
+ return kwargs.copy() # shallow copy to protect against mutating the input
142
+ # Multiple args and kwargs - combine into dict
143
+ result = kwargs.copy()
144
+ result["args"] = list(args)
136
145
  return result
137
146
 
138
147
 
@@ -142,7 +151,10 @@ def _prepare_and_filter_input(
142
151
  filter_input: Optional[Callable[[Any], Any]],
143
152
  ignore_input: Optional[List[str]],
144
153
  ) -> Any:
145
- """Prepare and filter input for span attributes."""
154
+ """
155
+ Prepare and filter input for span attributes - applies the user's filter_input and ignore_input.
156
+ For single-arg-dicts or kwargs-only, returns a shallow copy of the input data.
157
+ """
146
158
  # Handle "self" in ignore_input by skipping the first argument
147
159
  filtered_args = args
148
160
  filtered_kwargs = kwargs.copy() if kwargs else {}
@@ -156,14 +168,17 @@ def _prepare_and_filter_input(
156
168
  # Also remove "self" from kwargs if present
157
169
  if "self" in filtered_kwargs:
158
170
  del filtered_kwargs["self"]
159
-
171
+ # turn args, kwargs into one "nice" object
160
172
  input_data = _prepare_input(filtered_args, filtered_kwargs)
161
- if filter_input:
173
+ if filter_input and input_data is not None:
162
174
  input_data = filter_input(input_data)
163
- if filtered_ignore_input and isinstance(input_data, dict):
164
- for key in filtered_ignore_input:
165
- if key in input_data:
166
- del input_data[key]
175
+ if filtered_ignore_input and len(filtered_ignore_input) > 0:
176
+ if not isinstance(input_data, dict):
177
+ logger.warning(f"_prepare_and_filter_input: skip: ignore_input is set beyond 'self': {filtered_ignore_input} but input_data is not a dict: {type(input_data)}")
178
+ else:
179
+ for key in filtered_ignore_input:
180
+ if key in input_data:
181
+ del input_data[key]
167
182
  # Also handle case where input_data is just self (single value, not dict)
168
183
  # If we filtered out self and there are no remaining args/kwargs, return None
169
184
  if ignore_input and "self" in ignore_input and not filtered_args and not filtered_kwargs:
@@ -171,213 +186,67 @@ def _prepare_and_filter_input(
171
186
  return input_data
172
187
 
173
188
 
174
- def _prepare_and_filter_output(
189
+ def _filter_and_serialize_output(
175
190
  result: Any,
176
191
  filter_output: Optional[Callable[[Any], Any]],
177
192
  ignore_output: Optional[List[str]],
178
193
  ) -> Any:
179
- """Prepare and filter output for span attributes."""
194
+ """Filter and serialize output for span attributes."""
180
195
  output_data = result
181
196
  if filter_output:
197
+ if isinstance(output_data, dict):
198
+ output_data = output_data.copy() # copy to provide shallow protection against the user accidentally mutating the output with filter_output
182
199
  output_data = filter_output(output_data)
183
200
  if ignore_output and isinstance(output_data, dict):
184
201
  output_data = output_data.copy()
185
202
  for key in ignore_output:
186
203
  if key in output_data:
187
204
  del output_data[key]
188
- return output_data
205
+
206
+ # Serialize immediately to create immutable result (removes mutable structures)
207
+ return serialize_for_span(output_data)
189
208
 
190
209
 
191
210
  def _handle_span_exception(span: trace.Span, exception: Exception) -> None:
192
211
  """Record exception on span and set error status."""
212
+ logger.info(f"span end: Handling span exception for {span.name}")
193
213
  error = exception if isinstance(exception, Exception) else Exception(str(exception))
194
214
  span.record_exception(error)
195
215
  span.set_status(Status(StatusCode.ERROR, str(error)))
196
216
 
197
217
 
198
- def _is_attribute_set(span: trace.Span, attribute_name: str) -> bool:
199
- """
200
- Check if an attribute is already set on a span.
201
- Returns True if the attribute exists, False otherwise.
202
- Safe against exceptions.
203
- """
204
- try:
205
- # Try multiple ways to access span attributes (SDK spans may store them differently)
206
- # Check public 'attributes' property
207
- if hasattr(span, "attributes"):
208
- attrs = span.attributes
209
- if attrs and attribute_name in attrs:
210
- return True
211
-
212
- # Check private '_attributes' (common in OpenTelemetry SDK)
213
- if hasattr(span, "_attributes"):
214
- attrs = span._attributes
215
- if attrs and attribute_name in attrs:
216
- return True
217
-
218
- # If we can't find the attribute, assume not set (conservative approach)
219
- return False
220
- except Exception:
221
- # If anything goes wrong, assume not set (conservative approach)
222
- return False
223
218
 
224
-
225
- def _extract_and_set_token_usage(span: trace.Span, result: Any) -> None:
219
+ def _finalize_span_success_common(
220
+ span: trace.Span,
221
+ result_for_metadata: Any,
222
+ output_data: Any,
223
+ filter_output: Optional[Callable[[Any], Any]] = None,
224
+ ignore_output: Optional[List[str]] = None,
225
+ ) -> None:
226
226
  """
227
- Extract OpenAI API style token usage from result and add to span attributes
228
- using OpenTelemetry semantic conventions for gen_ai.
227
+ Common logic for finalizing a span with success status.
228
+ Extracts token usage and provider/model from result, sets output attribute, and sets status to OK.
229
229
 
230
- Looks for usage dict with prompt_tokens, completion_tokens, and total_tokens.
231
- Sets gen_ai.usage.input_tokens, gen_ai.usage.output_tokens, and gen_ai.usage.total_tokens.
232
- Only sets attributes that are not already set.
230
+ Serializes output immediately to capture its state when the function returns,
231
+ preventing mutations from affecting the trace.
233
232
 
234
- This function detects token usage from OpenAI API response patterns:
235
- - OpenAI Chat Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
236
- See https://platform.openai.com/docs/api-reference/chat/object (usage field)
237
- - OpenAI Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
238
- See https://platform.openai.com/docs/api-reference/completions/object (usage field)
239
-
240
- This function is safe against exceptions and will not derail tracing or program execution.
241
- """
242
- try:
243
- if not span.is_recording():
244
- return
245
-
246
- usage = None
247
-
248
- # Check if result is a dict with 'usage' key
249
- try:
250
- if isinstance(result, dict):
251
- usage = result.get("usage")
252
- # Also check if result itself is a usage dict (OpenAI format)
253
- if usage is None and all(key in result for key in ("prompt_tokens", "completion_tokens", "total_tokens")):
254
- usage = result
255
- # Also check if result itself is a usage dict (Bedrock format)
256
- elif usage is None and all(key in result for key in ("input_tokens", "output_tokens")):
257
- usage = result
258
-
259
- # Check if result has a 'usage' attribute (e.g., OpenAI response object)
260
- elif hasattr(result, "usage"):
261
- usage = result.usage
262
- except Exception:
263
- # If accessing result properties fails, just return silently
264
- return
265
-
266
- # Extract token usage if found
267
- if isinstance(usage, dict):
268
- try:
269
- # Support both OpenAI format (prompt_tokens/completion_tokens) and Bedrock format (input_tokens/output_tokens)
270
- prompt_tokens = usage.get("prompt_tokens") or usage.get("PromptTokens")
271
- completion_tokens = usage.get("completion_tokens") or usage.get("CompletionTokens")
272
- input_tokens = usage.get("input_tokens") or usage.get("InputTokens")
273
- output_tokens = usage.get("output_tokens") or usage.get("OutputTokens")
274
- total_tokens = usage.get("total_tokens") or usage.get("TotalTokens")
275
-
276
- # Use Bedrock format if OpenAI format not available
277
- if prompt_tokens is None:
278
- prompt_tokens = input_tokens
279
- if completion_tokens is None:
280
- completion_tokens = output_tokens
281
-
282
- # Calculate total_tokens if not provided but we have input and output
283
- if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
284
- total_tokens = prompt_tokens + completion_tokens
285
-
286
- # Only set attributes that are not already set
287
- if prompt_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.input_tokens"):
288
- span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens)
289
- if completion_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.output_tokens"):
290
- span.set_attribute("gen_ai.usage.output_tokens", completion_tokens)
291
- if total_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.total_tokens"):
292
- span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
293
- except Exception:
294
- # If setting attributes fails, log but don't raise
295
- logger.debug(f"Failed to set token usage attributes on span")
296
- except Exception:
297
- # Catch any other exceptions to ensure this never derails tracing
298
- logger.debug(f"Error in _extract_and_set_token_usage")
299
-
300
-
301
- def _extract_and_set_provider_and_model(span: trace.Span, result: Any) -> None:
233
+ Args:
234
+ span: The span to finalize
235
+ result_for_metadata: Value to extract token usage and provider/model from
236
+ output_data: The output data to set on the span (will be filtered if needed)
237
+ filter_output: Optional function to filter output data
238
+ ignore_output: Optional list of keys to exclude from output
302
239
  """
303
- Extract provider and model information from result and add to span attributes
304
- using OpenTelemetry semantic conventions for gen_ai.
305
-
306
- Looks for 'model', 'provider', 'provider_name' fields in the result.
307
- Sets gen_ai.provider.name and gen_ai.request.model.
308
- Only sets attributes that are not already set.
240
+ logger.info(f"span end: Finalizing for {span.name}")
241
+ _extract_and_set_token_usage(span, result_for_metadata)
242
+ _extract_and_set_provider_and_model(span, result_for_metadata)
309
243
 
310
- This function detects model information from common API response patterns:
311
- - OpenAI Chat Completions API: The 'model' field is at the top level of the response.
312
- See https://platform.openai.com/docs/api-reference/chat/object
313
- - OpenAI Completions API: The 'model' field is at the top level of the response.
314
- See https://platform.openai.com/docs/api-reference/completions/object
315
-
316
- This function is safe against exceptions and will not derail tracing or program execution.
317
- """
318
- try:
319
- if not span.is_recording():
320
- return
321
-
322
- model = None
323
- provider = None
324
-
325
- # Check if result is a dict
326
- try:
327
- if isinstance(result, dict):
328
- model = result.get("model") or result.get("Model")
329
- provider = result.get("provider") or result.get("Provider") or result.get("provider_name") or result.get("providerName")
330
-
331
- # Check if result has attributes (e.g., OpenAI response object)
332
- elif hasattr(result, "model"):
333
- model = result.model
334
- if hasattr(result, "provider"):
335
- provider = result.provider
336
- elif hasattr(result, "provider_name"):
337
- provider = result.provider_name
338
- elif hasattr(result, "providerName"):
339
- provider = result.providerName
340
-
341
- # Check nested structures (e.g., response.data.model)
342
- if model is None and hasattr(result, "data"):
343
- data = result.data
344
- if isinstance(data, dict):
345
- model = data.get("model") or data.get("Model")
346
- elif hasattr(data, "model"):
347
- model = data.model
348
-
349
- # Check for model in choices (OpenAI pattern)
350
- if model is None and isinstance(result, dict):
351
- choices = result.get("choices")
352
- if choices and isinstance(choices, list) and len(choices) > 0:
353
- first_choice = choices[0]
354
- if isinstance(first_choice, dict):
355
- model = first_choice.get("model")
356
- elif hasattr(first_choice, "model"):
357
- model = first_choice.model
358
- except Exception:
359
- # If accessing result properties fails, just return silently
360
- return
361
-
362
- # Set attributes if found and not already set
363
- try:
364
- if model is not None and not _is_attribute_set(span, "gen_ai.request.model"):
365
- # Convert to string if needed
366
- model_str = str(model) if model is not None else None
367
- if model_str:
368
- span.set_attribute("gen_ai.request.model", model_str)
369
-
370
- if provider is not None and not _is_attribute_set(span, "gen_ai.provider.name"):
371
- # Convert to string if needed
372
- provider_str = str(provider) if provider is not None else None
373
- if provider_str:
374
- span.set_attribute("gen_ai.provider.name", provider_str)
375
- except Exception:
376
- # If setting attributes fails, log but don't raise
377
- logger.debug(f"Failed to set provider/model attributes on span")
378
- except Exception:
379
- # Catch any other exceptions to ensure this never derails tracing
380
- logger.debug(f"Error in _extract_and_set_provider_and_model")
244
+ # Prepare, filter, and serialize output (serialization happens in _filter_and_serialize_output)
245
+ output_data = _filter_and_serialize_output(output_data, filter_output, ignore_output)
246
+ if output_data is not None:
247
+ # output_data is already serialized (immutable) from _filter_and_serialize_output
248
+ span.set_attribute("output", output_data)
249
+ span.set_status(Status(StatusCode.OK))
381
250
 
382
251
 
383
252
  class TracedGenerator:
@@ -410,7 +279,8 @@ class TracedGenerator:
410
279
 
411
280
  try:
412
281
  value = next(self._generator)
413
- self._yielded_values.append(value)
282
+ # Serialize immediately to create immutable result (removes mutable structures)
283
+ self._yielded_values.append(serialize_for_span(value))
414
284
  return value
415
285
  except StopIteration:
416
286
  self._exhausted = True
@@ -428,10 +298,7 @@ class TracedGenerator:
428
298
  def _finalize_span_success(self):
429
299
  """Set output and success status on span."""
430
300
  # Check last yielded value for token usage (common pattern in streaming responses)
431
- if self._yielded_values:
432
- last_value = self._yielded_values[-1]
433
- _extract_and_set_token_usage(self._span, last_value)
434
- _extract_and_set_provider_and_model(self._span, last_value)
301
+ result_for_metadata = self._yielded_values[-1] if self._yielded_values else None
435
302
 
436
303
  # Record summary of yielded values
437
304
  output_data = {
@@ -448,10 +315,13 @@ class TracedGenerator:
448
315
  if len(self._yielded_values) > sample_size:
449
316
  output_data["truncated"] = True
450
317
 
451
- output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
452
- if output_data is not None:
453
- self._span.set_attribute("output", serialize_for_span(output_data))
454
- self._span.set_status(Status(StatusCode.OK))
318
+ _finalize_span_success_common(
319
+ self._span,
320
+ result_for_metadata,
321
+ output_data,
322
+ self._filter_output,
323
+ self._ignore_output,
324
+ )
455
325
 
456
326
 
457
327
  class TracedAsyncGenerator:
@@ -484,7 +354,8 @@ class TracedAsyncGenerator:
484
354
 
485
355
  try:
486
356
  value = await self._generator.__anext__()
487
- self._yielded_values.append(value)
357
+ # Serialize immediately to create immutable result (removes mutable structures)
358
+ self._yielded_values.append(serialize_for_span(value))
488
359
  return value
489
360
  except StopAsyncIteration:
490
361
  self._exhausted = True
@@ -502,10 +373,7 @@ class TracedAsyncGenerator:
502
373
  def _finalize_span_success(self):
503
374
  """Set output and success status on span."""
504
375
  # Check last yielded value for token usage (common pattern in streaming responses)
505
- if self._yielded_values:
506
- last_value = self._yielded_values[-1]
507
- _extract_and_set_token_usage(self._span, last_value)
508
- _extract_and_set_provider_and_model(self._span, last_value)
376
+ result_for_metadata = self._yielded_values[-1] if self._yielded_values else None
509
377
 
510
378
  # Record summary of yielded values
511
379
  output_data = {
@@ -522,10 +390,13 @@ class TracedAsyncGenerator:
522
390
  if len(self._yielded_values) > sample_size:
523
391
  output_data["truncated"] = True
524
392
 
525
- output_data = _prepare_and_filter_output(output_data, self._filter_output, self._ignore_output)
526
- if output_data is not None:
527
- self._span.set_attribute("output", serialize_for_span(output_data))
528
- self._span.set_status(Status(StatusCode.OK))
393
+ _finalize_span_success_common(
394
+ self._span,
395
+ result_for_metadata,
396
+ output_data,
397
+ self._filter_output,
398
+ self._ignore_output,
399
+ )
529
400
 
530
401
 
531
402
  def WithTracing(
@@ -560,20 +431,6 @@ def WithTracing(
560
431
  def my_function(x, y):
561
432
  return x + y
562
433
 
563
- @WithTracing
564
- async def my_async_function(x, y):
565
- return x + y
566
-
567
- @WithTracing
568
- def my_generator(n):
569
- for i in range(n):
570
- yield i * 2
571
-
572
- @WithTracing
573
- async def my_async_generator(n):
574
- for i in range(n):
575
- yield i * 2
576
-
577
434
  @WithTracing(name="custom_name")
578
435
  def another_function():
579
436
  pass
@@ -585,7 +442,7 @@ def WithTracing(
585
442
  if hasattr(fn, "_is_traced"):
586
443
  logger.warning(f"Function {fn_name} is already traced, skipping tracing again")
587
444
  return fn
588
-
445
+ logger.info(f"WithTracing function {fn_name}")
589
446
  is_async = inspect.iscoroutinefunction(fn)
590
447
  is_generator = inspect.isgeneratorfunction(fn)
591
448
  is_async_generator = inspect.isasyncgenfunction(fn) if hasattr(inspect, 'isasyncgenfunction') else False
@@ -594,7 +451,12 @@ def WithTracing(
594
451
  # This ensures initialization only happens when tracing is actually used
595
452
 
596
453
  def _setup_span(span: trace.Span, input_data: Any) -> bool:
597
- """Setup span with input data. Returns True if span is recording."""
454
+ """
455
+ Setup span with input data. Returns True if span is recording.
456
+
457
+ Serializes input immediately to capture its state at function start,
458
+ preventing mutations from affecting the trace.
459
+ """
598
460
  if not span.is_recording():
599
461
  logger.warning(f"Span {fn_name} is not recording - will not be exported")
600
462
  return False
@@ -607,6 +469,8 @@ def WithTracing(
607
469
  span.set_attribute("gen_ai.component.id", component_tag)
608
470
 
609
471
  if input_data is not None:
472
+ # Serialize input immediately to capture state at function start
473
+ # input_data has already been copied in _prepare_and_filter_input
610
474
  span.set_attribute("input", serialize_for_span(input_data))
611
475
 
612
476
  trace_id = format(span.get_span_context().trace_id, "032x")
@@ -615,30 +479,28 @@ def WithTracing(
615
479
 
616
480
  def _finalize_span_success(span: trace.Span, result: Any) -> None:
617
481
  """Set output and success status on span."""
618
- # Extract and set token usage if present (before filtering output)
619
- _extract_and_set_token_usage(span, result)
620
- # Extract and set provider/model if present (before filtering output)
621
- _extract_and_set_provider_and_model(span, result)
622
-
623
- output_data = _prepare_and_filter_output(result, filter_output, ignore_output)
624
- if output_data is not None:
625
- span.set_attribute("output", serialize_for_span(output_data))
626
- span.set_status(Status(StatusCode.OK))
482
+ _finalize_span_success_common(
483
+ span,
484
+ result,
485
+ result,
486
+ filter_output,
487
+ ignore_output,
488
+ )
627
489
 
628
490
  def _execute_with_span_sync(executor: Callable[[], Any], input_data: Any) -> Any:
629
- """Execute sync function within span context, handling input/output and exceptions."""
491
+ """Execute sync function within span context, handling input/output and exceptions.
492
+ Note: input_data has already gone through _prepare_and_filter_input
493
+ """
630
494
  # Ensure tracer provider is initialized before creating spans
631
495
  # This is called lazily when the function runs, not at decorator definition time
632
496
  client = get_aiqa_client()
633
497
  if not client.enabled:
634
498
  return executor()
635
-
636
499
  # Get tracer after initialization (lazy)
637
500
  tracer = get_aiqa_tracer()
638
501
  with tracer.start_as_current_span(fn_name) as span:
639
502
  if not _setup_span(span, input_data):
640
- return executor()
641
-
503
+ return executor() # span is not recording, so just execute the function and return the result
642
504
  try:
643
505
  result = executor()
644
506
  _finalize_span_success(span, result)
@@ -688,7 +550,7 @@ def WithTracing(
688
550
 
689
551
  try:
690
552
  if not _setup_span(span, input_data):
691
- generator = executor()
553
+ generator = executor() # span is not recording, so just execute the function and return the result
692
554
  trace.context_api.detach(token)
693
555
  span.end()
694
556
  return generator
@@ -1168,8 +1030,8 @@ def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optio
1168
1030
  import os
1169
1031
  import requests
1170
1032
 
1171
- server_url = os.getenv("AIQA_SERVER_URL", "").rstrip("/")
1172
- api_key = os.getenv("AIQA_API_KEY", "")
1033
+ server_url = get_server_url()
1034
+ api_key = get_api_key()
1173
1035
  org_id = organisation_id or os.getenv("AIQA_ORGANISATION_ID", "")
1174
1036
 
1175
1037
  if not server_url:
@@ -1190,9 +1052,7 @@ def get_span(span_id: str, organisation_id: Optional[str] = None, exclude: Optio
1190
1052
  "fields": "*" if not exclude else None,
1191
1053
  }
1192
1054
 
1193
- headers = {"Content-Type": "application/json"}
1194
- if api_key:
1195
- headers["Authorization"] = f"ApiKey {api_key}"
1055
+ headers = build_headers(api_key)
1196
1056
 
1197
1057
  response = requests.get(url, params=params, headers=headers)
1198
1058
  if response.status_code == 200: