aiqa-client 0.4.3__py3-none-any.whl → 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiqa/aiqa_exporter.py +192 -99
- aiqa/client.py +13 -10
- aiqa/constants.py +3 -1
- aiqa/experiment_runner.py +12 -29
- aiqa/http_utils.py +69 -0
- aiqa/object_serialiser.py +136 -115
- aiqa/tracing.py +113 -253
- aiqa/tracing_llm_utils.py +191 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.4.7.dist-info}/METADATA +1 -1
- aiqa_client-0.4.7.dist-info/RECORD +15 -0
- aiqa/test_experiment_runner.py +0 -176
- aiqa/test_startup_reliability.py +0 -249
- aiqa/test_tracing.py +0 -230
- aiqa_client-0.4.3.dist-info/RECORD +0 -16
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.4.7.dist-info}/WHEEL +0 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.4.7.dist-info}/licenses/LICENSE.txt +0 -0
- {aiqa_client-0.4.3.dist-info → aiqa_client-0.4.7.dist-info}/top_level.txt +0 -0
aiqa/tracing_llm_utils.py
ADDED
@@ -0,0 +1,191 @@
# Functions for extracting and setting LLM-specific attributes on a span.
import logging
from .constants import LOG_TAG
from opentelemetry import trace
from typing import Any

logger = logging.getLogger(LOG_TAG)


def _is_attribute_set(span: trace.Span, attribute_name: str) -> bool:
    """
    Check if an attribute is already set on a span.
    Returns True if the attribute exists, False otherwise.
    Safe against exceptions.
    """
    try:
        # Try multiple ways to access span attributes (SDK spans may store them differently)
        # Check public 'attributes' property
        if hasattr(span, "attributes"):
            attrs = span.attributes
            if attrs and attribute_name in attrs:
                return True

        # Check private '_attributes' (common in OpenTelemetry SDK)
        if hasattr(span, "_attributes"):
            attrs = span._attributes
            if attrs and attribute_name in attrs:
                return True

        # If we can't find the attribute, assume not set (conservative approach)
        return False
    except Exception:
        # If anything goes wrong, assume not set (conservative approach)
        return False

def _extract_and_set_token_usage(span: trace.Span, result: Any) -> None:
    """
    Extract OpenAI API style token usage from result and add to span attributes
    using OpenTelemetry semantic conventions for gen_ai.

    Looks for usage dict with prompt_tokens, completion_tokens, and total_tokens.
    Sets gen_ai.usage.input_tokens, gen_ai.usage.output_tokens, and gen_ai.usage.total_tokens.
    Only sets attributes that are not already set.

    This function detects token usage from OpenAI API response patterns:
    - OpenAI Chat Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
      See https://platform.openai.com/docs/api-reference/chat/object (usage field)
    - OpenAI Completions API: The 'usage' object contains 'prompt_tokens', 'completion_tokens', and 'total_tokens'.
      See https://platform.openai.com/docs/api-reference/completions/object (usage field)

    This function is safe against exceptions and will not derail tracing or program execution.
    """
    try:
        if not span.is_recording():
            return

        usage = None

        # Check if result is a dict with 'usage' key
        try:
            if isinstance(result, dict):
                usage = result.get("usage")
                # Also check if result itself is a usage dict (OpenAI format)
                if usage is None and all(key in result for key in ("prompt_tokens", "completion_tokens", "total_tokens")):
                    usage = result
                # Also check if result itself is a usage dict (Bedrock format)
                elif usage is None and all(key in result for key in ("input_tokens", "output_tokens")):
                    usage = result

            # Check if result has a 'usage' attribute (e.g., OpenAI response object)
            elif hasattr(result, "usage"):
                usage = result.usage
        except Exception:
            # If accessing result properties fails, just return silently
            return

        # Extract token usage if found
        if isinstance(usage, dict):
            try:
                # Support both OpenAI format (prompt_tokens/completion_tokens) and Bedrock format (input_tokens/output_tokens)
                prompt_tokens = usage.get("prompt_tokens") or usage.get("PromptTokens")
                completion_tokens = usage.get("completion_tokens") or usage.get("CompletionTokens")
                input_tokens = usage.get("input_tokens") or usage.get("InputTokens")
                output_tokens = usage.get("output_tokens") or usage.get("OutputTokens")
                total_tokens = usage.get("total_tokens") or usage.get("TotalTokens")

                # Use Bedrock format if OpenAI format not available
                if prompt_tokens is None:
                    prompt_tokens = input_tokens
                if completion_tokens is None:
                    completion_tokens = output_tokens

                # Calculate total_tokens if not provided but we have input and output
                if total_tokens is None and prompt_tokens is not None and completion_tokens is not None:
                    total_tokens = prompt_tokens + completion_tokens

                # Only set attributes that are not already set
                if prompt_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.input_tokens"):
                    span.set_attribute("gen_ai.usage.input_tokens", prompt_tokens)
                if completion_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.output_tokens"):
                    span.set_attribute("gen_ai.usage.output_tokens", completion_tokens)
                if total_tokens is not None and not _is_attribute_set(span, "gen_ai.usage.total_tokens"):
                    span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
            except Exception:
                # If setting attributes fails, log but don't raise
                logger.debug(f"Failed to set token usage attributes on span")
    except Exception:
        # Catch any other exceptions to ensure this never derails tracing
        logger.debug(f"Error in _extract_and_set_token_usage")


def _extract_and_set_provider_and_model(span: trace.Span, result: Any) -> None:
    """
    Extract provider and model information from result and add to span attributes
    using OpenTelemetry semantic conventions for gen_ai.

    Looks for 'model', 'provider', 'provider_name' fields in the result.
    Sets gen_ai.provider.name and gen_ai.request.model.
    Only sets attributes that are not already set.

    This function detects model information from common API response patterns:
    - OpenAI Chat Completions API: The 'model' field is at the top level of the response.
      See https://platform.openai.com/docs/api-reference/chat/object
    - OpenAI Completions API: The 'model' field is at the top level of the response.
      See https://platform.openai.com/docs/api-reference/completions/object

    This function is safe against exceptions and will not derail tracing or program execution.
    """
    try:
        if not span.is_recording():
            return

        model = None
        provider = None

        # Check if result is a dict
        try:
            if isinstance(result, dict):
                model = result.get("model") or result.get("Model")
                provider = result.get("provider") or result.get("Provider") or result.get("provider_name") or result.get("providerName")

            # Check if result has attributes (e.g., OpenAI response object)
            elif hasattr(result, "model"):
                model = result.model
                if hasattr(result, "provider"):
                    provider = result.provider
                elif hasattr(result, "provider_name"):
                    provider = result.provider_name
                elif hasattr(result, "providerName"):
                    provider = result.providerName

            # Check nested structures (e.g., response.data.model)
            if model is None and hasattr(result, "data"):
                data = result.data
                if isinstance(data, dict):
                    model = data.get("model") or data.get("Model")
                elif hasattr(data, "model"):
                    model = data.model

            # Check for model in choices (OpenAI pattern)
            if model is None and isinstance(result, dict):
                choices = result.get("choices")
                if choices and isinstance(choices, list) and len(choices) > 0:
                    first_choice = choices[0]
                    if isinstance(first_choice, dict):
                        model = first_choice.get("model")
                    elif hasattr(first_choice, "model"):
                        model = first_choice.model
        except Exception:
            # If accessing result properties fails, just return silently
            return

        # Set attributes if found and not already set
        try:
            if model is not None and not _is_attribute_set(span, "gen_ai.request.model"):
                # Convert to string if needed
                model_str = str(model) if model is not None else None
                if model_str:
                    span.set_attribute("gen_ai.request.model", model_str)

            if provider is not None and not _is_attribute_set(span, "gen_ai.provider.name"):
                # Convert to string if needed
                provider_str = str(provider) if provider is not None else None
                if provider_str:
                    span.set_attribute("gen_ai.provider.name", provider_str)
        except Exception:
            # If setting attributes fails, log but don't raise
            logger.debug(f"Failed to set provider/model attributes on span")
    except Exception:
        # Catch any other exceptions to ensure this never derails tracing
        logger.debug(f"Error in _extract_and_set_provider_and_model")
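For orientation, here is a minimal sketch of how these helpers could be exercised against a hand-built OpenAI-style response dict. The span setup uses the standard OpenTelemetry SDK; calling the underscore-prefixed helpers directly is purely illustrative, since inside the package they are presumably invoked by the tracing wrappers in aiqa/tracing.py rather than by user code.

# Illustrative only: not how the package itself wires these helpers in.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

from aiqa.tracing_llm_utils import (
    _extract_and_set_provider_and_model,
    _extract_and_set_token_usage,
)

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("example")

# A fabricated Chat Completions-style response for demonstration.
fake_response = {
    "model": "gpt-4o-mini",
    "choices": [{"message": {"content": "hello"}}],
    "usage": {"prompt_tokens": 12, "completion_tokens": 3, "total_tokens": 15},
}

with tracer.start_as_current_span("llm-call") as span:
    _extract_and_set_token_usage(span, fake_response)         # sets gen_ai.usage.* attributes
    _extract_and_set_provider_and_model(span, fake_response)  # sets gen_ai.request.model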
aiqa_client-0.4.7.dist-info/RECORD
ADDED
@@ -0,0 +1,15 @@
aiqa/__init__.py,sha256=8MQBrnisjeYNrwrbTheUafEWS09GtIF7ff0fBZ1Jb24,1710
aiqa/aiqa_exporter.py,sha256=PAEwnrqTiII_OY1q6bskPob7rKGoYOYaE7ismU1pIv4,38630
aiqa/client.py,sha256=lcENe5LlyfH8v312ElcX_HtVuOoyIMzzJnmeKrbjXYw,10063
aiqa/constants.py,sha256=rUI3WuY1fKB_Isaok4C9vYer2XZYEgAVxAIe13pJi14,226
aiqa/experiment_runner.py,sha256=XAZsjVP70UH_QTk5ANSOQYAhmozuGXwKB5qWWHs-zeE,11186
aiqa/http_utils.py,sha256=m4fu3NI9CSAfdz4yz3S-nYLoAOmUhDGas4ZcpuMLog8,2241
aiqa/object_serialiser.py,sha256=DBv7EyXIwfwjwXHDsIwdZNFmQffRb5fKAE0r8qhoqgc,16958
aiqa/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
aiqa/tracing.py,sha256=1DLiQ-HHRgVV-mLIdkzeBeWD0bLsNCV4kh1yRlurvps,44080
aiqa/tracing_llm_utils.py,sha256=rNx6v6Wh_Mhv-_DPU9_aWS7YQcO46oiv0YPdBK1KVL8,9338
aiqa_client-0.4.7.dist-info/licenses/LICENSE.txt,sha256=kIzkzLuzG0HHaWYm4F4W5FeJ1Yxut3Ec6bhLWyw798A,1062
aiqa_client-0.4.7.dist-info/METADATA,sha256=a8uGQEoyu0rmY4N2UcS7FyTC6sIyGSb8Qn_-cxJq8Yc,7705
aiqa_client-0.4.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
aiqa_client-0.4.7.dist-info/top_level.txt,sha256=nwcsuVVSuWu27iLxZd4n1evVzv1W6FVTrSnCXCc-NQs,5
aiqa_client-0.4.7.dist-info/RECORD,,
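Each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with trailing "=" padding stripped, per the wheel RECORD format (PEP 427 / PEP 376). A small sketch for checking an entry against a locally unpacked wheel; the path below is an assumption about where the wheel was unpacked.

# Sketch: recompute a RECORD-style entry for a file from an unpacked wheel.
import base64
import hashlib

def record_entry(path: str) -> str:
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode('ascii')},{len(data)}"

# Should match the corresponding line in the RECORD listing above.
print(record_entry("aiqa/tracing_llm_utils.py"))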
aiqa/test_experiment_runner.py
DELETED
@@ -1,176 +0,0 @@
"""
Example usage of the ExperimentRunner class.
"""

import asyncio
import os
from dotenv import load_dotenv
from aiqa import ExperimentRunner

# Load environment variables
load_dotenv()


# A dummy test engine that returns a dummy response
async def my_engine(input_data):
    """
    Example engine function that simulates an API call.
    Note: For run(), the engine only takes input_data.
    For run_example(), you can use an engine that takes (input_data, parameters).
    """
    # Imitate an OpenAI API response
    # Sleep for random about 0.5 - 1 seconds
    import random

    sleep_time = random.random() * 0.5 + 0.5
    await asyncio.sleep(sleep_time)
    return {
        "choices": [
            {
                "message": {
                    "content": f"hello {input_data}",
                },
            },
        ],
    }


async def scorer(output, example):
    """
    Example scorer function that scores the output.
    In a real scenario, you would use the metrics from the dataset.
    Note: For run(), the scorer only takes (output, example).
    For run_example(), you can use a scorer that takes (output, example, parameters).
    """
    # This is a simple example - in practice, you'd use the metrics from the dataset
    # and call the scoring functions accordingly
    scores = {}
    # Add your scoring logic here
    return scores


async def example_basic_usage():
    """
    Basic example of using ExperimentRunner.
    """
    if not os.getenv("AIQA_API_KEY"):
        print("Warning: AIQA_API_KEY environment variable is not set. Example may fail.")

    dataset_id = "your-dataset-id-here"
    organisation_id = "your-organisation-id-here"

    experiment_runner = ExperimentRunner(
        dataset_id=dataset_id,
        organisation_id=organisation_id,
    )

    # Get metrics from the dataset
    dataset = experiment_runner.get_dataset()
    metrics = dataset.get("metrics", [])
    print(f"Found {len(metrics)} metrics in dataset: {[m['name'] for m in metrics]}")

    # Create scorer that scores all metrics from the dataset
    # (In practice, you'd implement this based on your metrics)
    async def dataset_scorer(output, example):
        # Use the metrics from the dataset to score
        # This is a placeholder - implement based on your actual metrics
        return await scorer(output, example)

    # Get example inputs
    example_inputs = experiment_runner.get_example_inputs()
    print(f"Processing {len(example_inputs)} examples")

    # Run experiments on each example
    for example in example_inputs:
        result = await experiment_runner.run_example(example, my_engine, dataset_scorer)
        if result and len(result) > 0:
            print(f"Scored example {example['id']}: {result}")
        else:
            print(f"No results for example {example['id']}")

    # Get summary results
    summary_results = experiment_runner.get_summary_results()
    print(f"Summary results: {summary_results}")


async def example_with_experiment_setup():
    """
    Example of creating an experiment with custom setup.
    """
    dataset_id = "your-dataset-id-here"
    organisation_id = "your-organisation-id-here"

    experiment_runner = ExperimentRunner(
        dataset_id=dataset_id,
        organisation_id=organisation_id,
    )

    # Create experiment with custom parameters
    experiment = experiment_runner.create_experiment(
        {
            "name": "My Custom Experiment",
            "parameters": {
                "model": "gpt-4",
                "temperature": 0.7,
            },
            "comparison_parameters": [
                {"temperature": 0.5},
                {"temperature": 0.9},
            ],
        }
    )

    print(f"Created experiment: {experiment['id']}")

    # Now run the experiment
    await experiment_runner.run(my_engine, scorer)


async def example_stepwise():
    """
    Example of running experiments step by step (more control).
    """
    dataset_id = "your-dataset-id-here"
    organisation_id = "your-organisation-id-here"

    experiment_runner = ExperimentRunner(
        dataset_id=dataset_id,
        organisation_id=organisation_id,
    )

    # Get the dataset
    dataset = experiment_runner.get_dataset()
    metrics = dataset.get("metrics", [])
    print(f"Found {len(metrics)} metrics in dataset")

    # Create scorer for run_example (takes parameters)
    async def my_scorer(output, example, parameters):
        # Implement your scoring logic here
        # Note: run_example() passes parameters, so this scorer can use them
        return {"score": 0.8}  # Placeholder

    # Get examples
    examples = experiment_runner.get_example_inputs(limit=100)
    print(f"Processing {len(examples)} examples")

    # Process each example individually
    for example in examples:
        try:
            result = await experiment_runner.run_example(example, my_engine, my_scorer)
            print(f"Example {example['id']} completed: {result}")
        except Exception as e:
            print(f"Example {example['id']} failed: {e}")

    # Get final summary
    summary = experiment_runner.get_summary_results()
    print(f"Final summary: {summary}")


if __name__ == "__main__":
    # Uncomment the example you want to run:
    # asyncio.run(example_basic_usage())
    # asyncio.run(example_with_experiment_setup())
    # asyncio.run(example_stepwise())
    print("Please uncomment one of the examples above to run it.")
    print("Make sure to set your dataset_id and organisation_id in the example functions.")
aiqa/test_startup_reliability.py
DELETED
@@ -1,249 +0,0 @@
"""
Test startup reliability - simulates ECS deployment scenarios where rapid initialization
and network issues could cause deployment failures.

These tests verify that:
1. Exporter initialization doesn't block or create threads immediately
2. Thread creation is lazy (only on first export)
3. Network failures during startup don't cause hangs
4. Multiple rapid initializations don't cause issues
"""

import os
import time
import threading
import pytest
from unittest.mock import patch, MagicMock
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

from aiqa.client import get_aiqa_client, AIQAClient
from aiqa.aiqa_exporter import AIQASpanExporter


class TestStartupReliability:
    """Tests for startup reliability in ECS-like scenarios."""

    def test_exporter_initialization_does_not_create_thread_immediately(self):
        """Verify that creating an exporter doesn't immediately start a thread."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            exporter = AIQASpanExporter(startup_delay_seconds=0.1)

            # Thread should not be created immediately
            assert exporter.flush_timer is None
            assert not exporter._auto_flush_started

            # Cleanup
            exporter.shutdown()

    def test_thread_created_lazily_on_first_export(self):
        """Verify thread is only created when first span is exported."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            exporter = AIQASpanExporter(startup_delay_seconds=0.1)

            # Thread should not exist yet
            assert exporter.flush_timer is None

            # Create a mock span and export it
            from opentelemetry.sdk.trace import ReadableSpan
            from opentelemetry.trace import SpanContext, TraceFlags

            mock_span = MagicMock(spec=ReadableSpan)
            mock_span.get_span_context.return_value = SpanContext(
                trace_id=1, span_id=1, is_remote=False, trace_flags=TraceFlags(0x01)
            )
            mock_span.name = "test_span"
            mock_span.kind = 1
            mock_span.start_time = 1000000000
            mock_span.end_time = 2000000000
            mock_span.status.status_code = 1
            mock_span.attributes = {}
            mock_span.links = []
            mock_span.events = []
            mock_span.resource.attributes = {}
            mock_span.parent = None

            # Export should trigger thread creation
            result = exporter.export([mock_span])

            # Give thread a moment to start
            time.sleep(0.2)

            # Now thread should exist
            assert exporter._auto_flush_started
            assert exporter.flush_timer is not None
            assert exporter.flush_timer.is_alive()

            # Cleanup
            exporter.shutdown()
            if exporter.flush_timer:
                exporter.flush_timer.join(timeout=2.0)

    def test_rapid_multiple_initializations(self):
        """Test that multiple rapid initializations don't cause issues (simulates health checks)."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            # Simulate rapid health check calls
            clients = []
            for _ in range(10):
                client = get_aiqa_client()
                clients.append(client)
                time.sleep(0.01)  # Very short delay

            # All should be the same singleton
            assert all(c is clients[0] for c in clients)

            # Should not have created multiple threads
            if clients[0].exporter:
                assert clients[0].exporter._auto_flush_started or clients[0].exporter.flush_timer is None

    def test_initialization_with_unreachable_server(self):
        """Test that initialization doesn't hang when server is unreachable."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://unreachable-server:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            # Should not block or raise
            client = get_aiqa_client()
            assert client is not None
            assert client._initialized

            # Exporter should exist but thread shouldn't be started yet
            if client.exporter:
                # Thread creation is lazy, so it might not exist
                assert client.exporter.flush_timer is None or not client.exporter._auto_flush_started

    def test_startup_delay_respected(self):
        """Verify that startup delay prevents immediate flush attempts."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            exporter = AIQASpanExporter(startup_delay_seconds=0.5)

            # Create and export a span to trigger thread creation
            from opentelemetry.sdk.trace import ReadableSpan
            from opentelemetry.trace import SpanContext, TraceFlags

            mock_span = MagicMock(spec=ReadableSpan)
            mock_span.get_span_context.return_value = SpanContext(
                trace_id=1, span_id=1, is_remote=False, trace_flags=TraceFlags(0x01)
            )
            mock_span.name = "test_span"
            mock_span.kind = 1
            mock_span.start_time = 1000000000
            mock_span.end_time = 2000000000
            mock_span.status.status_code = 1
            mock_span.attributes = {}
            mock_span.links = []
            mock_span.events = []
            mock_span.resource.attributes = {}
            mock_span.parent = None

            exporter.export([mock_span])

            # Thread should be created
            time.sleep(0.1)
            assert exporter._auto_flush_started

            # But flush should not have happened yet (within delay period)
            # We can't easily test this without mocking time, but we verify thread exists
            assert exporter.flush_timer is not None

            # Cleanup
            exporter.shutdown()
            if exporter.flush_timer:
                exporter.flush_timer.join(timeout=2.0)

    def test_concurrent_initialization(self):
        """Test concurrent initialization from multiple threads (simulates ECS health checks)."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            clients = []
            errors = []

            def init_client():
                try:
                    client = get_aiqa_client()
                    clients.append(client)
                except Exception as e:
                    errors.append(e)

            # Start multiple threads initializing simultaneously
            threads = [threading.Thread(target=init_client) for _ in range(5)]
            for t in threads:
                t.start()
            for t in threads:
                t.join(timeout=5.0)

            # Should have no errors
            assert len(errors) == 0

            # All should be the same singleton
            assert len(set(id(c) for c in clients)) == 1

    def test_shutdown_before_thread_starts(self):
        """Test that shutdown works even if thread was never started."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            exporter = AIQASpanExporter(startup_delay_seconds=1.0)

            # Thread should not exist
            assert exporter.flush_timer is None

            # Shutdown should work without errors
            exporter.shutdown()

            # Should still be able to call shutdown again
            exporter.shutdown()

    def test_initialization_timeout(self):
        """Test that initialization completes quickly even with network issues."""
        with patch.dict(
            os.environ,
            {
                "AIQA_SERVER_URL": "http://localhost:3000",
                "AIQA_API_KEY": "test-api-key",
            },
        ):
            start_time = time.time()
            client = get_aiqa_client()
            elapsed = time.time() - start_time

            # Initialization should be fast (< 1 second)
            assert elapsed < 1.0
            assert client is not None
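The removed tests exercised a lazy-start pattern on the exporter's background flush thread (flush_timer, _auto_flush_started, startup_delay_seconds): no thread at construction, thread created on first export(), startup delay before the first flush, and shutdown() safe before the thread ever starts. The sketch below illustrates that pattern only; it is an assumption-laden stand-in, not the actual AIQASpanExporter in aiqa/aiqa_exporter.py.

# Rough sketch of the lazy flush-thread pattern the removed tests checked.
# Names and behaviour here are illustrative assumptions, not the package's code.
import threading
from typing import Optional


class LazyFlushExporter:
    def __init__(self, startup_delay_seconds: float = 0.1, flush_interval: float = 5.0):
        self.startup_delay_seconds = startup_delay_seconds
        self.flush_interval = flush_interval
        self.flush_timer: Optional[threading.Thread] = None
        self._auto_flush_started = False
        self._stop = threading.Event()
        self._lock = threading.Lock()

    def _ensure_flush_thread(self) -> None:
        # Lazy creation: constructing the exporter never spawns a thread.
        with self._lock:
            if self._auto_flush_started:
                return
            self.flush_timer = threading.Thread(target=self._flush_loop, daemon=True)
            self.flush_timer.start()
            self._auto_flush_started = True

    def _flush_loop(self) -> None:
        # Wait out the startup delay before the first flush attempt.
        if self._stop.wait(self.startup_delay_seconds):
            return
        while not self._stop.wait(self.flush_interval):
            pass  # flush buffered spans here

    def export(self, spans) -> None:
        self._ensure_flush_thread()  # the first export starts the thread
        # buffer spans here

    def shutdown(self) -> None:
        # Safe to call repeatedly, and before the thread was ever started.
        self._stop.set()
        if self.flush_timer is not None:
            self.flush_timer.join(timeout=2.0)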