azure-functions-durable 1.3.2__py3-none-any.whl → 1.4.0rc2__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to their public registries; it is provided for informational purposes only.
- azure/durable_functions/__init__.py +8 -0
- azure/durable_functions/decorators/durable_app.py +64 -1
- azure/durable_functions/models/DurableOrchestrationContext.py +24 -0
- azure/durable_functions/openai_agents/__init__.py +13 -0
- azure/durable_functions/openai_agents/context.py +194 -0
- azure/durable_functions/openai_agents/event_loop.py +17 -0
- azure/durable_functions/openai_agents/exceptions.py +11 -0
- azure/durable_functions/openai_agents/handoffs.py +67 -0
- azure/durable_functions/openai_agents/model_invocation_activity.py +268 -0
- azure/durable_functions/openai_agents/orchestrator_generator.py +67 -0
- azure/durable_functions/openai_agents/runner.py +103 -0
- azure/durable_functions/openai_agents/task_tracker.py +171 -0
- azure/durable_functions/openai_agents/tools.py +148 -0
- azure/durable_functions/openai_agents/usage_telemetry.py +69 -0
- {azure_functions_durable-1.3.2.dist-info → azure_functions_durable-1.4.0rc2.dist-info}/METADATA +7 -2
- {azure_functions_durable-1.3.2.dist-info → azure_functions_durable-1.4.0rc2.dist-info}/RECORD +26 -9
- tests/models/test_DurableOrchestrationContext.py +8 -0
- tests/openai_agents/__init__.py +0 -0
- tests/openai_agents/test_context.py +466 -0
- tests/openai_agents/test_task_tracker.py +290 -0
- tests/openai_agents/test_usage_telemetry.py +99 -0
- tests/orchestrator/openai_agents/__init__.py +0 -0
- tests/orchestrator/openai_agents/test_openai_agents.py +316 -0
- {azure_functions_durable-1.3.2.dist-info → azure_functions_durable-1.4.0rc2.dist-info}/LICENSE +0 -0
- {azure_functions_durable-1.3.2.dist-info → azure_functions_durable-1.4.0rc2.dist-info}/WHEEL +0 -0
- {azure_functions_durable-1.3.2.dist-info → azure_functions_durable-1.4.0rc2.dist-info}/top_level.txt +0 -0
```diff
--- /dev/null
+++ b/tests/openai_agents/test_task_tracker.py
@@ -0,0 +1,290 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+import pytest
+import json
+from unittest.mock import Mock
+
+from azure.durable_functions.openai_agents.task_tracker import TaskTracker
+from azure.durable_functions.openai_agents.exceptions import YieldException
+from azure.durable_functions.models.DurableOrchestrationContext import DurableOrchestrationContext
+from azure.durable_functions.models.history.HistoryEvent import HistoryEvent
+from azure.durable_functions.models.history.HistoryEventType import HistoryEventType
+from azure.durable_functions.models.RetryOptions import RetryOptions
+
+
+class MockTask:
+    """Mock Task object for testing."""
+
+    def __init__(self, activity_name: str, input_data: str):
+        self.activity_name = activity_name
+        self.input = input_data
+        self.id = f"task_{activity_name}"
+
+
+def create_mock_context(task_completed_results=None):
+    """Create a mock DurableOrchestrationContext with configurable history.
+
+    Args:
+    ----
+        task_completed_results: List of objects to be serialized as JSON results.
+            Each object will be json.dumps() serialized automatically.
+    """
+    context = Mock(spec=DurableOrchestrationContext)
+
+    # Create history events for completed tasks
+    histories = []
+    if task_completed_results:
+        for result_object in task_completed_results:
+            history_event = Mock(spec=HistoryEvent)
+            history_event.event_type = HistoryEventType.TASK_COMPLETED
+            history_event.Result = json.dumps(result_object)
+            histories.append(history_event)
+
+    context.histories = histories
+
+    # Mock call_activity method
+    def mock_call_activity(activity_name, input_data):
+        return MockTask(activity_name, input_data)
+
+    context.call_activity = Mock(side_effect=mock_call_activity)
+
+    # Mock call_activity_with_retry method
+    def mock_call_activity_with_retry(activity_name, retry_options, input_data):
+        return MockTask(activity_name, input_data)
+
+    context.call_activity_with_retry = Mock(side_effect=mock_call_activity_with_retry)
+
+    return context
+
+
+class TestTaskTracker:
+    """Tests for the TaskTracker implementation."""
+
+    def _consume_generator_with_return_value(self, generator):
+        """Consume a generator and capture both yielded items and return value.
+
+        Returns
+        -------
+        tuple
+            (yielded_items, return_value), where return_value is None if there is no return value
+        """
+        yielded_items = []
+        return_value = None
+        try:
+            while True:
+                yielded_items.append(next(generator))
+        except StopIteration as e:
+            return_value = e.value
+        return yielded_items, return_value
+
+    def test_get_activity_call_result_returns_result_when_history_available(self):
+        """Test get_activity_call_result returns the result when history is available."""
+        context = create_mock_context(task_completed_results=["test_result"])
+        tracker = TaskTracker(context)
+
+        result = tracker.get_activity_call_result("test_activity", "test_input")
+        assert result == "test_result"
+
+    def test_get_activity_call_result_raises_yield_exception_when_no_history(self):
+        """Test get_activity_call_result raises YieldException when no history is available."""
+        context = create_mock_context(task_completed_results=[])
+        tracker = TaskTracker(context)
+
+        with pytest.raises(YieldException) as exc_info:
+            tracker.get_activity_call_result("test_activity", "test_input")
+
+        task = exc_info.value.task
+        assert task.activity_name == "test_activity"
+        assert task.input == "test_input"
+
+    def test_get_activity_call_result_with_retry_returns_result_when_history_available(self):
+        """Test get_activity_call_result_with_retry returns the result when history is available."""
+        context = create_mock_context(task_completed_results=["result"])
+        tracker = TaskTracker(context)
+        retry_options = RetryOptions(1000, 3)
+
+        result = tracker.get_activity_call_result_with_retry("activity", retry_options, "input")
+        assert result == "result"
+
+    def test_get_activity_call_result_with_retry_raises_yield_exception_when_no_history(self):
+        """Test get_activity_call_result_with_retry raises YieldException when no history is available."""
+        context = create_mock_context(task_completed_results=[])
+        tracker = TaskTracker(context)
+        retry_options = RetryOptions(1000, 3)
+
+        with pytest.raises(YieldException) as exc_info:
+            tracker.get_activity_call_result_with_retry("activity", retry_options, "input")
+
+        task = exc_info.value.task
+        assert task.activity_name == "activity"
+        assert task.input == "input"
+
+    def test_multiple_activity_calls_with_partial_history(self):
+        """Test sequential activity calls with partial history available."""
+        context = create_mock_context(task_completed_results=["result1", "result2"])
+        tracker = TaskTracker(context)
+
+        # First call returns result1
+        result1 = tracker.get_activity_call_result("activity1", "input1")
+        assert result1 == "result1"
+
+        # Second call returns result2
+        result2 = tracker.get_activity_call_result("activity2", "input2")
+        assert result2 == "result2"
+
+        # Third call raises YieldException (no more history)
+        with pytest.raises(YieldException):
+            tracker.get_activity_call_result("activity3", "input3")
+
+    def test_execute_orchestrator_function_return_value(self):
+        """Test execute_orchestrator_function with an orchestrator function that returns a value."""
+        context = create_mock_context()
+        tracker = TaskTracker(context)
+
+        expected_result = "orchestrator_result"
+
+        def test_orchestrator():
+            return expected_result
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield nothing and return the value
+        assert yielded_items == []
+        assert return_value == expected_result
+
+    def test_execute_orchestrator_function_get_activity_call_result_incomplete(self):
+        """Test an orchestrator function that requests an activity result before the activity call completes (not a replay)."""
+        context = create_mock_context()  # No history available
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            return tracker.get_activity_call_result("activity", "test_input")
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield a task with this activity name
+        assert yielded_items[0].activity_name == "activity"
+        assert len(yielded_items) == 1
+        assert return_value is None
+
+    def test_execute_orchestrator_function_get_complete_activity_result(self):
+        """Test an orchestrator function that gets a completed activity call result (replay)."""
+        context = create_mock_context(task_completed_results=["activity_result"])
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            return tracker.get_activity_call_result("activity", "test_input")
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield the queued task and return the result
+        assert yielded_items[0].activity_name == "activity"
+        assert len(yielded_items) == 1
+        assert return_value == "activity_result"
+
+    def test_execute_orchestrator_function_yields_tasks(self):
+        """Test execute_orchestrator_function with an orchestrator function that yields tasks."""
+        context = create_mock_context()
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            yield "task_1"
+            yield "task_2"
+            return "final_result"
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield the tasks in order and return the final result
+        assert yielded_items[0] == "task_1"
+        assert yielded_items[1] == "task_2"
+        assert len(yielded_items) == 2
+        assert return_value == "final_result"
+
+    def test_execute_orchestrator_function_context_activity_call_incomplete(self):
+        """Test an orchestrator function that requests an activity result before the activity call completes (not a replay) after a DurableAIAgentContext.call_activity invocation."""
+        context = create_mock_context(task_completed_results=["result1"])
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            # Simulate invoking DurableAIAgentContext.call_activity and yielding the resulting task
+            tracker.record_activity_call()
+            yield "task"  # Produced "result1"
+
+            return tracker.get_activity_call_result("activity", "input")  # Incomplete: raises YieldException, which is translated into a yield
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield the incomplete task
+        assert yielded_items[0] == "task"
+        assert yielded_items[1].activity_name == "activity"
+        assert len(yielded_items) == 2
+        assert return_value is None
+
+    def test_execute_orchestrator_function_context_activity_call_complete(self):
+        """Test an orchestrator function that gets a completed activity call result (replay) after a DurableAIAgentContext.call_activity invocation."""
+        context = create_mock_context(task_completed_results=["result1", "result2"])
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            # Simulate invoking DurableAIAgentContext.call_activity and yielding the resulting task
+            tracker.record_activity_call()
+            yield "task"  # Produced "result1"
+
+            return tracker.get_activity_call_result("activity", "input")  # Complete, should return "result2"
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Should yield the queued task and return the result
+        assert yielded_items[0] == "task"
+        assert yielded_items[1].activity_name == "activity"
+        assert len(yielded_items) == 2
+        assert return_value == "result2"
+
+    def test_execute_orchestrator_function_mixed_behaviors_combination(self):
+        """Test execute_orchestrator_function mixing all documented behaviors."""
+        context = create_mock_context(task_completed_results=[
+            "result1",
+            "result2",
+            "result3",
+            "result4"
+        ])
+        tracker = TaskTracker(context)
+
+        def test_orchestrator():
+            activity1_result = tracker.get_activity_call_result("activity1", "input1")
+
+            # Simulate invoking DurableAIAgentContext.call_activity("activity2") and yielding the resulting task
+            tracker.record_activity_call()
+            yield "yielded task from activity2"  # Produced "result2"
+
+            # Yield a regular task, possibly returned from DurableAIAgentContext methods like wait_for_external_event
+            yield "another yielded task"
+
+            activity3_result = tracker.get_activity_call_result("activity3", "input3")
+
+            # Simulate invoking DurableAIAgentContext.call_activity("activity4") and yielding the resulting task
+            tracker.record_activity_call()
+            yield "yielded task from activity4"  # Produced "result4"
+
+            return f"activity1={activity1_result};activity3={activity3_result}"
+
+        result_gen = tracker.execute_orchestrator_function(test_orchestrator)
+        yielded_items, return_value = self._consume_generator_with_return_value(result_gen)
+
+        # Verify yield order
+        assert yielded_items[0].activity_name == "activity1"
+        assert yielded_items[1] == "yielded task from activity2"
+        assert yielded_items[2] == "another yielded task"
+        assert yielded_items[3].activity_name == "activity3"
+        assert yielded_items[4] == "yielded task from activity4"
+        assert len(yielded_items) == 5
+
+        # Verify return value
+        expected_return = "activity1=result1;activity3=result3"
+        assert return_value == expected_return
```
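These tests pin down the TaskTracker contract without showing the implementation itself. For orientation, here is a minimal sketch consistent with the behaviors asserted above: recorded TASK_COMPLETED results are consumed strictly in order during replay, every activity call is queued for re-yielding to the durable framework, and a YieldException suspends the orchestrator when a result is not yet recorded. This is an inferred reconstruction, not the code shipped in the wheel; in particular, the `YieldException(task)` constructor signature is an assumption.

```python
import inspect
import json

from azure.durable_functions.openai_agents.exceptions import YieldException
from azure.durable_functions.models.history.HistoryEventType import HistoryEventType


class TaskTracker:
    """Sketch of the replay-aware tracker the tests above exercise."""

    def __init__(self, context):
        self._context = context
        # Recorded results are consumed strictly in order during replay.
        self._completed = [e for e in context.histories
                           if e.event_type == HistoryEventType.TASK_COMPLETED]
        self._cursor = 0
        self._queued = []  # tasks to re-yield to the durable framework

    def record_activity_call(self):
        # The orchestrator yields a task itself; reserve one history slot for it.
        self._cursor += 1

    def get_activity_call_result(self, activity_name, input_data):
        return self._resolve(self._context.call_activity(activity_name, input_data))

    def get_activity_call_result_with_retry(self, activity_name, retry_options, input_data):
        return self._resolve(self._context.call_activity_with_retry(
            activity_name, retry_options, input_data))

    def _resolve(self, task):
        self._queued.append(task)  # re-yielded so the framework sees the call
        if self._cursor < len(self._completed):
            event = self._completed[self._cursor]
            self._cursor += 1
            return json.loads(event.Result)
        # No recorded result yet: suspend the orchestrator at this call.
        raise YieldException(task)  # assumes YieldException exposes the task as .task

    def _drain(self):
        while self._queued:
            yield self._queued.pop(0)

    def execute_orchestrator_function(self, fn):
        try:
            result = fn()
        except YieldException:
            yield from self._drain()  # includes the task that triggered suspension
            return None
        if not inspect.isgenerator(result):
            yield from self._drain()  # replayed calls are still re-yielded
            return result
        try:
            while True:
                item = next(result)
                yield from self._drain()  # tasks queued during this step come first
                yield item
        except StopIteration as stop:
            yield from self._drain()
            return stop.value
        except YieldException:
            yield from self._drain()
            return None
```

Under this reading, test_execute_orchestrator_function_mixed_behaviors_combination is the clearest statement of the ordering guarantee: tasks created by get_activity_call_result are emitted before the item the orchestrator yields in the same step, which is exactly the order the test asserts.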
```diff
--- /dev/null
+++ b/tests/openai_agents/test_usage_telemetry.py
@@ -0,0 +1,99 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+import unittest.mock
+
+
+class TestUsageTelemetry:
+    """Test cases for the UsageTelemetry class."""
+
+    def test_log_usage_once_logs_message_on_first_call(self, capsys):
+        """Test that log_usage_once logs the telemetry message."""
+        # Reset any previous state by creating a fresh import
+        import importlib
+        from azure.durable_functions.openai_agents import usage_telemetry
+        importlib.reload(usage_telemetry)
+        UsageTelemetryFresh = usage_telemetry.UsageTelemetry
+
+        def mock_version(package_name):
+            if package_name == "azure-functions-durable":
+                return "1.3.4"
+            elif package_name == "openai":
+                return "1.98.0"
+            elif package_name == "openai-agents":
+                return "0.2.5"
+            return "unknown"
+
+        with unittest.mock.patch('importlib.metadata.version', side_effect=mock_version):
+            UsageTelemetryFresh.log_usage_once()
+
+        captured = capsys.readouterr()
+        assert captured.out.startswith("LanguageWorkerConsoleLog")
+        assert "Detected OpenAI Agents SDK integration with Durable Functions." in captured.out
+        assert "azure-functions-durable=1.3.4" in captured.out
+        assert "openai=1.98.0" in captured.out
+        assert "openai-agents=0.2.5" in captured.out
+
+    def test_log_usage_handles_package_version_errors(self, capsys):
+        """Test that log_usage_once handles package version lookup errors gracefully."""
+        # Reset any previous state by creating a fresh import
+        import importlib
+        from azure.durable_functions.openai_agents import usage_telemetry
+        importlib.reload(usage_telemetry)
+        UsageTelemetryFresh = usage_telemetry.UsageTelemetry
+
+        # Test with a mixed success/failure scenario: some packages resolve, others fail
+        def mock_version(package_name):
+            if package_name == "azure-functions-durable":
+                return "1.3.4"
+            elif package_name == "openai":
+                raise Exception("Package not found")
+            elif package_name == "openai-agents":
+                return "0.2.5"
+            return "unknown"
+
+        with unittest.mock.patch('importlib.metadata.version', side_effect=mock_version):
+            UsageTelemetryFresh.log_usage_once()
+
+        captured = capsys.readouterr()
+        assert captured.out.startswith("LanguageWorkerConsoleLog")
+        assert "Detected OpenAI Agents SDK integration with Durable Functions." in captured.out
+        # Should handle errors gracefully: successful packages show versions, failed ones show "(not installed)"
+        assert "azure-functions-durable=1.3.4" in captured.out
+        assert "openai=(not installed)" in captured.out
+        assert "openai-agents=0.2.5" in captured.out
+
+    def test_log_usage_works_with_real_packages(self, capsys):
+        """Test that log_usage_once works with real package versions."""
+        # Reset any previous state by creating a fresh import
+        import importlib
+        from azure.durable_functions.openai_agents import usage_telemetry
+        importlib.reload(usage_telemetry)
+        UsageTelemetryFresh = usage_telemetry.UsageTelemetry
+
+        # Test without mocking to observe the real behavior
+        UsageTelemetryFresh.log_usage_once()
+
+        captured = capsys.readouterr()
+        assert captured.out.startswith("LanguageWorkerConsoleLog")
+        assert "Detected OpenAI Agents SDK integration with Durable Functions." in captured.out
+        # Should contain some version information or "(unavailable)"
+        assert ("azure-functions-durable=" in captured.out or "(unavailable)" in captured.out)
+
+    def test_log_usage_once_is_idempotent(self, capsys):
+        """Test that multiple calls to log_usage_once only log once."""
+        # Reset any previous state by creating a fresh import
+        import importlib
+        from azure.durable_functions.openai_agents import usage_telemetry
+        importlib.reload(usage_telemetry)
+        UsageTelemetryFresh = usage_telemetry.UsageTelemetry
+
+        with unittest.mock.patch('importlib.metadata.version', return_value="1.0.0"):
+            # Call multiple times
+            UsageTelemetryFresh.log_usage_once()
+            UsageTelemetryFresh.log_usage_once()
+            UsageTelemetryFresh.log_usage_once()
+
+        captured = capsys.readouterr()
+        # Should only see one log message despite multiple calls
+        log_count = captured.out.count("LanguageWorkerConsoleLogDetected OpenAI Agents SDK integration")
+        assert log_count == 1
```