azure-ai-agentserver-agentframework 1.0.0b2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- azure/ai/agentserver/__init__.py +1 -0
- azure/ai/agentserver/agentframework/__init__.py +16 -0
- azure/ai/agentserver/agentframework/_version.py +9 -0
- azure/ai/agentserver/agentframework/agent_framework.py +153 -0
- azure/ai/agentserver/agentframework/models/__init__.py +5 -0
- azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py +120 -0
- azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py +232 -0
- azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py +596 -0
- azure/ai/agentserver/agentframework/models/agent_id_generator.py +44 -0
- azure/ai/agentserver/agentframework/models/constants.py +13 -0
- azure/ai/agentserver/agentframework/py.typed +0 -0
- azure_ai_agentserver_agentframework-1.0.0b2.dist-info/METADATA +83 -0
- azure_ai_agentserver_agentframework-1.0.0b2.dist-info/RECORD +16 -0
- azure_ai_agentserver_agentframework-1.0.0b2.dist-info/WHEEL +5 -0
- azure_ai_agentserver_agentframework-1.0.0b2.dist-info/licenses/LICENSE +21 -0
- azure_ai_agentserver_agentframework-1.0.0b2.dist-info/top_level.txt +1 -0
azure/ai/agentserver/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
azure/ai/agentserver/agentframework/__init__.py
@@ -0,0 +1,16 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+__path__ = __import__("pkgutil").extend_path(__path__, __name__)
+
+from ._version import VERSION
+
+
+def from_agent_framework(agent):
+    from .agent_framework import AgentFrameworkCBAgent
+
+    return AgentFrameworkCBAgent(agent)
+
+
+__all__ = ["from_agent_framework"]
+__version__ = VERSION
azure/ai/agentserver/agentframework/_version.py
@@ -0,0 +1,9 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) Python Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+VERSION = "1.0.0b2"
azure/ai/agentserver/agentframework/agent_framework.py
@@ -0,0 +1,153 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+# pylint: disable=logging-fstring-interpolation,no-name-in-module
+from __future__ import annotations
+
+import asyncio  # pylint: disable=do-not-import-asyncio
+import os
+from typing import Any, AsyncGenerator, Union
+
+from agent_framework import AgentProtocol
+from agent_framework.azure import AzureAIAgentClient  # pylint: disable=no-name-in-module
+from opentelemetry import trace
+
+from azure.ai.agentserver.core import AgentRunContext, FoundryCBAgent
+from azure.ai.agentserver.core.constants import Constants as AdapterConstants
+from azure.ai.agentserver.core.logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger
+from azure.ai.agentserver.core.models import (
+    CreateResponse,
+    Response as OpenAIResponse,
+    ResponseStreamEvent,
+)
+from azure.ai.projects import AIProjectClient
+from azure.identity import DefaultAzureCredential
+
+from .models.agent_framework_input_converters import AgentFrameworkInputConverter
+from .models.agent_framework_output_non_streaming_converter import (
+    AgentFrameworkOutputNonStreamingConverter,
+)
+from .models.agent_framework_output_streaming_converter import AgentFrameworkOutputStreamingConverter
+from .models.constants import Constants
+
+logger = get_logger()
+
+
+class AgentFrameworkCBAgent(FoundryCBAgent):
+    """
+    Adapter class for integrating Agent Framework agents with the FoundryCB agent interface.
+
+    This class wraps an Agent Framework `AgentProtocol` instance and provides a unified interface
+    for running agents in both streaming and non-streaming modes. It handles input and output
+    conversion between the Agent Framework and the expected formats for FoundryCB agents.
+
+    Parameters:
+        agent (AgentProtocol): An instance of an Agent Framework agent to be adapted.
+
+    Usage:
+        - Instantiate with an Agent Framework agent.
+        - Call `agent_run` with a `CreateResponse` request body to execute the agent.
+        - Supports both streaming and non-streaming responses based on the `stream` flag.
+    """
+
+    def __init__(self, agent: AgentProtocol):
+        super().__init__()
+        self.agent = agent
+        logger.info(f"Initialized AgentFrameworkCBAgent with agent: {type(agent).__name__}")
+
+    def _resolve_stream_timeout(self, request_body: CreateResponse) -> float:
+        """Resolve idle timeout for streaming updates.
+
+        Order of precedence:
+        1) request_body.stream_timeout_s (if provided)
+        2) env var Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S
+        3) Constants.DEFAULT_STREAM_TIMEOUT_S
+
+        :param request_body: The CreateResponse request body.
+        :type request_body: CreateResponse
+
+        :return: The resolved stream timeout in seconds.
+        :rtype: float
+        """
+        override = request_body.get("stream_timeout_s", None)
+        if override is not None:
+            return float(override)
+        env_val = os.getenv(Constants.AGENTS_ADAPTER_STREAM_TIMEOUT_S)
+        return float(env_val) if env_val is not None else float(Constants.DEFAULT_STREAM_TIMEOUT_S)
+
+    def init_tracing(self):
+        exporter = os.environ.get(AdapterConstants.OTEL_EXPORTER_ENDPOINT)
+        app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
+        project_endpoint = os.environ.get(AdapterConstants.AZURE_AI_PROJECT_ENDPOINT)
+
+        if project_endpoint:
+            project_client = AIProjectClient(endpoint=project_endpoint, credential=DefaultAzureCredential())
+            agent_client = AzureAIAgentClient(project_client=project_client)
+            agent_client.setup_azure_ai_observability()
+        elif exporter or app_insights_conn_str:
+            os.environ["WORKFLOW_ENABLE_OTEL"] = "true"
+            from agent_framework.observability import setup_observability
+
+            setup_observability(
+                enable_sensitive_data=True,
+                otlp_endpoint=exporter,
+                applicationinsights_connection_string=app_insights_conn_str,
+            )
+        self.tracer = trace.get_tracer(__name__)
+
+    async def agent_run(
+        self, context: AgentRunContext
+    ) -> Union[
+        OpenAIResponse,
+        AsyncGenerator[ResponseStreamEvent, Any],
+    ]:
+        logger.info(f"Starting agent_run with stream={context.stream}")
+        request_input = context.request.get("input")
+
+        input_converter = AgentFrameworkInputConverter()
+        message = input_converter.transform_input(request_input)
+        logger.debug(f"Transformed input message type: {type(message)}")
+
+        # Use split converters
+        if context.stream:
+            logger.info("Running agent in streaming mode")
+            streaming_converter = AgentFrameworkOutputStreamingConverter(context)
+
+            async def stream_updates():
+                update_count = 0
+                timeout_s = self._resolve_stream_timeout(context.request)
+                logger.info("Starting streaming with idle-timeout=%.2fs", timeout_s)
+                for ev in streaming_converter.initial_events():
+                    yield ev
+
+                # Iterate with per-update timeout; terminate if idle too long
+                aiter = self.agent.run_stream(message).__aiter__()
+                while True:
+                    try:
+                        update = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s)
+                    except StopAsyncIteration:
+                        logger.debug("Agent streaming iterator finished (StopAsyncIteration)")
+                        break
+                    except asyncio.TimeoutError:
+                        logger.warning("Streaming idle timeout reached (%.1fs); terminating stream.", timeout_s)
+                        for ev in streaming_converter.completion_events():
+                            yield ev
+                        return
+                    update_count += 1
+                    transformed = streaming_converter.transform_output_for_streaming(update)
+                    for event in transformed:
+                        yield event
+                for ev in streaming_converter.completion_events():
+                    yield ev
+                logger.info("Streaming completed with %d updates", update_count)
+
+            return stream_updates()
+
+        # Non-streaming path
+        logger.info("Running agent in non-streaming mode")
+        non_streaming_converter = AgentFrameworkOutputNonStreamingConverter(context)
+        result = await self.agent.run(message)
+        logger.debug(f"Agent run completed, result type: {type(result)}")
+        transformed_result = non_streaming_converter.transform_output_for_response(result)
+        logger.info("Agent run and transformation completed successfully")
+        return transformed_result
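
The streaming path in `agent_run` above applies the resolved timeout to each awaited update, not to the stream as a whole. A minimal standalone sketch of that idle-timeout pattern, assuming a hypothetical `updates()` async generator in place of `agent.run_stream(message)`:

```python
import asyncio


async def updates():
    # stand-in for agent.run_stream(message): any async iterator of updates
    for i in range(3):
        await asyncio.sleep(0.1)
        yield f"update-{i}"


async def consume_with_idle_timeout(timeout_s: float = 1.0):
    aiter = updates().__aiter__()
    while True:
        try:
            # the timeout restarts for every item, so a slow-but-steady stream
            # never trips it; only a stalled producer does
            item = await asyncio.wait_for(aiter.__anext__(), timeout=timeout_s)
        except StopAsyncIteration:
            break  # producer finished normally
        except asyncio.TimeoutError:
            print("idle timeout reached; terminating stream early")
            return
        print("received", item)


asyncio.run(consume_with_idle_timeout())
```
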
@@ -0,0 +1,120 @@
|
|
|
1
|
+
# ---------------------------------------------------------
|
|
2
|
+
# Copyright (c) Microsoft Corporation. All rights reserved.
|
|
3
|
+
# ---------------------------------------------------------
|
|
4
|
+
# pylint: disable=too-many-nested-blocks,too-many-return-statements,too-many-branches
|
|
5
|
+
# mypy: disable-error-code="no-redef"
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Dict, List
|
|
9
|
+
|
|
10
|
+
from agent_framework import ChatMessage, Role as ChatRole
|
|
11
|
+
from agent_framework._types import TextContent
|
|
12
|
+
|
|
13
|
+
from azure.ai.agentserver.core.logger import get_logger
|
|
14
|
+
|
|
15
|
+
logger = get_logger()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AgentFrameworkInputConverter:
|
|
19
|
+
"""Normalize inputs for agent.run.
|
|
20
|
+
|
|
21
|
+
Accepts: str | List | None
|
|
22
|
+
Returns: None | str | ChatMessage | list[str] | list[ChatMessage]
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
def transform_input(
|
|
26
|
+
self,
|
|
27
|
+
input: str | List[Dict] | None,
|
|
28
|
+
) -> str | ChatMessage | list[str] | list[ChatMessage] | None:
|
|
29
|
+
logger.debug("Transforming input of type: %s", type(input))
|
|
30
|
+
|
|
31
|
+
if input is None:
|
|
32
|
+
return None
|
|
33
|
+
|
|
34
|
+
if isinstance(input, str):
|
|
35
|
+
return input
|
|
36
|
+
|
|
37
|
+
try:
|
|
38
|
+
if isinstance(input, list):
|
|
39
|
+
messages: list[str | ChatMessage] = []
|
|
40
|
+
|
|
41
|
+
for item in input:
|
|
42
|
+
# Case 1: ImplicitUserMessage with content as str or list of ItemContentInputText
|
|
43
|
+
if self._is_implicit_user_message(item):
|
|
44
|
+
content = item.get("content", None)
|
|
45
|
+
if isinstance(content, str):
|
|
46
|
+
messages.append(content)
|
|
47
|
+
elif isinstance(content, list):
|
|
48
|
+
text_parts: list[str] = []
|
|
49
|
+
for content_item in content:
|
|
50
|
+
text_content = self._extract_input_text(content_item)
|
|
51
|
+
if text_content:
|
|
52
|
+
text_parts.append(text_content)
|
|
53
|
+
if text_parts:
|
|
54
|
+
messages.append(" ".join(text_parts))
|
|
55
|
+
|
|
56
|
+
# Case 2: Explicit message params (user/assistant/system)
|
|
57
|
+
elif (
|
|
58
|
+
item.get("type") == "message"
|
|
59
|
+
and item.get("role") is not None
|
|
60
|
+
and item.get("content") is not None
|
|
61
|
+
):
|
|
62
|
+
role_map = {
|
|
63
|
+
"user": ChatRole.USER,
|
|
64
|
+
"assistant": ChatRole.ASSISTANT,
|
|
65
|
+
"system": ChatRole.SYSTEM,
|
|
66
|
+
}
|
|
67
|
+
role = role_map.get(item.get("role", "user"), ChatRole.USER)
|
|
68
|
+
|
|
69
|
+
content_text = ""
|
|
70
|
+
item_content = item.get("content", None)
|
|
71
|
+
if item_content and isinstance(item_content, list):
|
|
72
|
+
text_parts: list[str] = []
|
|
73
|
+
for content_item in item_content:
|
|
74
|
+
item_text = self._extract_input_text(content_item)
|
|
75
|
+
if item_text:
|
|
76
|
+
text_parts.append(item_text)
|
|
77
|
+
content_text = " ".join(text_parts) if text_parts else ""
|
|
78
|
+
elif item_content and isinstance(item_content, str):
|
|
79
|
+
content_text = str(item_content)
|
|
80
|
+
|
|
81
|
+
if content_text:
|
|
82
|
+
messages.append(ChatMessage(role=role, text=content_text))
|
|
83
|
+
|
|
84
|
+
# Determine the most natural return type
|
|
85
|
+
if not messages:
|
|
86
|
+
return None
|
|
87
|
+
if len(messages) == 1:
|
|
88
|
+
return messages[0]
|
|
89
|
+
if all(isinstance(m, str) for m in messages):
|
|
90
|
+
return [m for m in messages if isinstance(m, str)]
|
|
91
|
+
if all(isinstance(m, ChatMessage) for m in messages):
|
|
92
|
+
return [m for m in messages if isinstance(m, ChatMessage)]
|
|
93
|
+
|
|
94
|
+
# Mixed content: coerce ChatMessage to str by extracting TextContent parts
|
|
95
|
+
result: list[str] = []
|
|
96
|
+
for msg in messages:
|
|
97
|
+
if isinstance(msg, ChatMessage):
|
|
98
|
+
text_parts: list[str] = []
|
|
99
|
+
for c in getattr(msg, "contents", []) or []:
|
|
100
|
+
if isinstance(c, TextContent):
|
|
101
|
+
text_parts.append(c.text)
|
|
102
|
+
result.append(" ".join(text_parts) if text_parts else str(msg))
|
|
103
|
+
else:
|
|
104
|
+
result.append(str(msg))
|
|
105
|
+
return result
|
|
106
|
+
|
|
107
|
+
raise TypeError(f"Unsupported input type: {type(input)}")
|
|
108
|
+
except Exception as e:
|
|
109
|
+
logger.error("Error processing messages: %s", e, exc_info=True)
|
|
110
|
+
raise Exception(f"Error processing messages: {e}") from e # pylint: disable=broad-exception-raised
|
|
111
|
+
|
|
112
|
+
def _is_implicit_user_message(self, item: Dict) -> bool:
|
|
113
|
+
return "content" in item and "role" not in item and "type" not in item
|
|
114
|
+
|
|
115
|
+
def _extract_input_text(self, content_item: Dict) -> str:
|
|
116
|
+
if content_item.get("type") == "input_text" and "text" in content_item:
|
|
117
|
+
text_content = content_item.get("text")
|
|
118
|
+
if isinstance(text_content, str):
|
|
119
|
+
return text_content
|
|
120
|
+
return None # type: ignore
|
|
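
To make the normalization rules concrete, here is a short sketch of how typical Responses-style inputs map through `transform_input`; the expected outputs are inferred from the branches above, not from separate documentation:

```python
from azure.ai.agentserver.agentframework.models.agent_framework_input_converters import (
    AgentFrameworkInputConverter,
)

converter = AgentFrameworkInputConverter()

# a plain string passes through unchanged
converter.transform_input("hi")  # -> "hi"

# an implicit user message (content, no role/type) collapses to its text
converter.transform_input([{"content": "hello"}])  # -> "hello"

# input_text parts are joined with spaces
converter.transform_input(
    [{"content": [{"type": "input_text", "text": "a"}, {"type": "input_text", "text": "b"}]}]
)  # -> "a b"

# explicit role-tagged messages become ChatMessage instances
converter.transform_input(
    [
        {"type": "message", "role": "system", "content": "be brief"},
        {"type": "message", "role": "user", "content": "hi"},
    ]
)  # -> [ChatMessage(role=SYSTEM, ...), ChatMessage(role=USER, ...)]
```
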
azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py
@@ -0,0 +1,232 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+from __future__ import annotations
+
+import datetime
+import json
+from typing import Any, List
+
+from agent_framework import AgentRunResponse, FunctionResultContent
+from agent_framework._types import FunctionCallContent, TextContent
+
+from azure.ai.agentserver.core import AgentRunContext
+from azure.ai.agentserver.core.logger import get_logger
+from azure.ai.agentserver.core.models import Response as OpenAIResponse
+from azure.ai.agentserver.core.models.projects import (
+    ItemContentOutputText,
+    ResponsesAssistantMessageItemResource,
+)
+
+from .agent_id_generator import AgentIdGenerator
+from .constants import Constants
+
+logger = get_logger()
+
+
+class AgentFrameworkOutputNonStreamingConverter:  # pylint: disable=name-too-long
+    """Non-streaming converter: AgentRunResponse -> OpenAIResponse."""
+
+    def __init__(self, context: AgentRunContext):
+        self._context = context
+        self._response_id = None
+        self._response_created_at = None
+
+    def _ensure_response_started(self) -> None:
+        if not self._response_id:
+            self._response_id = self._context.response_id  # type: ignore
+        if not self._response_created_at:
+            self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp())  # type: ignore
+
+    def _build_item_content_output_text(self, text: str) -> ItemContentOutputText:
+        return ItemContentOutputText(text=text, annotations=[])
+
+    def _new_assistant_message_item(self, message_text: str) -> ResponsesAssistantMessageItemResource:
+        item_content = self._build_item_content_output_text(message_text)
+        return ResponsesAssistantMessageItemResource(
+            id=self._context.id_generator.generate_message_id(), status="completed", content=[item_content]
+        )
+
+    def transform_output_for_response(self, response: AgentRunResponse) -> OpenAIResponse:
+        """Build an OpenAIResponse capturing all supported content types.
+
+        Previously this method only emitted text message items. We now also capture:
+        - FunctionCallContent -> function_call output item
+        - FunctionResultContent -> function_call_output item
+
+        to stay aligned with the streaming converter so no output is lost.
+
+        :param response: The AgentRunResponse from the agent framework.
+        :type response: AgentRunResponse
+
+        :return: The constructed OpenAIResponse.
+        :rtype: OpenAIResponse
+        """
+        logger.debug("Transforming non-streaming response (messages=%d)", len(response.messages))
+        self._ensure_response_started()
+
+        completed_items: List[dict] = []
+
+        for i, message in enumerate(response.messages):
+            logger.debug("Non-streaming: processing message index=%d type=%s", i, type(message).__name__)
+            contents = getattr(message, "contents", None)
+            if not contents:
+                continue
+            for j, content in enumerate(contents):
+                logger.debug(" content index=%d in message=%d type=%s", j, i, type(content).__name__)
+                self._append_content_item(content, completed_items)
+
+        response_data = self._construct_response_data(completed_items)
+        openai_response = OpenAIResponse(response_data)
+        logger.info(
+            "OpenAIResponse built (id=%s, items=%d)",
+            self._response_id,
+            len(completed_items),
+        )
+        return openai_response
+
+    # ------------------------- helper append methods -------------------------
+
+    def _append_content_item(self, content: Any, sink: List[dict]) -> None:
+        """Dispatch a content object to the appropriate append helper.
+
+        Adding this indirection keeps the main transform method compact and makes it
+        simpler to extend with new content types later.
+
+        :param content: The content object to append.
+        :type content: Any
+        :param sink: The list to append the converted content dict to.
+        :type sink: List[dict]
+
+        :return: None
+        :rtype: None
+        """
+        if isinstance(content, TextContent):
+            self._append_text_content(content, sink)
+        elif isinstance(content, FunctionCallContent):
+            self._append_function_call_content(content, sink)
+        elif isinstance(content, FunctionResultContent):
+            self._append_function_result_content(content, sink)
+        else:
+            logger.debug("unsupported content type skipped: %s", type(content).__name__)
+
+    def _append_text_content(self, content: TextContent, sink: List[dict]) -> None:
+        text_value = getattr(content, "text", None)
+        if not text_value:
+            return
+        item_id = self._context.id_generator.generate_message_id()
+        sink.append(
+            {
+                "id": item_id,
+                "type": "message",
+                "status": "completed",
+                "role": "assistant",
+                "content": [
+                    {
+                        "type": "output_text",
+                        "text": text_value,
+                        "annotations": [],
+                        "logprobs": [],
+                    }
+                ],
+            }
+        )
+        logger.debug(" added message item id=%s text_len=%d", item_id, len(text_value))
+
+    def _append_function_call_content(self, content: FunctionCallContent, sink: List[dict]) -> None:
+        name = getattr(content, "name", "") or ""
+        arguments = getattr(content, "arguments", "")
+        if not isinstance(arguments, str):
+            try:
+                arguments = json.dumps(arguments)
+            except Exception:  # pragma: no cover - fallback  # pylint: disable=broad-exception-caught
+                arguments = str(arguments)
+        call_id = getattr(content, "call_id", None) or self._context.id_generator.generate_function_call_id()
+        func_item_id = self._context.id_generator.generate_function_call_id()
+        sink.append(
+            {
+                "id": func_item_id,
+                "type": "function_call",
+                "status": "completed",
+                "call_id": call_id,
+                "name": name,
+                "arguments": arguments or "",
+            }
+        )
+        logger.debug(
+            " added function_call item id=%s call_id=%s name=%s args_len=%d",
+            func_item_id,
+            call_id,
+            name,
+            len(arguments or ""),
+        )
+
+    def _append_function_result_content(self, content: FunctionResultContent, sink: List[dict]) -> None:
+        # Coerce the function result into a simple display string.
+        result = []
+        raw = getattr(content, "result", None)
+        if isinstance(raw, str):
+            result = [raw]
+        elif isinstance(raw, list):
+            for item in raw:
+                result.append(self._coerce_result_text(item))  # type: ignore
+        call_id = getattr(content, "call_id", None) or ""
+        func_out_id = self._context.id_generator.generate_function_output_id()
+        sink.append(
+            {
+                "id": func_out_id,
+                "type": "function_call_output",
+                "status": "completed",
+                "call_id": call_id,
+                "output": json.dumps(result) if len(result) > 0 else "",
+            }
+        )
+        logger.debug(
+            "added function_call_output item id=%s call_id=%s output_len=%d",
+            func_out_id,
+            call_id,
+            len(result),
+        )
+
+    # ------------- simple normalization helper -------------------------
+    def _coerce_result_text(self, value: Any) -> str | dict:
+        """
+        Return a string if value is already str or a TextContent-like object; else str(value).
+
+        :param value: The value to coerce.
+        :type value: Any
+
+        :return: The coerced string or dict.
+        :rtype: str | dict
+        """
+        if value is None:
+            return ""
+        if isinstance(value, str):
+            return value
+        # Direct TextContent instance
+        if isinstance(value, TextContent):
+            content_payload = {"type": "text", "text": getattr(value, "text", "")}
+            return content_payload
+
+        return ""
+
+    def _construct_response_data(self, output_items: List[dict]) -> dict:
+        agent_id = AgentIdGenerator.generate(self._context)
+
+        response_data = {
+            "object": "response",
+            "metadata": {},
+            "agent": agent_id,
+            "conversation": self._context.get_conversation_object(),
+            "type": "message",
+            "role": "assistant",
+            "temperature": Constants.DEFAULT_TEMPERATURE,
+            "top_p": Constants.DEFAULT_TOP_P,
+            "user": "",
+            "id": self._context.response_id,
+            "created_at": self._response_created_at,
+            "output": output_items,
+            "parallel_tool_calls": True,
+            "status": "completed",
+        }
+        return response_data
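
For orientation, an agent reply containing one text message and one tool call would, per the append helpers above, produce an `output` array shaped roughly as follows; the IDs, tool name, and argument values are illustrative only:

```python
output = [
    {
        "id": "<generated message id>",
        "type": "message",
        "status": "completed",
        "role": "assistant",
        "content": [
            {"type": "output_text", "text": "Checking the weather...", "annotations": [], "logprobs": []}
        ],
    },
    {
        "id": "<generated function call id>",
        "type": "function_call",
        "status": "completed",
        "call_id": "<call id from the agent, or generated>",
        "name": "get_weather",
        "arguments": '{"city": "Seattle"}',
    },
]
```
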
azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py
@@ -0,0 +1,596 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+# pylint: disable=attribute-defined-outside-init,protected-access
+# mypy: disable-error-code="call-overload,assignment,arg-type"
+from __future__ import annotations
+
+import datetime
+import json
+import uuid
+from typing import Any, List, Optional, cast
+
+from agent_framework import AgentRunResponseUpdate, FunctionApprovalRequestContent, FunctionResultContent
+from agent_framework._types import (
+    ErrorContent,
+    FunctionCallContent,
+    TextContent,
+)
+
+from azure.ai.agentserver.core import AgentRunContext
+from azure.ai.agentserver.core.logger import get_logger
+from azure.ai.agentserver.core.models import (
+    Response as OpenAIResponse,
+    ResponseStreamEvent,
+)
+from azure.ai.agentserver.core.models.projects import (
+    FunctionToolCallItemResource,
+    FunctionToolCallOutputItemResource,
+    ItemContentOutputText,
+    ResponseCompletedEvent,
+    ResponseContentPartAddedEvent,
+    ResponseContentPartDoneEvent,
+    ResponseCreatedEvent,
+    ResponseErrorEvent,
+    ResponseFunctionCallArgumentsDeltaEvent,
+    ResponseFunctionCallArgumentsDoneEvent,
+    ResponseInProgressEvent,
+    ResponseOutputItemAddedEvent,
+    ResponseOutputItemDoneEvent,
+    ResponsesAssistantMessageItemResource,
+    ResponseTextDeltaEvent,
+    ResponseTextDoneEvent,
+)
+
+from .agent_id_generator import AgentIdGenerator
+
+logger = get_logger()
+
+
+class _BaseStreamingState:
+    """Base interface for streaming state handlers."""
+
+    def prework(self, ctx: Any) -> List[ResponseStreamEvent]:  # pylint: disable=unused-argument
+        return []
+
+    def convert_content(self, ctx: Any, content) -> List[ResponseStreamEvent]:  # pylint: disable=unused-argument
+        raise NotImplementedError
+
+    def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]:  # pylint: disable=unused-argument
+        return []
+
+
+class _TextContentStreamingState(_BaseStreamingState):
+    """State handler for text and reasoning-text content during streaming."""
+
+    def __init__(self, context: AgentRunContext) -> None:
+        self.context = context
+        self.item_id = None
+        self.output_index = None
+        self.text_buffer = ""
+        self.text_part_started = False
+
+    def prework(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if self.item_id is not None:
+            return events
+
+        # Start a new assistant message item (in_progress)
+        self.item_id = self.context.id_generator.generate_message_id()
+        self.output_index = ctx._next_output_index  # pylint: disable=protected-access
+        ctx._next_output_index += 1
+
+        message_item = ResponsesAssistantMessageItemResource(
+            id=self.item_id,
+            status="in_progress",
+            content=[],
+        )
+
+        events.append(
+            ResponseOutputItemAddedEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=message_item,
+            )
+        )
+
+        if not self.text_part_started:
+            empty_part = ItemContentOutputText(text="", annotations=[], logprobs=[])
+            events.append(
+                ResponseContentPartAddedEvent(
+                    sequence_number=ctx.next_sequence(),
+                    item_id=self.item_id,
+                    output_index=self.output_index,
+                    content_index=0,
+                    part=empty_part,
+                )
+            )
+            self.text_part_started = True
+        return events
+
+    def convert_content(self, ctx: Any, content: TextContent) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if isinstance(content, TextContent):
+            delta = content.text or ""
+        else:
+            delta = getattr(content, "text", None) or getattr(content, "reasoning", "") or ""
+
+        # buffer accumulated text
+        self.text_buffer += delta
+
+        # emit delta event for text
+        assert self.item_id is not None, "Text state not initialized: missing item_id"
+        assert self.output_index is not None, "Text state not initialized: missing output_index"
+        events.append(
+            ResponseTextDeltaEvent(
+                sequence_number=ctx.next_sequence(),
+                item_id=self.item_id,
+                output_index=self.output_index,
+                content_index=0,
+                delta=delta,
+            )
+        )
+        return events
+
+    def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if not self.item_id:
+            return events
+
+        full_text = self.text_buffer
+        assert self.item_id is not None and self.output_index is not None
+        events.append(
+            ResponseTextDoneEvent(
+                sequence_number=ctx.next_sequence(),
+                item_id=self.item_id,
+                output_index=self.output_index,
+                content_index=0,
+                text=full_text,
+            )
+        )
+        final_part = ItemContentOutputText(text=full_text, annotations=[], logprobs=[])
+        events.append(
+            ResponseContentPartDoneEvent(
+                sequence_number=ctx.next_sequence(),
+                item_id=self.item_id,
+                output_index=self.output_index,
+                content_index=0,
+                part=final_part,
+            )
+        )
+        completed_item = ResponsesAssistantMessageItemResource(
+            id=self.item_id, status="completed", content=[final_part]
+        )
+        events.append(
+            ResponseOutputItemDoneEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=completed_item,
+            )
+        )
+        ctx._last_completed_text = full_text  # pylint: disable=protected-access
+        # store for final response
+        ctx._completed_output_items.append(
+            {
+                "id": self.item_id,
+                "type": "message",
+                "status": "completed",
+                "content": [
+                    {
+                        "type": "output_text",
+                        "text": full_text,
+                        "annotations": [],
+                        "logprobs": [],
+                    }
+                ],
+                "role": "assistant",
+            }
+        )
+        # reset state
+        self.item_id = None
+        self.output_index = None
+        self.text_buffer = ""
+        self.text_part_started = False
+        return events
+
+
+class _FunctionCallStreamingState(_BaseStreamingState):
+    """State handler for function_call content during streaming."""
+
+    def __init__(self, context: AgentRunContext) -> None:
+        self.context = context
+        self.item_id = None
+        self.output_index = None
+        self.call_id = None
+        self.name = None
+        self.args_buffer = ""
+        self.requires_approval = False
+        self.approval_request_id: str | None = None
+
+    def prework(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if self.item_id is not None:
+            return events
+        # initialize function-call item
+        self.item_id = self.context.id_generator.generate_function_call_id()
+        self.output_index = ctx._next_output_index
+        ctx._next_output_index += 1
+
+        self.call_id = self.call_id or str(uuid.uuid4())
+        function_item = FunctionToolCallItemResource(
+            id=self.item_id,
+            status="in_progress",
+            call_id=self.call_id,
+            name=self.name or "",
+            arguments="",
+        )
+        events.append(
+            ResponseOutputItemAddedEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=function_item,
+            )
+        )
+        return events
+
+    def convert_content(self, ctx: Any, content: FunctionCallContent) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        # record identifiers (once available)
+        self.name = getattr(content, "name", None) or self.name or ""
+        self.call_id = getattr(content, "call_id", None) or self.call_id or str(uuid.uuid4())
+
+        args_delta = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments)
+        args_delta = args_delta or ""
+        self.args_buffer += args_delta
+        assert self.item_id is not None and self.output_index is not None
+        for ch in args_delta:
+            events.append(
+                ResponseFunctionCallArgumentsDeltaEvent(
+                    sequence_number=ctx.next_sequence(),
+                    item_id=self.item_id,
+                    output_index=self.output_index,
+                    delta=ch,
+                )
+            )
+
+        # finalize if arguments are detected to be complete
+        is_done = bool(
+            getattr(content, "is_final", False)
+            or getattr(content, "final", False)
+            or getattr(content, "done", False)
+            or getattr(content, "arguments_final", False)
+            or getattr(content, "arguments_done", False)
+            or getattr(content, "finish", False)
+        )
+        if not is_done and self.args_buffer:
+            try:
+                json.loads(self.args_buffer)
+                is_done = True
+            except Exception:  # pylint: disable=broad-exception-caught
+                pass
+
+        if is_done:
+            events.append(
+                ResponseFunctionCallArgumentsDoneEvent(
+                    sequence_number=ctx.next_sequence(),
+                    item_id=self.item_id,
+                    output_index=self.output_index,
+                    arguments=self.args_buffer,
+                )
+            )
+            events.extend(self.afterwork(ctx))
+        return events
+
+    def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if not self.item_id:
+            return events
+        assert self.call_id is not None
+        done_item = FunctionToolCallItemResource(
+            id=self.item_id,
+            status="completed",
+            call_id=self.call_id,
+            name=self.name or "",
+            arguments=self.args_buffer,
+        )
+        assert self.output_index is not None
+        events.append(
+            ResponseOutputItemDoneEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=done_item,
+            )
+        )
+        # store for final response
+        ctx._completed_output_items.append(
+            {
+                "id": self.item_id,
+                "type": "function_call",
+                "call_id": self.call_id,
+                "name": self.name or "",
+                "arguments": self.args_buffer,
+                "status": "requires_approval" if self.requires_approval else "completed",
+                "requires_approval": self.requires_approval,
+                "approval_request_id": self.approval_request_id,
+            }
+        )
+        # reset
+        self.item_id = None
+        self.output_index = None
+        self.args_buffer = ""
+        self.call_id = None
+        self.name = None
+        self.requires_approval = False
+        self.approval_request_id = None
+        return events
+
+
+class _FunctionCallOutputStreamingState(_BaseStreamingState):
+    """Handles function_call_output items streaming (non-chunked simple output)."""
+
+    def __init__(
+        self,
+        context: AgentRunContext,
+        call_id: Optional[str] = None,
+        output: Optional[list[str]] = None,
+    ) -> None:
+        # Avoid mutable default argument (Ruff B006)
+        self.context = context
+        self.item_id = None
+        self.output_index = None
+        self.call_id = call_id
+        self.output = output if output is not None else []
+
+    def prework(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if self.item_id is not None:
+            return events
+        self.item_id = self.context.id_generator.generate_function_output_id()
+        self.output_index = ctx._next_output_index
+        ctx._next_output_index += 1
+
+        self.call_id = self.call_id or str(uuid.uuid4())
+        item = FunctionToolCallOutputItemResource(
+            id=self.item_id,
+            status="in_progress",
+            call_id=self.call_id,
+            output="",
+        )
+        events.append(
+            ResponseOutputItemAddedEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=item,
+            )
+        )
+        return events
+
+    def convert_content(self, ctx: Any, content: Any) -> List[ResponseStreamEvent]:  # no delta events for now
+        events: List[ResponseStreamEvent] = []
+        # treat entire output as final
+        result = []
+        raw = getattr(content, "result", None)
+        if isinstance(raw, str):
+            result = [raw or self.output]
+        elif isinstance(raw, list):
+            for item in raw:
+                result.append(self._coerce_result_text(item))
+        self.output = json.dumps(result) if len(result) > 0 else ""
+
+        events.extend(self.afterwork(ctx))
+        return events
+
+    def _coerce_result_text(self, value: Any) -> str | dict:
+        """
+        Return a string if value is already str or a TextContent-like object; else str(value).
+
+        :param value: The value to coerce.
+        :type value: Any
+
+        :return: The coerced string or dict.
+        :rtype: str | dict
+        """
+        if value is None:
+            return ""
+        if isinstance(value, str):
+            return value
+        # Direct TextContent instance
+        if isinstance(value, TextContent):
+            content_payload = {"type": "text", "text": getattr(value, "text", "")}
+            return content_payload
+
+        return ""
+
+    def afterwork(self, ctx: Any) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if not self.item_id:
+            return events
+        # Ensure types conform: call_id must be str (guarantee non-None) and output is a single string
+        str_call_id = self.call_id or ""
+        single_output: str = cast(str, self.output[0]) if self.output else ""
+        done_item = FunctionToolCallOutputItemResource(
+            id=self.item_id,
+            status="completed",
+            call_id=str_call_id,
+            output=single_output,
+        )
+        assert self.output_index is not None
+        events.append(
+            ResponseOutputItemDoneEvent(
+                sequence_number=ctx.next_sequence(),
+                output_index=self.output_index,
+                item=done_item,
+            )
+        )
+        ctx._completed_output_items.append(
+            {
+                "id": self.item_id,
+                "type": "function_call_output",
+                "status": "completed",
+                "call_id": self.call_id,
+                "output": self.output,
+            }
+        )
+        self.item_id = None
+        self.output_index = None
+        return events
+
+
+class AgentFrameworkOutputStreamingConverter:
+    """Streaming converter using content-type-specific state handlers."""
+
+    def __init__(self, context: AgentRunContext) -> None:
+        self._context = context
+        # sequence numbers must start at 0 for first emitted event
+        self._sequence = 0
+        self._response_id = None
+        self._response_created_at = None
+        self._next_output_index = 0
+        self._last_completed_text = ""
+        self._active_state: Optional[_BaseStreamingState] = None
+        self._active_kind = None  # "text" | "function_call" | "error"
+        # accumulate completed output items for final response
+        self._completed_output_items: List[dict] = []
+
+    def _ensure_response_started(self) -> None:
+        if not self._response_id:
+            self._response_id = self._context.response_id
+        if not self._response_created_at:
+            self._response_created_at = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
+
+    def next_sequence(self) -> int:
+        self._sequence += 1
+        return self._sequence
+
+    def _switch_state(self, kind: str) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if self._active_state and self._active_kind != kind:
+            events.extend(self._active_state.afterwork(self))
+            self._active_state = None
+            self._active_kind = None
+
+        if self._active_state is None:
+            if kind == "text":
+                self._active_state = _TextContentStreamingState(self._context)
+            elif kind == "function_call":
+                self._active_state = _FunctionCallStreamingState(self._context)
+            elif kind == "function_call_output":
+                self._active_state = _FunctionCallOutputStreamingState(self._context)
+            else:
+                self._active_state = None
+            self._active_kind = kind
+            if self._active_state:
+                events.extend(self._active_state.prework(self))
+        return events
+
+    def transform_output_for_streaming(self, update: AgentRunResponseUpdate) -> List[ResponseStreamEvent]:
+        logger.debug(
+            "Transforming streaming update with %d contents",
+            len(update.contents) if getattr(update, "contents", None) else 0,
+        )
+        self._ensure_response_started()
+        events: List[ResponseStreamEvent] = []
+
+        if getattr(update, "contents", None):
+            for i, content in enumerate(update.contents):
+                logger.debug("Processing content %d: %s", i, type(content))
+                if isinstance(content, TextContent):
+                    events.extend(self._switch_state("text"))
+                    if isinstance(self._active_state, _TextContentStreamingState):
+                        events.extend(self._active_state.convert_content(self, content))
+                elif isinstance(content, FunctionCallContent):
+                    events.extend(self._switch_state("function_call"))
+                    if isinstance(self._active_state, _FunctionCallStreamingState):
+                        events.extend(self._active_state.convert_content(self, content))
+                elif isinstance(content, FunctionResultContent):
+                    events.extend(self._switch_state("function_call_output"))
+                    if isinstance(self._active_state, _FunctionCallOutputStreamingState):
+                        call_id = getattr(content, "call_id", None)
+                        if call_id:
+                            self._active_state.call_id = call_id
+                        events.extend(self._active_state.convert_content(self, content))
+                elif isinstance(content, FunctionApprovalRequestContent):
+                    events.extend(self._switch_state("function_call"))
+                    if isinstance(self._active_state, _FunctionCallStreamingState):
+                        self._active_state.requires_approval = True
+                        self._active_state.approval_request_id = getattr(content, "id", None)
+                        events.extend(self._active_state.convert_content(self, content.function_call))
+                elif isinstance(content, ErrorContent):
+                    # errors are stateless; flush current state and emit error
+                    events.extend(self._switch_state("error"))
+                    events.append(
+                        ResponseErrorEvent(
+                            sequence_number=self.next_sequence(),
+                            code=getattr(content, "error_code", None) or "server_error",
+                            message=getattr(content, "message", None) or "An error occurred",
+                            param="",
+                        )
+                    )
+        return events
+
+    def finalize_last_content(self) -> List[ResponseStreamEvent]:
+        events: List[ResponseStreamEvent] = []
+        if self._active_state:
+            events.extend(self._active_state.afterwork(self))
+            self._active_state = None
+            self._active_kind = None
+        return events
+
+    def build_response(self, status: str) -> OpenAIResponse:
+        self._ensure_response_started()
+        agent_id = AgentIdGenerator.generate(self._context)
+        response_data = {
+            "object": "response",
+            "agent_id": agent_id,
+            "id": self._response_id,
+            "status": status,
+            "created_at": self._response_created_at,
+            "conversation": self._context.get_conversation_object(),
+        }
+        if status == "completed" and self._completed_output_items:
+            response_data["output"] = self._completed_output_items
+        return OpenAIResponse(response_data)
+
+    # High-level helpers to emit lifecycle events for streaming
+    def initial_events(self) -> List[ResponseStreamEvent]:
+        """
+        Emit ResponseCreatedEvent and an initial ResponseInProgressEvent.
+
+        :return: List of initial response stream events.
+        :rtype: List[ResponseStreamEvent]
+        """
+        self._ensure_response_started()
+        events: List[ResponseStreamEvent] = []
+        created_response = self.build_response(status="in_progress")
+        events.append(
+            ResponseCreatedEvent(
+                sequence_number=self.next_sequence(),
+                response=created_response,
+            )
+        )
+        events.append(
+            ResponseInProgressEvent(
+                sequence_number=self.next_sequence(),
+                response=self.build_response(status="in_progress"),
+            )
+        )
+        return events
+
+    def completion_events(self) -> List[ResponseStreamEvent]:
+        """
+        Finalize any active content and emit a single ResponseCompletedEvent.
+
+        :return: List of completion response stream events.
+        :rtype: List[ResponseStreamEvent]
+        """
+        self._ensure_response_started()
+        events: List[ResponseStreamEvent] = []
+        events.extend(self.finalize_last_content())
+        completed_response = self.build_response(status="completed")
+        events.append(
+            ResponseCompletedEvent(
+                sequence_number=self.next_sequence(),
+                response=completed_response,
+            )
+        )
+        return events
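
Taken together, the state handlers give every stream a fixed event lifecycle. A hedged sketch of the driving loop, mirroring what `AgentFrameworkCBAgent.agent_run` does above (the `agent`, `message`, and `context` parameters are assumed, not defined here):

```python
async def drive(agent, message, context):
    converter = AgentFrameworkOutputStreamingConverter(context)

    events = []
    events.extend(converter.initial_events())  # response.created, response.in_progress
    async for update in agent.run_stream(message):
        # per content type: output_item.added -> delta events -> done events -> output_item.done
        events.extend(converter.transform_output_for_streaming(update))
    events.extend(converter.completion_events())  # flush active state, then response.completed
    return events
```
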
azure/ai/agentserver/agentframework/models/agent_id_generator.py
@@ -0,0 +1,44 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+"""Helper utilities for constructing AgentId model instances.
+
+Centralizes logic for safely building a `models.AgentId` from a request agent
+object. We intentionally do not allow overriding the generated model's fixed
+`type` literal ("agent_id"). If the provided object lacks a name, `None` is
+returned so callers can decide how to handle absence.
+"""
+
+from __future__ import annotations
+
+from typing import Optional
+
+from azure.ai.agentserver.core import AgentRunContext
+from azure.ai.agentserver.core.models import projects
+
+
+class AgentIdGenerator:
+    @staticmethod
+    def generate(context: AgentRunContext) -> Optional[projects.AgentId]:
+        """
+        Builds an AgentId model from the request agent object in the provided context.
+
+        :param context: The AgentRunContext containing the request.
+        :type context: AgentRunContext
+
+        :return: The constructed AgentId model, or None if the request lacks an agent name.
+        :rtype: Optional[projects.AgentId]
+        """
+        agent = context.request.get("agent")
+        if not agent:
+            return None
+
+        agent_id = projects.AgentId(
+            {
+                "type": agent.type,
+                "name": agent.name,
+                "version": agent.version,
+            }
+        )
+
+        return agent_id
azure/ai/agentserver/agentframework/models/constants.py
@@ -0,0 +1,13 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+class Constants:
+    # streaming configuration
+    # Environment variable name to control idle timeout for streaming updates (seconds)
+    AGENTS_ADAPTER_STREAM_TIMEOUT_S = "AGENTS_ADAPTER_STREAM_TIMEOUT_S"
+    # Default idle timeout (seconds) when env var or request override not provided
+    DEFAULT_STREAM_TIMEOUT_S = 300.0
+
+    # model defaults
+    DEFAULT_TEMPERATURE = 1.0
+    DEFAULT_TOP_P = 1.0
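
Given `_resolve_stream_timeout` in agent_framework.py above, the 300-second default idle timeout can be overridden through this environment variable before the server starts; the value is parsed with `float()`:

```python
import os

# lower the streaming idle timeout to 60 seconds for this process
os.environ["AGENTS_ADAPTER_STREAM_TIMEOUT_S"] = "60"
```
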
azure/ai/agentserver/agentframework/py.typed
File without changes
azure_ai_agentserver_agentframework-1.0.0b2.dist-info/METADATA
@@ -0,0 +1,83 @@
+Metadata-Version: 2.4
+Name: azure-ai-agentserver-agentframework
+Version: 1.0.0b2
+Summary: Agents server adapter for Azure AI
+Author-email: Microsoft Corporation <azpysdkhelp@microsoft.com>
+License-Expression: MIT
+Project-URL: repository, https://github.com/Azure/azure-sdk-for-python
+Keywords: azure,azure sdk
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: azure-ai-agentserver-core
+Requires-Dist: agent-framework-azure-ai==1.0.0b251007
+Requires-Dist: agent-framework-core==1.0.0b251007
+Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.36.0
+Dynamic: license-file
+
+# Azure AI Agent Server Adapter for Agent-framework Python
+
+## Getting started
+
+```bash
+pip install azure-ai-agentserver-agentframework
+```
+
+## Key concepts
+
+Azure AI Agent Server wraps your Agent-framework agent and hosts it in the cloud.
+
+## Examples
+
+```python
+# your existing agent
+from my_framework_agent import my_awesome_agent
+
+# agent framework utils
+from azure.ai.agentserver.agentframework import from_agent_framework
+
+if __name__ == "__main__":
+    # with this simple line, your agent will be hosted on http://localhost:8088
+    from_agent_framework(my_awesome_agent).run()
+```
+
+## Troubleshooting
+
+First, run your agent with azure-ai-agentserver-agentframework locally.
+
+If it works locally but fails in the cloud, check the logs in the Application Insights resource connected to your Azure AI Foundry project.
+
+## Next steps
+
+Please visit the [Samples](https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/agentserver/azure-ai-agentserver-agentframework/samples) folder. There are several samples to help you build your agent with azure-ai-agentserver.
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repos using our CLA.
+
+This project has adopted the
+[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information,
+see the Code of Conduct FAQ or contact opencode@microsoft.com with any
+additional questions or comments.
azure_ai_agentserver_agentframework-1.0.0b2.dist-info/RECORD
@@ -0,0 +1,16 @@
+azure/ai/agentserver/__init__.py,sha256=bpT73UG7mZL_JjEqMwbYx6q69jA8J5Jcoul1LcDokhA,81
+azure/ai/agentserver/agentframework/__init__.py,sha256=nIvOiay25c9ehMWWPWt3d4XyROqbfoxj_inEmoxnbCI,466
+azure/ai/agentserver/agentframework/_version.py,sha256=g637Xd9Uf23mTrNOKxXc_oX7Wi-Zaz6tfo5NYxUwOr0,486
+azure/ai/agentserver/agentframework/agent_framework.py,sha256=6xsuw3yADc3bDIwdo4_GfHSWrydSlJf9Z98AxeslxAU,6858
+azure/ai/agentserver/agentframework/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+azure/ai/agentserver/agentframework/models/__init__.py,sha256=J_Cmr9IKDtiY7KQ4vjOy9-BwZ8AxVwUHKWesdhDCY_U,246
+azure/ai/agentserver/agentframework/models/agent_framework_input_converters.py,sha256=o88ONqSmOaPtJqph4_WWmRswn-hr0kgX0JRo_97O8Og,5368
+azure/ai/agentserver/agentframework/models/agent_framework_output_non_streaming_converter.py,sha256=XvqsFrNMfdCkbNOQhmMIheR_tK01aY05IafzrAzr_Fw,9145
+azure/ai/agentserver/agentframework/models/agent_framework_output_streaming_converter.py,sha256=T7WGfE8-yXdvgQwwsmqA2KaJw3v6pbTUWiUOSrlCZmM,22666
+azure/ai/agentserver/agentframework/models/agent_id_generator.py,sha256=tIT_729F4ykQwblo91Bhv6iILY35_N1P-zUPYfVlt1w,1485
+azure/ai/agentserver/agentframework/models/constants.py,sha256=sEJkC4UMRzpMNXm7TJCFw7GzIAOxuaSBoxKfCoqbSGI,583
+azure_ai_agentserver_agentframework-1.0.0b2.dist-info/licenses/LICENSE,sha256=_VMkgdgo4ToLE8y1mOAjOKNhd0BnWoYu5r3BVBto6T0,1073
+azure_ai_agentserver_agentframework-1.0.0b2.dist-info/METADATA,sha256=G_cHrbiO9wA5BBmQH2_J1u9VwR70NipGwt-RPqEzLNw,2836
+azure_ai_agentserver_agentframework-1.0.0b2.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
+azure_ai_agentserver_agentframework-1.0.0b2.dist-info/top_level.txt,sha256=S7DhWV9m80TBzAhOFjxDUiNbKszzoThbnrSz5MpbHSQ,6
+azure_ai_agentserver_agentframework-1.0.0b2.dist-info/RECORD,,
azure_ai_agentserver_agentframework-1.0.0b2.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) Microsoft Corporation.
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
azure_ai_agentserver_agentframework-1.0.0b2.dist-info/top_level.txt
@@ -0,0 +1 @@
+azure