azure-ai-agentserver-core 1.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. azure/ai/agentserver/__init__.py +1 -0
  2. azure/ai/agentserver/core/__init__.py +14 -0
  3. azure/ai/agentserver/core/_version.py +9 -0
  4. azure/ai/agentserver/core/constants.py +13 -0
  5. azure/ai/agentserver/core/logger.py +161 -0
  6. azure/ai/agentserver/core/models/__init__.py +7 -0
  7. azure/ai/agentserver/core/models/_create_response.py +12 -0
  8. azure/ai/agentserver/core/models/openai/__init__.py +16 -0
  9. azure/ai/agentserver/core/models/projects/__init__.py +820 -0
  10. azure/ai/agentserver/core/models/projects/_enums.py +767 -0
  11. azure/ai/agentserver/core/models/projects/_models.py +15049 -0
  12. azure/ai/agentserver/core/models/projects/_patch.py +39 -0
  13. azure/ai/agentserver/core/models/projects/_patch_evaluations.py +48 -0
  14. azure/ai/agentserver/core/models/projects/_utils/__init__.py +6 -0
  15. azure/ai/agentserver/core/models/projects/_utils/model_base.py +1237 -0
  16. azure/ai/agentserver/core/models/projects/_utils/serialization.py +2030 -0
  17. azure/ai/agentserver/core/py.typed +0 -0
  18. azure/ai/agentserver/core/server/__init__.py +1 -0
  19. azure/ai/agentserver/core/server/base.py +324 -0
  20. azure/ai/agentserver/core/server/common/__init__.py +1 -0
  21. azure/ai/agentserver/core/server/common/agent_run_context.py +76 -0
  22. azure/ai/agentserver/core/server/common/id_generator/__init__.py +5 -0
  23. azure/ai/agentserver/core/server/common/id_generator/foundry_id_generator.py +136 -0
  24. azure/ai/agentserver/core/server/common/id_generator/id_generator.py +19 -0
  25. azure_ai_agentserver_core-1.0.0b2.dist-info/METADATA +149 -0
  26. azure_ai_agentserver_core-1.0.0b2.dist-info/RECORD +29 -0
  27. azure_ai_agentserver_core-1.0.0b2.dist-info/WHEEL +5 -0
  28. azure_ai_agentserver_core-1.0.0b2.dist-info/licenses/LICENSE +21 -0
  29. azure_ai_agentserver_core-1.0.0b2.dist-info/top_level.txt +1 -0
File without changes
@@ -0,0 +1 @@
1
# Declare this directory as part of a pkgutil-style namespace package so other
# azure.ai.agentserver distributions can contribute modules under the same name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
@@ -0,0 +1,324 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
4
+ # pylint: disable=broad-exception-caught,unused-argument,logging-fstring-interpolation,too-many-statements,too-many-return-statements
5
+ import inspect
6
+ import json
7
+ import os
8
+ import traceback
9
+ from abc import abstractmethod
10
+ from typing import Any, AsyncGenerator, Generator, Union
11
+
12
+ import uvicorn
13
+ from opentelemetry import context as otel_context, trace
14
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
15
+ from starlette.applications import Starlette
16
+ from starlette.middleware.base import BaseHTTPMiddleware
17
+ from starlette.middleware.cors import CORSMiddleware
18
+ from starlette.requests import Request
19
+ from starlette.responses import JSONResponse, Response, StreamingResponse
20
+ from starlette.routing import Route
21
+ from starlette.types import ASGIApp
22
+
23
+ from ..constants import Constants
24
+ from ..logger import APPINSIGHT_CONNSTR_ENV_NAME, get_logger, request_context
25
+ from ..models import (
26
+ Response as OpenAIResponse,
27
+ ResponseStreamEvent,
28
+ )
29
+ from .common.agent_run_context import AgentRunContext
30
+
31
# Module-wide logger shared by the server class and middleware below.
logger = get_logger()
# When the env var named by Constants.AGENT_DEBUG_ERRORS is "true",
# _format_error falls back to repr(exc) for exceptions whose str() is empty,
# instead of the generic "Internal error" message.
DEBUG_ERRORS = os.environ.get(Constants.AGENT_DEBUG_ERRORS, "false").lower() == "true"
33
+
34
+
35
class AgentRunContextMiddleware(BaseHTTPMiddleware):
    """Middleware that prepares an :class:`AgentRunContext` for agent endpoints.

    For POST ``/runs`` and ``/responses`` requests it parses the JSON body,
    stores the resulting run context on ``request.state`` and mirrors request
    and run identifiers into the ``request_context`` context variable so that
    log records carry them. All other paths pass through untouched.
    """

    def __init__(self, app: ASGIApp):
        super().__init__(app)

    async def dispatch(self, request: Request, call_next):
        # Only the agent execution endpoints carry a CreateResponse payload.
        if request.url.path not in ("/runs", "/responses"):
            return await call_next(request)

        try:
            self.set_request_id_to_context_var(request)
            payload = await request.json()
        except Exception as e:
            logger.error(f"Invalid JSON payload: {e}")
            return JSONResponse({"error": f"Invalid JSON payload: {e}"}, status_code=400)

        try:
            run_context = AgentRunContext(payload)
            request.state.agent_run_context = run_context
            self.set_run_context_to_context_var(run_context)
        except Exception as e:
            logger.error(f"Context build failed: {e}.", exc_info=True)
            return JSONResponse({"error": f"Context build failed: {e}"}, status_code=500)

        return await call_next(request)

    def set_request_id_to_context_var(self, request):
        """Copy the ``X-Request-Id`` header (when present) into the logging context."""
        request_id = request.headers.get("X-Request-Id", None)
        if not request_id:
            return
        current = request_context.get() or {}
        current["azure.ai.agentserver.x-request-id"] = request_id
        request_context.set(current)

    def set_run_context_to_context_var(self, run_context):
        """Publish run, conversation and agent identifiers into the logging context."""
        agent_id = agent_name = ""
        agent_obj = run_context.get_agent_id_object()
        if agent_obj:
            agent_name = getattr(agent_obj, "name", "")
            agent_version = getattr(agent_obj, "version", "")
            agent_id = f"{agent_name}:{agent_version}"

        attributes = {
            "azure.ai.agentserver.response_id": run_context.response_id or "",
            "azure.ai.agentserver.conversation_id": run_context.conversation_id or "",
            "azure.ai.agentserver.streaming": str(run_context.stream or False),
            "gen_ai.agent.id": agent_id,
            "gen_ai.agent.name": agent_name,
            "gen_ai.provider.name": "AzureAI Hosted Agents",
            "gen_ai.response.id": run_context.response_id or "",
        }
        current = request_context.get() or {}
        current.update(attributes)
        request_context.set(current)
82
+
83
+
84
class FoundryCBAgent:
    """Base class for a Foundry container-based agent HTTP server.

    Subclasses implement :meth:`agent_run`; this class wires up the Starlette
    app (POST ``/runs`` and ``/responses``, GET ``/liveness`` and
    ``/readiness``), CORS, the :class:`AgentRunContextMiddleware`, and
    optional OpenTelemetry tracing via :meth:`init_tracing`.
    """

    def __init__(self):
        async def runs_endpoint(request):
            # Set up tracing context and span
            context = request.state.agent_run_context
            ctx = request_context.get()
            # NOTE(review): self.tracer is None until init_tracing() runs;
            # run()/run_async() call it before serving — confirm no other entry path.
            with self.tracer.start_as_current_span(
                name=f"HostedAgents-{context.response_id}",
                attributes=ctx,
                kind=trace.SpanKind.SERVER,
            ):
                try:
                    logger.info("Start processing CreateResponse request.")

                    # Capture the active trace context so the generators below,
                    # which run after this coroutine returns, can re-attach it.
                    context_carrier = {}
                    TraceContextTextMapPropagator().inject(context_carrier)

                    resp = await self.agent_run(context)

                    if inspect.isgenerator(resp):
                        # Prefetch first event to allow 500 status if generation fails immediately
                        try:
                            first_event = next(resp)
                        except Exception as e:  # noqa: BLE001
                            err_msg = _format_error(e)
                            logger.error("Generator initialization failed: %s\n%s", e, traceback.format_exc())
                            return JSONResponse({"error": err_msg}, status_code=500)

                        def gen():
                            # Re-attach the captured trace context inside the
                            # generator body; detach it again in `finally`.
                            ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
                            token = otel_context.attach(ctx)
                            error_sent = False
                            try:
                                # yield prefetched first event
                                yield _event_to_sse_chunk(first_event)
                                for event in resp:
                                    yield _event_to_sse_chunk(event)
                            except Exception as e:  # noqa: BLE001
                                err_msg = _format_error(e)
                                logger.error("Error in non-async generator: %s\n%s", e, traceback.format_exc())
                                payload = {"error": err_msg}
                                yield f"event: error\ndata: {json.dumps(payload)}\n\n"
                                yield "data: [DONE]\n\n"
                                error_sent = True
                            finally:
                                logger.info("End of processing CreateResponse request.")
                                otel_context.detach(token)
                                # Terminate the SSE stream exactly once.
                                if not error_sent:
                                    yield "data: [DONE]\n\n"

                        return StreamingResponse(gen(), media_type="text/event-stream")
                    if inspect.isasyncgen(resp):
                        # Prefetch first async event to allow early 500
                        try:
                            first_event = await resp.__anext__()
                        except StopAsyncIteration:
                            # No items produced; treat as empty successful stream
                            def empty_gen():
                                yield "data: [DONE]\n\n"

                            return StreamingResponse(empty_gen(), media_type="text/event-stream")
                        except Exception as e:  # noqa: BLE001
                            err_msg = _format_error(e)
                            logger.error("Async generator initialization failed: %s\n%s", e, traceback.format_exc())
                            return JSONResponse({"error": err_msg}, status_code=500)

                        async def gen_async():
                            # Same trace re-attachment as gen(), async flavor.
                            ctx = TraceContextTextMapPropagator().extract(carrier=context_carrier)
                            token = otel_context.attach(ctx)
                            error_sent = False
                            try:
                                # yield prefetched first event
                                yield _event_to_sse_chunk(first_event)
                                async for event in resp:
                                    yield _event_to_sse_chunk(event)
                            except Exception as e:  # noqa: BLE001
                                err_msg = _format_error(e)
                                logger.error("Error in async generator: %s\n%s", e, traceback.format_exc())
                                payload = {"error": err_msg}
                                yield f"event: error\ndata: {json.dumps(payload)}\n\n"
                                yield "data: [DONE]\n\n"
                                error_sent = True
                            finally:
                                logger.info("End of processing CreateResponse request.")
                                otel_context.detach(token)
                                if not error_sent:
                                    yield "data: [DONE]\n\n"

                        return StreamingResponse(gen_async(), media_type="text/event-stream")
                    # Non-streaming result: the agent returned a complete model.
                    logger.info("End of processing CreateResponse request.")
                    return JSONResponse(resp.as_dict())
                except Exception as e:
                    # TODO: extract status code from exception
                    logger.error(f"Error processing CreateResponse request: {traceback.format_exc()}")
                    return JSONResponse({"error": str(e)}, status_code=500)

        async def liveness_endpoint(request):
            result = await self.agent_liveness(request)
            return _to_response(result)

        async def readiness_endpoint(request):
            result = await self.agent_readiness(request)
            return _to_response(result)

        routes = [
            Route("/runs", runs_endpoint, methods=["POST"], name="agent_run"),
            Route("/responses", runs_endpoint, methods=["POST"], name="agent_response"),
            Route("/liveness", liveness_endpoint, methods=["GET"], name="agent_liveness"),
            Route("/readiness", readiness_endpoint, methods=["GET"], name="agent_readiness"),
        ]

        self.app = Starlette(routes=routes)
        self.app.add_middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )
        self.app.add_middleware(AgentRunContextMiddleware)

        @self.app.on_event("startup")
        async def attach_appinsights_logger():
            # Route uvicorn's loggers through the "appinsights_handler" (when
            # get_logger attached one) so server access/error logs are exported.
            import logging

            for handler in logger.handlers:
                if handler.name == "appinsights_handler":
                    for logger_name in ["uvicorn", "uvicorn.error", "uvicorn.access"]:
                        uv_logger = logging.getLogger(logger_name)
                        uv_logger.addHandler(handler)
                        uv_logger.setLevel(logger.level)
                        uv_logger.propagate = False

        # Replaced by init_tracing(); runs_endpoint uses it unconditionally.
        self.tracer = None

    @abstractmethod
    async def agent_run(
        self, context: AgentRunContext
    ) -> Union[OpenAIResponse, Generator[ResponseStreamEvent, Any, Any], AsyncGenerator[ResponseStreamEvent, Any]]:
        """Handle one CreateResponse request.

        Return a complete response model for a non-streaming reply, or a sync
        or async generator of stream events for an SSE streaming reply.

        :param context: Parsed request payload plus generated run identifiers.
        """
        raise NotImplementedError

    async def agent_liveness(self, request) -> Union[Response, dict]:
        """Liveness probe; override to customize. Defaults to a bare HTTP 200."""
        return Response(status_code=200)

    async def agent_readiness(self, request) -> Union[Response, dict]:
        """Readiness probe; override to customize. Defaults to a JSON status body."""
        return {"status": "ready"}

    async def run_async(
        self,
        port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088)),
    ) -> None:
        """
        Awaitable server starter for use **inside** an existing event loop.

        :param port: Port to listen on.
        :type port: int
        """
        self.init_tracing()
        config = uvicorn.Config(self.app, host="0.0.0.0", port=port, loop="asyncio")
        server = uvicorn.Server(config)
        logger.info(f"Starting FoundryCBAgent server async on port {port}")
        await server.serve()

    def run(self, port: int = int(os.environ.get("DEFAULT_AD_PORT", 8088))) -> None:
        """
        Start a Starlette server on localhost:<port> exposing:
            POST /runs
            POST /responses
            GET /liveness
            GET /readiness

        :param port: Port to listen on.
        :type port: int
        """
        self.init_tracing()
        logger.info(f"Starting FoundryCBAgent server on port {port}")
        uvicorn.run(self.app, host="0.0.0.0", port=port)

    def init_tracing(self):
        """Configure tracing and cache a tracer on ``self``.

        A tracer provider with exporters is only installed when an OTLP
        endpoint or an Application Insights connection string is configured
        via environment variables; otherwise the default provider is used.
        """
        exporter = os.environ.get(Constants.OTEL_EXPORTER_ENDPOINT)
        app_insights_conn_str = os.environ.get(APPINSIGHT_CONNSTR_ENV_NAME)
        if exporter or app_insights_conn_str:
            from opentelemetry.sdk.resources import Resource
            from opentelemetry.sdk.trace import TracerProvider

            resource = Resource.create(self.get_trace_attributes())
            provider = TracerProvider(resource=resource)
            if exporter:
                self.setup_otlp_exporter(exporter, provider)
            if app_insights_conn_str:
                self.setup_application_insights_exporter(app_insights_conn_str, provider)
            trace.set_tracer_provider(provider)
        self.init_tracing_internal(exporter_endpoint=exporter, app_insights_conn_str=app_insights_conn_str)
        self.tracer = trace.get_tracer(__name__)

    def get_trace_attributes(self):
        """Resource attributes stamped on exported spans; override to extend."""
        return {
            "service.name": "azure.ai.agentserver",
        }

    def init_tracing_internal(self, exporter_endpoint=None, app_insights_conn_str=None):
        """Hook for subclasses to perform extra tracing setup; no-op by default."""
        pass

    def setup_application_insights_exporter(self, connection_string, provider):
        """Attach an Azure Monitor (Application Insights) span exporter to *provider*."""
        from opentelemetry.sdk.trace.export import BatchSpanProcessor

        from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter

        exporter_instance = AzureMonitorTraceExporter.from_connection_string(connection_string)
        processor = BatchSpanProcessor(exporter_instance)
        provider.add_span_processor(processor)
        logger.info("Tracing setup with Application Insights exporter.")

    def setup_otlp_exporter(self, endpoint, provider):
        """Attach an OTLP/HTTP span exporter targeting *endpoint* to *provider*."""
        from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
        from opentelemetry.sdk.trace.export import BatchSpanProcessor

        exporter_instance = OTLPSpanExporter(endpoint=endpoint)
        processor = BatchSpanProcessor(exporter_instance)
        provider.add_span_processor(processor)
        logger.info(f"Tracing setup with OTLP exporter: {endpoint}")
305
+
306
+
307
def _event_to_sse_chunk(event: ResponseStreamEvent) -> str:
    """Serialize a stream event into a Server-Sent-Events chunk.

    The event's dict form goes on the ``data:`` line; when the event carries a
    type, it is emitted first on an ``event:`` line.
    """
    data_line = f"data: {json.dumps(event.as_dict())}\n\n"
    if not event.type:
        return data_line
    return f"event: {event.type}\n{data_line}"
312
+
313
+
314
+ def _format_error(exc: Exception) -> str:
315
+ message = str(exc)
316
+ if message:
317
+ return message
318
+ if DEBUG_ERRORS:
319
+ return repr(exc)
320
+ return "Internal error"
321
+
322
+
323
def _to_response(result: Union[Response, dict]) -> Response:
    """Pass Response objects through unchanged; wrap dicts as JSON responses."""
    if isinstance(result, Response):
        return result
    return JSONResponse(result)
@@ -0,0 +1 @@
1
# Declare this directory as part of a pkgutil-style namespace package so other
# distributions can contribute modules under the same package name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
@@ -0,0 +1,76 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
4
+ from ...logger import get_logger
5
+ from ...models import CreateResponse
6
+ from ...models.projects import AgentId, AgentReference, ResponseConversation1
7
+ from .id_generator.foundry_id_generator import FoundryIdGenerator
8
+ from .id_generator.id_generator import IdGenerator
9
+
10
# Shared package logger; not referenced elsewhere in this module's visible code.
logger = get_logger()
11
+
12
+
13
class AgentRunContext:
    """Per-request container for a CreateResponse payload and its derived ids.

    Wraps the raw JSON body, its deserialized model, and a FoundryIdGenerator
    that supplies the response/conversation identifiers for this run.
    """

    def __init__(self, payload: dict):
        self._raw_payload = payload
        self._request = _deserialize_create_response(payload)
        self._id_generator = FoundryIdGenerator.from_request(payload)
        self._response_id = self._id_generator.response_id
        self._conversation_id = self._id_generator.conversation_id
        self._stream = self._request.get("stream", False)

    @property
    def raw_payload(self) -> dict:
        """The unmodified JSON request body."""
        return self._raw_payload

    @property
    def request(self) -> CreateResponse:
        """The payload deserialized into a CreateResponse model."""
        return self._request

    @property
    def id_generator(self) -> IdGenerator:
        """Generator used to mint ids sharing this run's partition key."""
        return self._id_generator

    @property
    def response_id(self) -> str:
        """Identifier of the response being produced."""
        return self._response_id

    @property
    def conversation_id(self) -> str:
        """Identifier of the conversation this run belongs to."""
        return self._conversation_id

    @property
    def stream(self) -> bool:
        """Whether the caller requested an SSE streaming response."""
        return self._stream

    def get_agent_id_object(self) -> AgentId:
        """Build an AgentId from the request's agent reference, or None if absent."""
        agent = self.request.get("agent")
        if not agent:
            return None  # type: ignore
        spec = {
            "type": agent.type,
            "name": agent.name,
            "version": agent.version,
        }
        return AgentId(spec)

    def get_conversation_object(self) -> ResponseConversation1:
        """Build a conversation model for this run, or None when no id exists."""
        if not self._conversation_id:
            return None  # type: ignore
        return ResponseConversation1(id=self._conversation_id)
62
+
63
+
64
def _deserialize_create_response(payload: dict) -> CreateResponse:
    """Deserialize a raw JSON payload into a CreateResponse model.

    The nested ``agent`` field, when present, is materialized into an
    AgentReference model rather than kept as a plain dict.
    """
    response = CreateResponse(**payload)
    agent_payload = payload.get("agent")
    if agent_payload:
        response["agent"] = _deserialize_agent_reference(agent_payload)
    return response
71
+
72
+
73
def _deserialize_agent_reference(payload: dict) -> AgentReference:
    """Build an AgentReference from *payload*; falsy payloads map to None."""
    return AgentReference(**payload) if payload else None  # type: ignore
@@ -0,0 +1,5 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
4
+
5
# Declare this directory as part of a pkgutil-style namespace package so other
# distributions can contribute modules under the same package name.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
@@ -0,0 +1,136 @@
1
+ # pylint: disable=docstring-missing-return,docstring-missing-param,docstring-missing-rtype
2
+ # ---------------------------------------------------------
3
+ # Copyright (c) Microsoft Corporation. All rights reserved.
4
+ # ---------------------------------------------------------
5
+ from __future__ import annotations
6
+
7
+ import base64
8
+ import os
9
+ import re
10
+ from typing import Optional
11
+
12
+ from .id_generator import IdGenerator
13
+
14
# Watermarks may contain only ASCII letters and digits (empty string matches too).
_WATERMARK_RE = re.compile(r"^[A-Za-z0-9]*$")
15
+
16
+
17
class FoundryIdGenerator(IdGenerator):
    """
    Python port of the C# FoundryIdGenerator.

    Notable behaviors preserved:
    - Secure, alphanumeric entropy via base64 filtering, regenerating until
      enough alphanumeric characters exist, then truncating to exact length.
    - Watermark must be strictly alphanumeric; inserted mid-entropy.
    - Only one delimiter (default "_") after the prefix; no delimiter between entropy and partition key.
    - Partition key is the last N characters of the second ID segment (post-delimiter).
    """

    def __init__(self, response_id: Optional[str], conversation_id: Optional[str]):
        """Use the provided ids when present, otherwise mint fresh ones.

        The partition id embedded in the conversation id is extracted and
        reused for every id this generator subsequently produces.

        :param response_id: Existing response id, or None to generate one.
        :param conversation_id: Existing conversation id, or None to generate one.
        :raises ValueError: If the conversation id has no valid partition key.
        """
        self.response_id = response_id or self._new_id("resp")
        self.conversation_id = conversation_id or self._new_id("conv")
        self._partition_id = self._extract_partition_id(self.conversation_id)

    @classmethod
    def from_request(cls, payload: dict) -> "FoundryIdGenerator":
        """Build a generator from a raw CreateResponse payload.

        ``conversation`` may be absent, a plain string id, or an object with
        an ``id`` field; anything else is treated as missing.
        """
        # Guard against an explicit `"metadata": null` in the payload: the
        # original `payload.get("metadata", {})` returns None in that case
        # and the chained .get() raises AttributeError.
        response_id = (payload.get("metadata") or {}).get("response_id", None)
        conv_id_raw = payload.get("conversation", None)
        if isinstance(conv_id_raw, str):
            conv_id = conv_id_raw
        elif isinstance(conv_id_raw, dict):
            conv_id = conv_id_raw.get("id", None)
        else:
            conv_id = None
        return cls(response_id, conv_id)

    def generate(self, category: Optional[str] = None) -> str:
        """Generate a new id prefixed by *category* (default "id"), reusing
        this run's partition key so related records co-locate."""
        prefix = "id" if not category else category
        return self._new_id(prefix, partition_key=self._partition_id)

    # --- Static helpers (mirror C# private static methods) --------------------

    @staticmethod
    def _new_id(
        prefix: str,
        string_length: int = 32,
        partition_key_length: int = 18,
        infix: Optional[str] = "",
        watermark: str = "",
        delimiter: str = "_",
        partition_key: Optional[str] = None,
        partition_key_hint: str = "",
    ) -> str:
        """
        Generates a new ID.

        Format matches the C# logic:
            f"{prefix}{delimiter}{infix}{partitionKey}{entropy}"
        (i.e., exactly one delimiter after prefix; no delimiter between entropy and partition key)

        :raises ValueError: If *watermark* contains non-alphanumeric characters.
        """
        entropy = FoundryIdGenerator._secure_entropy(string_length)

        # Partition key precedence: explicit value > derived from hint > random.
        if partition_key is not None:
            pkey = partition_key
        elif partition_key_hint:
            pkey = FoundryIdGenerator._extract_partition_id(
                partition_key_hint,
                string_length=string_length,
                partition_key_length=partition_key_length,
                delimiter=delimiter,
            )
        else:
            pkey = FoundryIdGenerator._secure_entropy(partition_key_length)

        if watermark:
            if not _WATERMARK_RE.fullmatch(watermark):
                raise ValueError(f"Only alphanumeric characters may be in watermark: {watermark}")
            # Insert the watermark at the midpoint of the entropy string.
            half = string_length // 2
            entropy = f"{entropy[:half]}{watermark}{entropy[half:]}"

        infix = infix or ""
        prefix_part = f"{prefix}{delimiter}" if prefix else ""
        return f"{prefix_part}{infix}{pkey}{entropy}"

    @staticmethod
    def _secure_entropy(string_length: int) -> str:
        """
        Generates a secure random alphanumeric string of exactly `string_length`.

        Base64-encodes secure random bytes, strips non-alphanumeric characters,
        and retries until at least `string_length` characters remain, then
        truncates to the exact length (same net effect as the C# loop).

        :raises ValueError: If *string_length* is less than 1.
        """
        if string_length < 1:
            raise ValueError("Must be greater than or equal to 1")

        while True:
            # Use cryptographically secure bytes; base64 then filter to alnum.
            buf = os.urandom(string_length)
            encoded = base64.b64encode(buf).decode("ascii")
            alnum = "".join(ch for ch in encoded if ch.isalnum())
            if len(alnum) >= string_length:
                return alnum[:string_length]
            # else: retry, same as the C# loop which discards and regenerates

    @staticmethod
    def _extract_partition_id(
        id_str: str,
        string_length: int = 32,
        partition_key_length: int = 18,
        delimiter: str = "_",
    ) -> str:
        """
        Extracts partition key from an existing ID.

        Expected shape (per C# logic): "<prefix>_<infix+partitionKey+entropy>"
        We take the last `partition_key_length` characters from the *second* segment.

        :raises ValueError: If *id_str* is empty or does not carry a valid key.
        """
        if not id_str:
            raise ValueError("Id cannot be null or empty")

        parts = [p for p in id_str.split(delimiter) if p]  # remove empty entries like C# Split(..., RemoveEmptyEntries)
        if len(parts) < 2:
            raise ValueError(f"Id '{id_str}' does not contain a valid partition key.")

        segment = parts[1]
        if len(segment) < string_length + partition_key_length:
            raise ValueError(f"Id '{id_str}' does not contain a valid id.")

        return segment[-partition_key_length:]
@@ -0,0 +1,19 @@
1
+ # ---------------------------------------------------------
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+ # ---------------------------------------------------------
4
+ from abc import ABC, abstractmethod
5
+ from typing import Optional
6
+
7
+
8
+ class IdGenerator(ABC):
9
+ @abstractmethod
10
+ def generate(self, category: Optional[str] = None) -> str: ...
11
+
12
+ def generate_function_call_id(self) -> str:
13
+ return self.generate("func")
14
+
15
+ def generate_function_output_id(self) -> str:
16
+ return self.generate("funcout")
17
+
18
+ def generate_message_id(self) -> str:
19
+ return self.generate("msg")