agent_framework_azure_ai_search-1.0.0b251218-py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -0,0 +1,16 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+ import importlib.metadata
4
+
5
+ from ._search_provider import AzureAISearchContextProvider, AzureAISearchSettings
6
+
7
+ try:
8
+ __version__ = importlib.metadata.version(__name__)
9
+ except importlib.metadata.PackageNotFoundError:
10
+ __version__ = "0.0.0" # Fallback for development mode
11
+
12
+ __all__ = [
13
+ "AzureAISearchContextProvider",
14
+ "AzureAISearchSettings",
15
+ "__version__",
16
+ ]
@@ -0,0 +1,990 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+
4
+ import sys
5
+ from collections.abc import Awaitable, Callable, MutableSequence
6
+ from typing import TYPE_CHECKING, Any, ClassVar, Literal
7
+
8
+ from agent_framework import AGENT_FRAMEWORK_USER_AGENT, ChatMessage, Context, ContextProvider, Role
9
+ from agent_framework._logging import get_logger
10
+ from agent_framework._pydantic import AFBaseSettings
11
+ from agent_framework.exceptions import ServiceInitializationError
12
+ from azure.core.credentials import AzureKeyCredential
13
+ from azure.core.credentials_async import AsyncTokenCredential
14
+ from azure.core.exceptions import ResourceNotFoundError
15
+ from azure.search.documents.aio import SearchClient
16
+ from azure.search.documents.indexes.aio import SearchIndexClient
17
+ from azure.search.documents.indexes.models import (
18
+ AzureOpenAIVectorizerParameters,
19
+ KnowledgeBase,
20
+ KnowledgeBaseAzureOpenAIModel,
21
+ KnowledgeRetrievalLowReasoningEffort,
22
+ KnowledgeRetrievalMediumReasoningEffort,
23
+ KnowledgeRetrievalMinimalReasoningEffort,
24
+ KnowledgeRetrievalOutputMode,
25
+ KnowledgeRetrievalReasoningEffort,
26
+ KnowledgeSourceReference,
27
+ SearchIndexKnowledgeSource,
28
+ SearchIndexKnowledgeSourceParameters,
29
+ )
30
+ from azure.search.documents.models import (
31
+ QueryCaptionType,
32
+ QueryType,
33
+ VectorizableTextQuery,
34
+ VectorizedQuery,
35
+ )
36
+ from pydantic import SecretStr, ValidationError
37
+
38
+ # Type checking imports for optional agentic mode dependencies
39
+ if TYPE_CHECKING:
40
+ from azure.search.documents.knowledgebases.aio import KnowledgeBaseRetrievalClient
41
+ from azure.search.documents.knowledgebases.models import (
42
+ KnowledgeBaseMessage,
43
+ KnowledgeBaseMessageTextContent,
44
+ KnowledgeBaseRetrievalRequest,
45
+ KnowledgeRetrievalIntent,
46
+ KnowledgeRetrievalSemanticIntent,
47
+ )
48
+ from azure.search.documents.knowledgebases.models import (
49
+ KnowledgeRetrievalLowReasoningEffort as KBRetrievalLowReasoningEffort,
50
+ )
51
+ from azure.search.documents.knowledgebases.models import (
52
+ KnowledgeRetrievalMediumReasoningEffort as KBRetrievalMediumReasoningEffort,
53
+ )
54
+ from azure.search.documents.knowledgebases.models import (
55
+ KnowledgeRetrievalMinimalReasoningEffort as KBRetrievalMinimalReasoningEffort,
56
+ )
57
+ from azure.search.documents.knowledgebases.models import (
58
+ KnowledgeRetrievalOutputMode as KBRetrievalOutputMode,
59
+ )
60
+ from azure.search.documents.knowledgebases.models import (
61
+ KnowledgeRetrievalReasoningEffort as KBRetrievalReasoningEffort,
62
+ )
63
+
64
+ # Runtime imports for agentic mode (optional dependency)
65
+ try:
66
+ from azure.search.documents.knowledgebases.aio import KnowledgeBaseRetrievalClient
67
+ from azure.search.documents.knowledgebases.models import (
68
+ KnowledgeBaseMessage,
69
+ KnowledgeBaseMessageTextContent,
70
+ KnowledgeBaseRetrievalRequest,
71
+ KnowledgeRetrievalIntent,
72
+ KnowledgeRetrievalSemanticIntent,
73
+ )
74
+ from azure.search.documents.knowledgebases.models import (
75
+ KnowledgeRetrievalLowReasoningEffort as KBRetrievalLowReasoningEffort,
76
+ )
77
+ from azure.search.documents.knowledgebases.models import (
78
+ KnowledgeRetrievalMediumReasoningEffort as KBRetrievalMediumReasoningEffort,
79
+ )
80
+ from azure.search.documents.knowledgebases.models import (
81
+ KnowledgeRetrievalMinimalReasoningEffort as KBRetrievalMinimalReasoningEffort,
82
+ )
83
+ from azure.search.documents.knowledgebases.models import (
84
+ KnowledgeRetrievalOutputMode as KBRetrievalOutputMode,
85
+ )
86
+ from azure.search.documents.knowledgebases.models import (
87
+ KnowledgeRetrievalReasoningEffort as KBRetrievalReasoningEffort,
88
+ )
89
+
90
+ _agentic_retrieval_available = True
91
+ except ImportError:
92
+ _agentic_retrieval_available = False
93
+
94
+ if sys.version_info >= (3, 11):
95
+ from typing import Self # pragma: no cover
96
+ else:
97
+ from typing_extensions import Self # pragma: no cover
98
+
99
+ if sys.version_info >= (3, 12):
100
+ from typing import override # type: ignore # pragma: no cover
101
+ else:
102
+ from typing_extensions import override # type: ignore[import] # pragma: no cover
103
+
104
+ """Azure AI Search Context Provider for Agent Framework.
105
+
106
+ This module provides context providers for Azure AI Search integration with two modes:
107
+ - Agentic: Recommended for most scenarios. Uses Knowledge Bases for query planning and
108
+ multi-hop reasoning. Slightly slower with more token consumption, but more accurate.
109
+ - Semantic: Fast hybrid search (vector + keyword) with semantic ranker. Best for simple
110
+ queries where speed is critical.
111
+
112
+ See: https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/foundry-iq-boost-response-relevance-by-36-with-agentic-retrieval/4470720
113
+ """
114
+
115
+
116
+ # Module-level constants
117
+ logger = get_logger("agent_framework.azure")
118
+ _DEFAULT_AGENTIC_MESSAGE_HISTORY_COUNT = 10
119
+
120
+
121
+ class AzureAISearchSettings(AFBaseSettings):
122
+ """Settings for Azure AI Search Context Provider with auto-loading from environment.
123
+
124
+ The settings are first loaded from environment variables with the prefix 'AZURE_SEARCH_'.
125
+ If the environment variables are not found, the settings can be loaded from a .env file.
126
+
127
+ Keyword Args:
128
+ endpoint: Azure AI Search endpoint URL.
129
+ Can be set via environment variable AZURE_SEARCH_ENDPOINT.
130
+ index_name: Name of the search index.
131
+ Can be set via environment variable AZURE_SEARCH_INDEX_NAME.
132
+ knowledge_base_name: Name of an existing Knowledge Base (for agentic mode).
133
+ Can be set via environment variable AZURE_SEARCH_KNOWLEDGE_BASE_NAME.
134
+ api_key: API key for authentication (optional, use managed identity if not provided).
135
+ Can be set via environment variable AZURE_SEARCH_API_KEY.
136
+ env_file_path: If provided, the .env settings are read from this file path location.
137
+ env_file_encoding: The encoding of the .env file, defaults to 'utf-8'.
138
+
139
+ Examples:
140
+ .. code-block:: python
141
+
142
+ from agent_framework_azure_ai_search import AzureAISearchSettings
143
+
144
+ # Using environment variables
145
+ # Set AZURE_SEARCH_ENDPOINT=https://mysearch.search.windows.net
146
+ # Set AZURE_SEARCH_INDEX_NAME=my-index
147
+ settings = AzureAISearchSettings()
148
+
149
+ # Or passing parameters directly
150
+ settings = AzureAISearchSettings(
151
+ endpoint="https://mysearch.search.windows.net",
152
+ index_name="my-index",
153
+ )
154
+
155
+ # Or loading from a .env file
156
+ settings = AzureAISearchSettings(env_file_path="path/to/.env")
157
+ """
158
+
159
+ env_prefix: ClassVar[str] = "AZURE_SEARCH_"
160
+
161
+ endpoint: str | None = None
162
+ index_name: str | None = None
163
+ knowledge_base_name: str | None = None
164
+ api_key: SecretStr | None = None
165
+
166
+
167
+ class AzureAISearchContextProvider(ContextProvider):
168
+ """Azure AI Search Context Provider with hybrid search and semantic ranking.
169
+
170
+ This provider retrieves relevant documents from Azure AI Search to provide context
171
+ to the AI agent. It supports two modes:
172
+
173
+ - **agentic**: Recommended for most scenarios. Uses Knowledge Bases for query planning
174
+ and multi-hop reasoning. Slightly slower with more token consumption, but provides
175
+ more accurate results (up to 36% improvement in response relevance).
176
+ - **semantic** (default): Fast hybrid search combining vector and keyword search
177
+ with semantic reranking. Best for simple queries where speed is critical.
178
+
179
+ Examples:
180
+ Using environment variables (recommended):
181
+
182
+ .. code-block:: python
183
+
184
+ from agent_framework_azure_ai_search import AzureAISearchContextProvider
185
+ from azure.identity.aio import DefaultAzureCredential
186
+
187
+ # Set AZURE_SEARCH_ENDPOINT and AZURE_SEARCH_INDEX_NAME in environment
188
+ search_provider = AzureAISearchContextProvider(credential=DefaultAzureCredential())
189
+
190
+ Semantic hybrid search with API key:
191
+
192
+ .. code-block:: python
193
+
194
+ # Direct API key string
195
+ search_provider = AzureAISearchContextProvider(
196
+ endpoint="https://mysearch.search.windows.net",
197
+ index_name="my-index",
198
+ api_key="my-api-key",
199
+ mode="semantic",
200
+ )
201
+
202
+ Loading from .env file:
203
+
204
+ .. code-block:: python
205
+
206
+ # Load settings from a .env file
207
+ search_provider = AzureAISearchContextProvider(
208
+ credential=DefaultAzureCredential(), env_file_path="path/to/.env"
209
+ )
210
+
211
+ Agentic retrieval for complex queries:
212
+
213
+ .. code-block:: python
214
+
215
+ # Use agentic mode for multi-hop reasoning
216
+ # Note: azure_openai_resource_url is the OpenAI endpoint for Knowledge Base model calls,
217
+ # which is different from azure_ai_project_endpoint (the AI Foundry project endpoint)
218
+ search_provider = AzureAISearchContextProvider(
219
+ endpoint="https://mysearch.search.windows.net",
220
+ index_name="my-index",
221
+ credential=DefaultAzureCredential(),
222
+ mode="agentic",
223
+ azure_openai_resource_url="https://myresource.openai.azure.com",
224
+ model_deployment_name="gpt-4o",
225
+ knowledge_base_name="my-knowledge-base",
226
+ )
227
+ """
228
+
229
+ _DEFAULT_SEARCH_CONTEXT_PROMPT = "Use the following context to answer the question:"
230
+
231
+ def __init__(
232
+ self,
233
+ endpoint: str | None = None,
234
+ index_name: str | None = None,
235
+ api_key: str | AzureKeyCredential | None = None,
236
+ credential: AsyncTokenCredential | None = None,
237
+ *,
238
+ mode: Literal["semantic", "agentic"] = "semantic",
239
+ top_k: int = 5,
240
+ semantic_configuration_name: str | None = None,
241
+ vector_field_name: str | None = None,
242
+ embedding_function: Callable[[str], Awaitable[list[float]]] | None = None,
243
+ context_prompt: str | None = None,
244
+ # Agentic mode parameters (Knowledge Base)
245
+ azure_openai_resource_url: str | None = None,
246
+ model_deployment_name: str | None = None,
247
+ model_name: str | None = None,
248
+ knowledge_base_name: str | None = None,
249
+ retrieval_instructions: str | None = None,
250
+ azure_openai_api_key: str | None = None,
251
+ knowledge_base_output_mode: Literal["extractive_data", "answer_synthesis"] = "extractive_data",
252
+ retrieval_reasoning_effort: Literal["minimal", "medium", "low"] = "minimal",
253
+ agentic_message_history_count: int = _DEFAULT_AGENTIC_MESSAGE_HISTORY_COUNT,
254
+ env_file_path: str | None = None,
255
+ env_file_encoding: str | None = None,
256
+ ) -> None:
257
+ """Initialize Azure AI Search Context Provider.
258
+
259
+ Args:
260
+ endpoint: Azure AI Search endpoint URL.
261
+ Can also be set via environment variable AZURE_SEARCH_ENDPOINT.
262
+ index_name: Name of the search index to query.
263
+ Can also be set via environment variable AZURE_SEARCH_INDEX_NAME.
264
+ api_key: API key for authentication (string or AzureKeyCredential).
265
+ Can also be set via environment variable AZURE_SEARCH_API_KEY.
266
+ credential: AsyncTokenCredential for managed identity authentication.
267
+ Use this for Entra ID authentication instead of api_key.
268
+ mode: Search mode - "semantic" for hybrid search with semantic ranking (fast)
269
+ or "agentic" for multi-hop reasoning (slower). Default: "semantic".
270
+ top_k: Maximum number of documents to retrieve. Only applies to semantic mode.
271
+ In agentic mode, the server-side Knowledge Base determines retrieval based on
272
+ query complexity and reasoning effort. Default: 5.
273
+ semantic_configuration_name: Name of semantic configuration in the index.
274
+ Required for semantic ranking. If None, uses index default.
275
+ vector_field_name: Name of the vector field in the index for hybrid search.
276
+ Required if using vector search. Default: None (keyword search only).
277
+ embedding_function: Async function to generate embeddings for vector search.
278
+ Signature: async def embed(text: str) -> list[float]
279
+ Required if vector_field_name is specified and no server-side vectorization.
280
+ context_prompt: Custom prompt to prepend to retrieved context.
281
+ Default: "Use the following context to answer the question:"
282
+ azure_openai_resource_url: Azure OpenAI resource URL for Knowledge Base model calls.
283
+ Required when using agentic mode with index_name (to auto-create Knowledge Base).
284
+ Not required when using an existing knowledge_base_name.
285
+ Example: "https://myresource.openai.azure.com"
286
+ model_deployment_name: Model deployment name in Azure OpenAI for Knowledge Base.
287
+ Required when using agentic mode with index_name (to auto-create Knowledge Base).
288
+ Not required when using an existing knowledge_base_name.
289
+ model_name: The underlying model name (e.g., "gpt-4o", "gpt-4o-mini").
290
+ If not provided, defaults to model_deployment_name. Used for Knowledge Base configuration.
291
+ knowledge_base_name: Name of an existing Knowledge Base to use.
292
+ Required for agentic mode if not providing index_name.
293
+ Supports KBs with any source type (web, blob, index, etc.).
294
+ retrieval_instructions: Custom instructions for the Knowledge Base's
295
+ retrieval planning. Only used in agentic mode.
296
+ azure_openai_api_key: Azure OpenAI API key for Knowledge Base to call the model.
297
+ Only needed when using API key authentication instead of managed identity.
298
+ knowledge_base_output_mode: Output mode for Knowledge Base retrieval. Only used in agentic mode.
299
+ "extractive_data": Returns raw chunks without synthesis (default, recommended for agent integration).
300
+ "answer_synthesis": Returns synthesized answer from the LLM.
301
+ Some knowledge sources require answer_synthesis mode. Default: "extractive_data".
302
+ retrieval_reasoning_effort: Reasoning effort for Knowledge Base query planning. Only used in agentic mode.
303
+ "minimal": Fastest, basic query planning.
304
+ "medium": Moderate reasoning with some query decomposition.
305
+ "low": Lower reasoning effort than medium.
306
+ Default: "minimal".
307
+ agentic_message_history_count: Number of recent messages from conversation history to send to
308
+ the Knowledge Base. This context helps with query planning in agentic mode, allowing the
309
+ Knowledge Base to understand the conversation flow and generate better retrieval queries.
310
+ There is no technical limit; adjust based on your use case. Default: 10.
311
+ env_file_path: Path to environment file for loading settings.
312
+ env_file_encoding: Encoding of the environment file.
313
+
314
+ Examples:
315
+ .. code-block:: python
316
+
317
+ from agent_framework_azure_ai_search import AzureAISearchContextProvider
318
+ from azure.identity.aio import DefaultAzureCredential
319
+
320
+ # Using environment variables
321
+ # Set AZURE_SEARCH_ENDPOINT=https://mysearch.search.windows.net
322
+ # Set AZURE_SEARCH_INDEX_NAME=my-index
323
+ credential = DefaultAzureCredential()
324
+ provider = AzureAISearchContextProvider(credential=credential)
325
+
326
+ # Or passing parameters directly
327
+ provider = AzureAISearchContextProvider(
328
+ endpoint="https://mysearch.search.windows.net",
329
+ index_name="my-index",
330
+ credential=credential,
331
+ )
332
+
333
+ # Or loading from a .env file
334
+ provider = AzureAISearchContextProvider(credential=credential, env_file_path="path/to/.env")
335
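+ 
+ Vector search with a client-side embedding function (a minimal sketch; the
+ "content_vector" field and the embed helper are illustrative and must match your index):
+ 
+ .. code-block:: python
+ 
+ async def embed(text: str) -> list[float]:
+     # Call your embedding model here and return the query vector.
+     ...
+ 
+ provider = AzureAISearchContextProvider(
+     endpoint="https://mysearch.search.windows.net",
+     index_name="my-index",
+     credential=credential,
+     vector_field_name="content_vector",  # illustrative field name
+     embedding_function=embed,
+     semantic_configuration_name="my-semantic-config",  # illustrative; optional
+ )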
+ """
336
+ # Load settings from environment/file
337
+ try:
338
+ settings = AzureAISearchSettings(
339
+ endpoint=endpoint,
340
+ index_name=index_name,
341
+ knowledge_base_name=knowledge_base_name,
342
+ api_key=api_key if isinstance(api_key, str) else None,
343
+ env_file_path=env_file_path,
344
+ env_file_encoding=env_file_encoding,
345
+ )
346
+ except ValidationError as ex:
347
+ raise ServiceInitializationError("Failed to create Azure AI Search settings.", ex) from ex
348
+
349
+ # Validate required parameters
350
+ if not settings.endpoint:
351
+ raise ServiceInitializationError(
352
+ "Azure AI Search endpoint is required. Set via 'endpoint' parameter "
353
+ "or 'AZURE_SEARCH_ENDPOINT' environment variable."
354
+ )
355
+
356
+ # Validate index_name and knowledge_base_name based on mode
357
+ # Note: settings.* contains the resolved value (explicit param OR env var)
358
+ if mode == "semantic":
359
+ # Semantic mode: always requires index_name
360
+ if not settings.index_name:
361
+ raise ServiceInitializationError(
362
+ "Azure AI Search index name is required for semantic mode. "
363
+ "Set via 'index_name' parameter or 'AZURE_SEARCH_INDEX_NAME' environment variable."
364
+ )
365
+ elif mode == "agentic":
366
+ # Agentic mode: requires exactly ONE of index_name or knowledge_base_name
367
+ if settings.index_name and settings.knowledge_base_name:
368
+ raise ServiceInitializationError(
369
+ "For agentic mode, provide either 'index_name' OR 'knowledge_base_name', not both. "
370
+ "Use 'index_name' to auto-create a Knowledge Base, or 'knowledge_base_name' to use an existing one."
371
+ )
372
+ if not settings.index_name and not settings.knowledge_base_name:
373
+ raise ServiceInitializationError(
374
+ "For agentic mode, provide either 'index_name' (to auto-create Knowledge Base) "
375
+ "or 'knowledge_base_name' (to use existing Knowledge Base). "
376
+ "Set via parameters or environment variables "
377
+ "AZURE_SEARCH_INDEX_NAME / AZURE_SEARCH_KNOWLEDGE_BASE_NAME."
378
+ )
379
+ # If using index_name to create KB, model config is required
380
+ if settings.index_name and not model_deployment_name:
381
+ raise ServiceInitializationError(
382
+ "model_deployment_name is required for agentic mode when creating Knowledge Base from index. "
383
+ "This is the Azure OpenAI deployment used by the Knowledge Base for query planning."
384
+ )
385
+
386
+ # Determine the credential to use
387
+ resolved_credential: AzureKeyCredential | AsyncTokenCredential
388
+ if credential:
389
+ # AsyncTokenCredential takes precedence
390
+ resolved_credential = credential
391
+ elif isinstance(api_key, AzureKeyCredential):
392
+ resolved_credential = api_key
393
+ elif settings.api_key:
394
+ resolved_credential = AzureKeyCredential(settings.api_key.get_secret_value())
395
+ else:
396
+ raise ServiceInitializationError(
397
+ "Azure credential is required. Provide 'api_key' or 'credential' parameter "
398
+ "or set 'AZURE_SEARCH_API_KEY' environment variable."
399
+ )
400
+
401
+ self.endpoint = settings.endpoint
402
+ self.index_name = settings.index_name
403
+ self.credential = resolved_credential
404
+ self.mode = mode
405
+ self.top_k = top_k
406
+ self.semantic_configuration_name = semantic_configuration_name
407
+ self.vector_field_name = vector_field_name
408
+ self.embedding_function = embedding_function
409
+ self.context_prompt = context_prompt or self._DEFAULT_SEARCH_CONTEXT_PROMPT
410
+
411
+ # Agentic mode parameters (Knowledge Base)
412
+ self.azure_openai_resource_url = azure_openai_resource_url
413
+ self.azure_openai_deployment_name = model_deployment_name
414
+ # If model_name not provided, default to deployment name
415
+ self.model_name = model_name or model_deployment_name
416
+ # Use resolved KB name (from explicit param or env var)
417
+ self.knowledge_base_name = settings.knowledge_base_name
418
+ self.retrieval_instructions = retrieval_instructions
419
+ self.azure_openai_api_key = azure_openai_api_key
420
+ self.knowledge_base_output_mode = knowledge_base_output_mode
421
+ self.retrieval_reasoning_effort = retrieval_reasoning_effort
422
+ self.agentic_message_history_count = agentic_message_history_count
423
+
424
+ # Determine if using existing Knowledge Base or auto-creating from index
425
+ # Since validation ensures exactly one of index_name/knowledge_base_name for agentic mode:
426
+ # - knowledge_base_name provided: use existing KB
427
+ # - index_name provided: auto-create KB from index
428
+ self._use_existing_knowledge_base = False
429
+ if mode == "agentic":
430
+ if settings.knowledge_base_name:
431
+ # Use existing KB directly (supports any source type: web, blob, index, etc.)
432
+ self._use_existing_knowledge_base = True
433
+ else:
434
+ # Auto-generate KB name from index name
435
+ self.knowledge_base_name = f"{settings.index_name}-kb"
436
+
437
+ # Auto-discover vector field if not specified
438
+ self._auto_discovered_vector_field = False
439
+ self._use_vectorizable_query = False # Will be set to True if server-side vectorization detected
440
+ if not vector_field_name and mode == "semantic":
441
+ # Attempt to auto-discover vector field from index schema
442
+ # This will be done lazily on first search to avoid blocking initialization
443
+ pass
444
+
445
+ # Validation
446
+ if vector_field_name and not embedding_function:
447
+ raise ValueError("embedding_function is required when vector_field_name is specified")
448
+
449
+ if mode == "agentic":
450
+ if not _agentic_retrieval_available:
451
+ raise ImportError(
452
+ "Agentic retrieval requires azure-search-documents >= 11.7.0b1 with Knowledge Base support. "
453
+ "Please upgrade: pip install azure-search-documents>=11.7.0b1"
454
+ )
455
+ # Only require OpenAI resource URL if NOT using existing KB
456
+ # (existing KB already has its model configuration)
457
+ # Note: model_deployment_name is already validated at initialization
458
+ if not self._use_existing_knowledge_base and not self.azure_openai_resource_url:
459
+ raise ValueError(
460
+ "azure_openai_resource_url is required for agentic mode when creating Knowledge Base from index. "
461
+ "This should be your Azure OpenAI endpoint (e.g., 'https://myresource.openai.azure.com')"
462
+ )
463
+
464
+ # Create search client for semantic mode (only if index_name is available)
465
+ self._search_client: SearchClient | None = None
466
+ if self.index_name:
467
+ self._search_client = SearchClient(
468
+ endpoint=self.endpoint,
469
+ index_name=self.index_name,
470
+ credential=self.credential,
471
+ user_agent=AGENT_FRAMEWORK_USER_AGENT,
472
+ )
473
+
474
+ # Create index client and retrieval client for agentic mode (Knowledge Base)
475
+ self._index_client: SearchIndexClient | None = None
476
+ self._retrieval_client: KnowledgeBaseRetrievalClient | None = None
477
+ if mode == "agentic":
478
+ self._index_client = SearchIndexClient(
479
+ endpoint=self.endpoint,
480
+ credential=self.credential,
481
+ user_agent=AGENT_FRAMEWORK_USER_AGENT,
482
+ )
483
+ # Retrieval client will be created after Knowledge Base initialization
484
+
485
+ self._knowledge_base_initialized = False
486
+
487
+ async def __aenter__(self) -> Self:
488
+ """Async context manager entry."""
489
+ return self
490
+
491
+ async def __aexit__(
492
+ self,
493
+ exc_type: type[BaseException] | None,
494
+ exc_val: BaseException | None,
495
+ exc_tb: Any,
496
+ ) -> None:
497
+ """Async context manager exit - cleanup clients.
498
+
499
+ Args:
500
+ exc_type: Exception type if an error occurred.
501
+ exc_val: Exception value if an error occurred.
502
+ exc_tb: Exception traceback if an error occurred.
503
+ """
504
+ # Close retrieval client if it was created
505
+ if self._retrieval_client is not None:
506
+ await self._retrieval_client.close()
507
+ self._retrieval_client = None
508
+
509
+ @override
510
+ async def invoking(
511
+ self,
512
+ messages: ChatMessage | MutableSequence[ChatMessage],
513
+ **kwargs: Any,
514
+ ) -> Context:
515
+ """Retrieve relevant context from Azure AI Search before model invocation.
516
+
517
+ Args:
518
+ messages: User messages to use for context retrieval.
519
+ **kwargs: Additional arguments (unused).
520
+
521
+ Returns:
522
+ Context object with retrieved documents as messages.
523
+ """
524
+ # Convert to list and filter to USER/ASSISTANT messages with text only
525
+ messages_list = [messages] if isinstance(messages, ChatMessage) else list(messages)
526
+
527
+ filtered_messages = [
528
+ msg
529
+ for msg in messages_list
530
+ if msg and msg.text and msg.text.strip() and msg.role in [Role.USER, Role.ASSISTANT]
531
+ ]
532
+
533
+ if not filtered_messages:
534
+ return Context()
535
+
536
+ # Perform search based on mode
537
+ if self.mode == "semantic":
538
+ # Semantic mode: flatten messages to single query
539
+ query = "\n".join(msg.text for msg in filtered_messages)
540
+ search_result_parts = await self._semantic_search(query)
541
+ else: # agentic
542
+ # Agentic mode: pass recent messages as conversation history
543
+ recent_messages = filtered_messages[-self.agentic_message_history_count :]
544
+ search_result_parts = await self._agentic_search(recent_messages)
545
+
546
+ # Format results as context - return multiple messages for each result part
547
+ if not search_result_parts:
548
+ return Context()
549
+
550
+ # Create context messages: first message with prompt, then one message per result part
551
+ context_messages = [ChatMessage(role=Role.USER, text=self.context_prompt)]
552
+ context_messages.extend([ChatMessage(role=Role.USER, text=part) for part in search_result_parts])
553
+
554
+ return Context(messages=context_messages)
555
+
556
+ def _find_vector_fields(self, index: Any) -> list[str]:
557
+ """Find all fields that can store vectors (have dimensions defined).
558
+
559
+ Args:
560
+ index: SearchIndex object from Azure Search.
561
+
562
+ Returns:
563
+ List of vector field names.
564
+ """
565
+ return [
566
+ field.name
567
+ for field in index.fields
568
+ if field.vector_search_dimensions is not None and field.vector_search_dimensions > 0
569
+ ]
570
+
571
+ def _find_vectorizable_fields(self, index: Any, vector_fields: list[str]) -> list[str]:
572
+ """Find vector fields that have auto-vectorization configured.
573
+
574
+ These are fields that have a vectorizer in their profile, meaning the index
575
+ can automatically vectorize text queries without needing a client-side embedding function.
576
+
577
+ Args:
578
+ index: SearchIndex object from Azure Search.
579
+ vector_fields: List of vector field names.
580
+
581
+ Returns:
582
+ List of vectorizable field names (subset of vector_fields).
583
+ """
584
+ vectorizable_fields: list[str] = []
585
+
586
+ # Check if index has vector search configuration
587
+ if not index.vector_search or not index.vector_search.profiles:
588
+ return vectorizable_fields
589
+
590
+ # For each vector field, check if it has a vectorizer configured
591
+ for field in index.fields:
592
+ if field.name in vector_fields and field.vector_search_profile_name:
593
+ # Find the profile for this field
594
+ profile = next(
595
+ (p for p in index.vector_search.profiles if p.name == field.vector_search_profile_name), None
596
+ )
597
+
598
+ if profile and hasattr(profile, "vectorizer_name") and profile.vectorizer_name:
599
+ # This field has server-side vectorization configured
600
+ vectorizable_fields.append(field.name)
601
+
602
+ return vectorizable_fields
603
+
604
+ async def _auto_discover_vector_field(self) -> None:
605
+ """Auto-discover vector field from index schema.
606
+
607
+ Attempts to find vector fields in the index and detect which have server-side
608
+ vectorization configured. Prioritizes vectorizable fields (which can auto-embed text)
609
+ over regular vector fields (which require client-side embedding).
610
+ """
611
+ if self._auto_discovered_vector_field or self.vector_field_name:
612
+ return # Already discovered or manually specified
613
+
614
+ try:
615
+ # Use existing index client or create temporary one
616
+ if not self._index_client:
617
+ self._index_client = SearchIndexClient(
618
+ endpoint=self.endpoint,
619
+ credential=self.credential,
620
+ user_agent=AGENT_FRAMEWORK_USER_AGENT,
621
+ )
622
+ index_client = self._index_client
623
+
624
+ # Get index schema (index_name is guaranteed to be set for semantic mode)
625
+ if not self.index_name:
626
+ logger.warning("Cannot auto-discover vector field: index_name is not set.")
627
+ self._auto_discovered_vector_field = True
628
+ return
629
+
630
+ index = await index_client.get_index(self.index_name)
631
+
632
+ # Step 1: Find all vector fields
633
+ vector_fields = self._find_vector_fields(index)
634
+
635
+ if not vector_fields:
636
+ # No vector fields found - keyword search only
637
+ logger.info(f"No vector fields found in index '{self.index_name}'. Using keyword-only search.")
638
+ self._auto_discovered_vector_field = True
639
+ return
640
+
641
+ # Step 2: Find which vector fields have server-side vectorization
642
+ vectorizable_fields = self._find_vectorizable_fields(index, vector_fields)
643
+
644
+ # Step 3: Decide which field to use
645
+ if vectorizable_fields:
646
+ # Prefer vectorizable fields (server-side embedding)
647
+ if len(vectorizable_fields) == 1:
648
+ self.vector_field_name = vectorizable_fields[0]
649
+ self._auto_discovered_vector_field = True
650
+ self._use_vectorizable_query = True # Use VectorizableTextQuery
651
+ logger.info(
652
+ f"Auto-discovered vectorizable field '{self.vector_field_name}' "
653
+ f"with server-side vectorization. No embedding_function needed."
654
+ )
655
+ else:
656
+ # Multiple vectorizable fields
657
+ logger.warning(
658
+ f"Multiple vectorizable fields found: {vectorizable_fields}. "
659
+ f"Please specify vector_field_name explicitly. Using keyword-only search."
660
+ )
661
+ elif len(vector_fields) == 1:
662
+ # Single vector field without vectorizer - needs client-side embedding
663
+ self.vector_field_name = vector_fields[0]
664
+ self._auto_discovered_vector_field = True
665
+ self._use_vectorizable_query = False
666
+
667
+ if not self.embedding_function:
668
+ logger.warning(
669
+ f"Auto-discovered vector field '{self.vector_field_name}' without server-side vectorization. "
670
+ f"Provide embedding_function for vector search, or it will fall back to keyword-only search."
671
+ )
672
+ self.vector_field_name = None
673
+ else:
674
+ # Multiple vector fields without vectorizers
675
+ logger.warning(
676
+ f"Multiple vector fields found: {vector_fields}. "
677
+ f"Please specify vector_field_name explicitly. Using keyword-only search."
678
+ )
679
+
680
+ except Exception as e:
681
+ # Log warning but continue with keyword search
682
+ logger.warning(f"Failed to auto-discover vector field: {e}. Using keyword-only search.")
683
+
684
+ self._auto_discovered_vector_field = True # Mark as attempted
685
+
686
+ async def _semantic_search(self, query: str) -> list[str]:
687
+ """Perform semantic hybrid search with semantic ranking.
688
+
689
+ This is the recommended mode for most use cases. It combines:
690
+ - Vector search (if embedding_function provided)
691
+ - Keyword search (BM25)
692
+ - Semantic reranking (if semantic_configuration_name provided)
693
+
694
+ Args:
695
+ query: Search query text.
696
+
697
+ Returns:
698
+ List of formatted search result strings, one per document.
699
+ """
700
+ # Auto-discover vector field if not already done
701
+ await self._auto_discover_vector_field()
702
+
703
+ vector_queries: list[VectorizableTextQuery | VectorizedQuery] = []
704
+
705
+ # Build vector query based on server-side vectorization or client-side embedding
706
+ if self.vector_field_name:
707
+ # Use larger k for vector query when semantic reranker is enabled for better ranking quality
708
+ vector_k = max(self.top_k, 50) if self.semantic_configuration_name else self.top_k
709
+
710
+ if self._use_vectorizable_query:
711
+ # Server-side vectorization: Index will auto-embed the text query
712
+ vector_queries = [
713
+ VectorizableTextQuery(
714
+ text=query,
715
+ k_nearest_neighbors=vector_k,
716
+ fields=self.vector_field_name,
717
+ )
718
+ ]
719
+ elif self.embedding_function:
720
+ # Client-side embedding: We provide the vector
721
+ query_vector = await self.embedding_function(query)
722
+ vector_queries = [
723
+ VectorizedQuery(
724
+ vector=query_vector,
725
+ k_nearest_neighbors=vector_k,
726
+ fields=self.vector_field_name,
727
+ )
728
+ ]
729
+ # else: vector_field_name is set but no vectorization available - skip vector search
730
+
731
+ # Build search parameters
732
+ search_params: dict[str, Any] = {
733
+ "search_text": query,
734
+ "top": self.top_k,
735
+ }
736
+
737
+ if vector_queries:
738
+ search_params["vector_queries"] = vector_queries
739
+
740
+ # Add semantic ranking if configured
741
+ if self.semantic_configuration_name:
742
+ search_params["query_type"] = QueryType.SEMANTIC
743
+ search_params["semantic_configuration_name"] = self.semantic_configuration_name
744
+ search_params["query_caption"] = QueryCaptionType.EXTRACTIVE
745
+
746
+ # Execute search (search client is guaranteed to exist for semantic mode)
747
+ if not self._search_client:
748
+ raise RuntimeError("Search client is not initialized. This should not happen in semantic mode.")
749
+
750
+ results = await self._search_client.search(**search_params) # type: ignore[reportUnknownVariableType]
751
+
752
+ # Format results with citations
753
+ formatted_results: list[str] = []
754
+ async for doc in results: # type: ignore[reportUnknownVariableType]
755
+ # Extract document ID for citation
756
+ doc_id = doc.get("id") or doc.get("@search.id") # type: ignore[reportUnknownVariableType]
757
+
758
+ # Use full document chunks with citation
759
+ doc_text: str = self._extract_document_text(doc, doc_id=doc_id) # type: ignore[reportUnknownArgumentType]
760
+ if doc_text:
761
+ formatted_results.append(doc_text) # type: ignore[reportUnknownArgumentType]
762
+
763
+ return formatted_results
764
+
765
+ async def _ensure_knowledge_base(self) -> None:
766
+ """Ensure Knowledge Base and knowledge source are created or use existing KB.
767
+
768
+ This method is idempotent - it will only create resources if they don't exist.
769
+
770
+ Note: Azure SDK uses KnowledgeAgent classes internally, but the feature
771
+ is marketed as "Knowledge Bases" in Azure AI Search.
772
+ """
773
+ if self._knowledge_base_initialized:
774
+ return
775
+
776
+ # Runtime validation
777
+ if not self.knowledge_base_name:
778
+ raise ValueError("knowledge_base_name is required for agentic mode")
779
+
780
+ knowledge_base_name = self.knowledge_base_name
781
+
782
+ # Path 1: Use existing Knowledge Base directly (no index needed)
783
+ # This supports KB with any source type (web, blob, index, etc.)
784
+ if self._use_existing_knowledge_base:
785
+ # Just create the retrieval client - KB already exists with its own sources
786
+ if _agentic_retrieval_available and self._retrieval_client is None:
787
+ self._retrieval_client = KnowledgeBaseRetrievalClient(
788
+ endpoint=self.endpoint,
789
+ knowledge_base_name=knowledge_base_name,
790
+ credential=self.credential,
791
+ user_agent=AGENT_FRAMEWORK_USER_AGENT,
792
+ )
793
+ self._knowledge_base_initialized = True
794
+ return
795
+
796
+ # Path 2: Auto-create Knowledge Base from search index
797
+ # Requires index_client and OpenAI configuration
798
+ if not self._index_client:
799
+ raise ValueError("Index client is required when creating Knowledge Base from index")
800
+ if not self.azure_openai_resource_url:
801
+ raise ValueError("azure_openai_resource_url is required when creating Knowledge Base from index")
802
+ if not self.azure_openai_deployment_name:
803
+ raise ValueError("model_deployment_name is required when creating Knowledge Base from index")
804
+ if not self.index_name:
805
+ raise ValueError("index_name is required when creating Knowledge Base from index")
806
+
807
+ # Step 1: Create or get knowledge source from index
808
+ knowledge_source_name = f"{self.index_name}-source"
809
+
810
+ try:
811
+ # Try to get existing knowledge source
812
+ await self._index_client.get_knowledge_source(knowledge_source_name)
813
+ except ResourceNotFoundError:
814
+ # Create new knowledge source if it doesn't exist
815
+ knowledge_source = SearchIndexKnowledgeSource(
816
+ name=knowledge_source_name,
817
+ description=f"Knowledge source for {self.index_name} search index",
818
+ search_index_parameters=SearchIndexKnowledgeSourceParameters(
819
+ search_index_name=self.index_name,
820
+ ),
821
+ )
822
+ await self._index_client.create_knowledge_source(knowledge_source)
823
+
824
+ # Step 2: Create or update Knowledge Base
825
+ # Always create/update to ensure configuration is current
826
+ aoai_params = AzureOpenAIVectorizerParameters(
827
+ resource_url=self.azure_openai_resource_url,
828
+ deployment_name=self.azure_openai_deployment_name,
829
+ model_name=self.model_name,
830
+ api_key=self.azure_openai_api_key,
831
+ )
832
+
833
+ # Map output mode string to SDK enum
834
+ output_mode = (
835
+ KnowledgeRetrievalOutputMode.EXTRACTIVE_DATA
836
+ if self.knowledge_base_output_mode == "extractive_data"
837
+ else KnowledgeRetrievalOutputMode.ANSWER_SYNTHESIS
838
+ )
839
+
840
+ # Map reasoning effort string to SDK class
841
+ reasoning_effort_map: dict[str, KnowledgeRetrievalReasoningEffort] = {
842
+ "minimal": KnowledgeRetrievalMinimalReasoningEffort(),
843
+ "medium": KnowledgeRetrievalMediumReasoningEffort(),
844
+ "low": KnowledgeRetrievalLowReasoningEffort(),
845
+ }
846
+ reasoning_effort = reasoning_effort_map[self.retrieval_reasoning_effort]
847
+
848
+ knowledge_base = KnowledgeBase(
849
+ name=knowledge_base_name,
850
+ description=f"Knowledge Base for multi-hop retrieval across {self.index_name}",
851
+ knowledge_sources=[
852
+ KnowledgeSourceReference(
853
+ name=knowledge_source_name,
854
+ )
855
+ ],
856
+ models=[KnowledgeBaseAzureOpenAIModel(azure_open_ai_parameters=aoai_params)],
857
+ output_mode=output_mode,
858
+ retrieval_reasoning_effort=reasoning_effort,
859
+ )
860
+ await self._index_client.create_or_update_knowledge_base(knowledge_base)
861
+
862
+ self._knowledge_base_initialized = True
863
+
864
+ # Create retrieval client now that Knowledge Base is initialized
865
+ if _agentic_retrieval_available and self._retrieval_client is None:
866
+ self._retrieval_client = KnowledgeBaseRetrievalClient(
867
+ endpoint=self.endpoint,
868
+ knowledge_base_name=knowledge_base_name,
869
+ credential=self.credential,
870
+ user_agent=AGENT_FRAMEWORK_USER_AGENT,
871
+ )
872
+
873
+ async def _agentic_search(self, messages: list[ChatMessage]) -> list[str]:
874
+ """Perform agentic retrieval with multi-hop reasoning using Knowledge Bases.
875
+
876
+ This mode uses query planning and is slightly slower than semantic search,
877
+ but provides more accurate results through intelligent retrieval.
878
+
879
+ This method uses Azure AI Search Knowledge Bases which:
880
+ 1. Analyze the query and plan sub-queries
881
+ 2. Retrieve relevant documents across multiple sources
882
+ 3. Perform multi-hop reasoning with an LLM
883
+ 4. Return extracted chunks or, in answer_synthesis mode, a synthesized answer with references
884
+
885
+ Args:
886
+ messages: Conversation history to use for retrieval context.
887
+
888
+ Returns:
889
+ List of answer parts from the Knowledge Base, one per content item.
890
+ """
891
+ # Ensure Knowledge Base is initialized
892
+ await self._ensure_knowledge_base()
893
+
894
+ # Map reasoning effort string to SDK class (for retrieval requests)
895
+ reasoning_effort_map: dict[str, KBRetrievalReasoningEffort] = {
896
+ "minimal": KBRetrievalMinimalReasoningEffort(),
897
+ "medium": KBRetrievalMediumReasoningEffort(),
898
+ "low": KBRetrievalLowReasoningEffort(),
899
+ }
900
+ reasoning_effort = reasoning_effort_map[self.retrieval_reasoning_effort]
901
+
902
+ # Map output mode string to SDK enum (for retrieval requests)
903
+ output_mode = (
904
+ KBRetrievalOutputMode.EXTRACTIVE_DATA
905
+ if self.knowledge_base_output_mode == "extractive_data"
906
+ else KBRetrievalOutputMode.ANSWER_SYNTHESIS
907
+ )
908
+
909
+ # For minimal reasoning, use intents API; for medium/low, use messages API
910
+ if self.retrieval_reasoning_effort == "minimal":
911
+ # Minimal reasoning uses intents with a single search query
912
+ query = "\n".join(msg.text for msg in messages if msg.text)
913
+ intents: list[KnowledgeRetrievalIntent] = [KnowledgeRetrievalSemanticIntent(search=query)]
914
+ retrieval_request = KnowledgeBaseRetrievalRequest(
915
+ intents=intents,
916
+ retrieval_reasoning_effort=reasoning_effort,
917
+ output_mode=output_mode,
918
+ include_activity=True,
919
+ )
920
+ else:
921
+ # Medium/low reasoning uses messages with conversation history
922
+ kb_messages = [
923
+ KnowledgeBaseMessage(
924
+ role=msg.role.value if hasattr(msg.role, "value") else str(msg.role),
925
+ content=[KnowledgeBaseMessageTextContent(text=msg.text)],
926
+ )
927
+ for msg in messages
928
+ if msg.text
929
+ ]
930
+ retrieval_request = KnowledgeBaseRetrievalRequest(
931
+ messages=kb_messages,
932
+ retrieval_reasoning_effort=reasoning_effort,
933
+ output_mode=output_mode,
934
+ include_activity=True,
935
+ )
936
+
937
+ # Use reusable retrieval client
938
+ if not self._retrieval_client:
939
+ raise RuntimeError("Retrieval client not initialized. Ensure Knowledge Base is set up correctly.")
940
+
941
+ # Perform retrieval via Knowledge Base
942
+ retrieval_result = await self._retrieval_client.retrieve(retrieval_request=retrieval_request)
943
+
944
+ # Extract answer parts from response
945
+ if retrieval_result.response and len(retrieval_result.response) > 0:
946
+ # Get the assistant's response (last message)
947
+ assistant_message = retrieval_result.response[-1]
948
+ if assistant_message.content:
949
+ # Extract all text content items as separate parts
950
+ answer_parts: list[str] = []
951
+ for content_item in assistant_message.content:
952
+ # Check if this is a text content item
953
+ if isinstance(content_item, KnowledgeBaseMessageTextContent) and content_item.text:
954
+ answer_parts.append(content_item.text)
955
+
956
+ if answer_parts:
957
+ return answer_parts
958
+
959
+ # Fallback if no answer generated
960
+ return ["No results found from Knowledge Base."]
961
+
962
+ def _extract_document_text(self, doc: dict[str, Any], doc_id: str | None = None) -> str:
963
+ """Extract readable text from a search document with optional citation.
964
+
965
+ Args:
966
+ doc: Search result document.
967
+ doc_id: Optional document ID for citation.
968
+
969
+ Returns:
970
+ Formatted document text with citation if doc_id provided.
971
+ """
972
+ # Try common text field names
973
+ text = ""
974
+ for field in ["content", "text", "description", "body", "chunk"]:
975
+ if doc.get(field):
976
+ text = str(doc[field])
977
+ break
978
+
979
+ # Fallback: concatenate all string fields
980
+ if not text:
981
+ text_parts: list[str] = []
982
+ for key, value in doc.items():
983
+ if isinstance(value, str) and not key.startswith("@") and key != "id":
984
+ text_parts.append(f"{key}: {value}")
985
+ text = " | ".join(text_parts) if text_parts else ""
986
+
987
+ # Add citation if document ID provided
988
+ if doc_id and text:
989
+ return f"[Source: {doc_id}] {text}"
990
+ return text
@@ -0,0 +1,49 @@
1
+ Metadata-Version: 2.4
2
+ Name: agent-framework-azure-ai-search
3
+ Version: 1.0.0b251218
4
+ Summary: Azure AI Search integration for Microsoft Agent Framework.
5
+ Author-email: Microsoft <af-support@microsoft.com>
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Development Status :: 4 - Beta
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Classifier: Programming Language :: Python :: 3.14
17
+ Classifier: Typing :: Typed
18
+ License-File: LICENSE
19
+ Requires-Dist: agent-framework-core
20
+ Requires-Dist: azure-search-documents==11.7.0b2
21
+ Project-URL: homepage, https://aka.ms/agent-framework
22
+ Project-URL: issues, https://github.com/microsoft/agent-framework/issues
23
+ Project-URL: release_notes, https://github.com/microsoft/agent-framework/releases?q=tag%3Apython-1&expanded=true
24
+ Project-URL: source, https://github.com/microsoft/agent-framework/tree/main/python
25
+
26
+ # Get Started with Microsoft Agent Framework Azure AI Search
27
+
28
+ Please install this package via pip:
29
+
30
+ ```bash
31
+ pip install agent-framework-azure-ai-search --pre
32
+ ```
33
+
34
+ ## Azure AI Search Integration
35
+
36
+ The Azure AI Search integration provides context providers for RAG (Retrieval Augmented Generation) capabilities with two modes:
37
+
38
+ - **Semantic Mode**: Fast hybrid search (vector + keyword) with semantic ranking
39
+ - **Agentic Mode**: Multi-hop reasoning using Knowledge Bases for complex queries
40
+
41
+ ### Basic Usage Example
42
+
43
+ See the [Azure AI Search context provider examples](https://github.com/microsoft/agent-framework/tree/main/python/samples/getting_started/agents/azure_ai/) which demonstrate:
44
+
45
+ - Semantic search with hybrid (vector + keyword) queries
46
+ - Agentic mode with Knowledge Bases for complex multi-hop reasoning
47
+ - Environment variable configuration with Settings class
48
+ - API key and managed identity authentication
49
+
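+ For a quick start, here is a minimal sketch of semantic-mode retrieval. The endpoint, index
+ name, and question are placeholders, and reading `context.messages` simply mirrors how the
+ provider builds its `Context`; in practice the provider is typically attached to an agent
+ rather than called directly.
+ 
+ ```python
+ import asyncio
+ 
+ from agent_framework import ChatMessage, Role
+ from agent_framework_azure_ai_search import AzureAISearchContextProvider
+ from azure.identity.aio import DefaultAzureCredential
+ 
+ 
+ async def main() -> None:
+     # Semantic mode: fast hybrid search. The endpoint and index name can also be
+     # supplied via AZURE_SEARCH_ENDPOINT / AZURE_SEARCH_INDEX_NAME.
+     provider = AzureAISearchContextProvider(
+         endpoint="https://mysearch.search.windows.net",
+         index_name="my-index",
+         credential=DefaultAzureCredential(),
+         mode="semantic",
+     )
+     context = await provider.invoking(ChatMessage(role=Role.USER, text="What does the documentation cover?"))
+     for message in context.messages or []:
+         print(message.text)
+ 
+ 
+ asyncio.run(main())
+ ```
+ 
+ For agentic mode, pass `mode="agentic"` together with either an existing `knowledge_base_name`
+ or an `index_name` plus `azure_openai_resource_url` and `model_deployment_name`, as described in
+ the provider docstrings.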
@@ -0,0 +1,6 @@
1
+ agent_framework_azure_ai_search/__init__.py,sha256=DFgo7no3TSriJPXg5FVoDBWj5FpmnQ0seKd55ebxOKQ,425
2
+ agent_framework_azure_ai_search/_search_provider.py,sha256=88p6Hog5CXOwduTAs-vsymsxBPRXlpd-67y_dJyEax0,46345
3
+ agent_framework_azure_ai_search-1.0.0b251218.dist-info/licenses/LICENSE,sha256=ws_MuBL-SCEBqPBFl9_FqZkaaydIJmxHrJG2parhU4M,1141
4
+ agent_framework_azure_ai_search-1.0.0b251218.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
5
+ agent_framework_azure_ai_search-1.0.0b251218.dist-info/METADATA,sha256=8N1AVaO3edJ1Si29rin0u4VZftcg_xr179_7pIJ5ilo,2083
6
+ agent_framework_azure_ai_search-1.0.0b251218.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: flit 3.12.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) Microsoft Corporation.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE