srx-lib-azure 0.1.8__tar.gz → 0.3.0__tar.gz
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- srx_lib_azure-0.3.0/PKG-INFO +134 -0
- srx_lib_azure-0.3.0/README.md +114 -0
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/pyproject.toml +18 -2
- srx_lib_azure-0.3.0/src/srx_lib_azure/__init__.py +23 -0
- srx_lib_azure-0.3.0/src/srx_lib_azure/document.py +262 -0
- srx_lib_azure-0.3.0/src/srx_lib_azure/speech.py +296 -0
- srx_lib_azure-0.1.8/PKG-INFO +0 -70
- srx_lib_azure-0.1.8/README.md +0 -58
- srx_lib_azure-0.1.8/src/srx_lib_azure/__init__.py +0 -5
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/.github/workflows/publish.yml +0 -0
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/.gitignore +0 -0
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/blob.py +0 -0
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/email.py +0 -0
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/table.py +0 -0

srx_lib_azure-0.3.0/PKG-INFO (new file)
@@ -0,0 +1,134 @@
+Metadata-Version: 2.4
+Name: srx-lib-azure
+Version: 0.3.0
+Summary: Azure helpers for SRX services: Blob, Email, Table, Document Intelligence, Speech Services
+Author-email: SRX <dev@srx.id>
+Requires-Python: >=3.12
+Requires-Dist: azure-ai-documentintelligence>=1.0.0
+Requires-Dist: azure-communication-email>=1.0.0
+Requires-Dist: azure-data-tables>=12.7.0
+Requires-Dist: azure-storage-blob>=12.22.0
+Requires-Dist: loguru>=0.7.2
+Provides-Extra: all
+Requires-Dist: azure-ai-documentintelligence>=1.0.0; extra == 'all'
+Requires-Dist: azure-cognitiveservices-speech>=1.41.1; extra == 'all'
+Provides-Extra: document
+Requires-Dist: azure-ai-documentintelligence>=1.0.0; extra == 'document'
+Provides-Extra: speech
+Requires-Dist: azure-cognitiveservices-speech>=1.41.1; extra == 'speech'
+Description-Content-Type: text/markdown
+
+# srx-lib-azure
+
+Lightweight wrappers over Azure SDKs used across SRX services.
+
+What it includes:
+- **Blob**: upload/download helpers, SAS URL generation
+- **Email** (Azure Communication Services): simple async sender
+- **Table**: simple CRUD helpers
+- **Document Intelligence** (OCR): document analysis from URLs or bytes
+
+## Install
+
+PyPI (public):
+
+- `pip install srx-lib-azure`
+
+uv (pyproject):
+```
+[project]
+dependencies = ["srx-lib-azure>=0.1.0"]
+```
+
+## Usage
+
+Blob:
+```
+from srx_lib_azure.blob import AzureBlobService
+blob = AzureBlobService()
+url = await blob.upload_file(upload_file, "documents/report.pdf")
+```
+
+Email:
+```
+from srx_lib_azure.email import EmailService
+svc = EmailService()
+await svc.send_notification("user@example.com", "Subject", "Hello", html=False)
+```
+
+Table:
+```
+from srx_lib_azure.table import AzureTableService
+store = AzureTableService()
+store.ensure_table("events")
+store.upsert_entity("events", {"PartitionKey":"p","RowKey":"r","EventType":"x"})
+```
+
+Document Intelligence (OCR):
+```python
+from srx_lib_azure import AzureDocumentIntelligenceService
+
+# Initialize with endpoint and key
+doc_service = AzureDocumentIntelligenceService(
+    endpoint="https://your-resource.cognitiveservices.azure.com/",
+    key="your-api-key"
+)
+
+# Analyze document from URL
+result = await doc_service.analyze_document_from_url(
+    url="https://example.com/document.pdf",
+    model_id="prebuilt-read"  # or "prebuilt-layout", "prebuilt-invoice", etc.
+)
+
+# Analyze document from bytes
+with open("document.pdf", "rb") as f:
+    content = f.read()
+result = await doc_service.analyze_document_from_bytes(
+    file_content=content,
+    model_id="prebuilt-read"
+)
+
+# Result structure:
+# {
+#     "success": True/False,
+#     "content": "extracted text...",
+#     "pages": [{"page_number": 1, "width": 8.5, ...}, ...],
+#     "page_count": 10,
+#     "confidence": 0.98,
+#     "model_id": "prebuilt-read",
+#     "metadata": {...},
+#     "error": None  # or error message if failed
+# }
+```
+
+## Environment Variables
+
+- **Blob & Table**: `AZURE_STORAGE_CONNECTION_STRING` (required)
+- **Email (ACS)**: `ACS_CONNECTION_STRING`, `EMAIL_SENDER`
+- **Document Intelligence**: `AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT`, `AZURE_DOCUMENT_INTELLIGENCE_KEY`
+- **Optional**: `AZURE_STORAGE_ACCOUNT_KEY`, `AZURE_BLOB_URL`, `AZURE_SAS_TOKEN`
+
+## Optional Dependencies
+
+All services are optional and won't break if their dependencies aren't installed:
+
+```bash
+# Base installation (includes all services by default)
+pip install srx-lib-azure
+
+# Or install only what you need - document intelligence is optional
+pip install srx-lib-azure[document]  # Adds Document Intelligence support
+
+# Install with all optional dependencies
+pip install srx-lib-azure[all]
+```
+
+If you import a service without its required Azure SDK, it will log a warning but won't crash.
+
+## Release
+
+Tag `vX.Y.Z` to publish to GitHub Packages via Actions.
+
+## License
+
+Proprietary © SRX

srx_lib_azure-0.3.0/README.md (new file)
@@ -0,0 +1,114 @@
+# srx-lib-azure
+
+Lightweight wrappers over Azure SDKs used across SRX services.
+
+What it includes:
+- **Blob**: upload/download helpers, SAS URL generation
+- **Email** (Azure Communication Services): simple async sender
+- **Table**: simple CRUD helpers
+- **Document Intelligence** (OCR): document analysis from URLs or bytes
+
+## Install
+
+PyPI (public):
+
+- `pip install srx-lib-azure`
+
+uv (pyproject):
+```
+[project]
+dependencies = ["srx-lib-azure>=0.1.0"]
+```
+
+## Usage
+
+Blob:
+```
+from srx_lib_azure.blob import AzureBlobService
+blob = AzureBlobService()
+url = await blob.upload_file(upload_file, "documents/report.pdf")
+```
+
+Email:
+```
+from srx_lib_azure.email import EmailService
+svc = EmailService()
+await svc.send_notification("user@example.com", "Subject", "Hello", html=False)
+```
+
+Table:
+```
+from srx_lib_azure.table import AzureTableService
+store = AzureTableService()
+store.ensure_table("events")
+store.upsert_entity("events", {"PartitionKey":"p","RowKey":"r","EventType":"x"})
+```
+
+Document Intelligence (OCR):
+```python
+from srx_lib_azure import AzureDocumentIntelligenceService
+
+# Initialize with endpoint and key
+doc_service = AzureDocumentIntelligenceService(
+    endpoint="https://your-resource.cognitiveservices.azure.com/",
+    key="your-api-key"
+)
+
+# Analyze document from URL
+result = await doc_service.analyze_document_from_url(
+    url="https://example.com/document.pdf",
+    model_id="prebuilt-read"  # or "prebuilt-layout", "prebuilt-invoice", etc.
+)
+
+# Analyze document from bytes
+with open("document.pdf", "rb") as f:
+    content = f.read()
+result = await doc_service.analyze_document_from_bytes(
+    file_content=content,
+    model_id="prebuilt-read"
+)
+
+# Result structure:
+# {
+#     "success": True/False,
+#     "content": "extracted text...",
+#     "pages": [{"page_number": 1, "width": 8.5, ...}, ...],
+#     "page_count": 10,
+#     "confidence": 0.98,
+#     "model_id": "prebuilt-read",
+#     "metadata": {...},
+#     "error": None  # or error message if failed
+# }
+```
+
+## Environment Variables
+
+- **Blob & Table**: `AZURE_STORAGE_CONNECTION_STRING` (required)
+- **Email (ACS)**: `ACS_CONNECTION_STRING`, `EMAIL_SENDER`
+- **Document Intelligence**: `AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT`, `AZURE_DOCUMENT_INTELLIGENCE_KEY`
+- **Optional**: `AZURE_STORAGE_ACCOUNT_KEY`, `AZURE_BLOB_URL`, `AZURE_SAS_TOKEN`
+
+## Optional Dependencies
+
+All services are optional and won't break if their dependencies aren't installed:
+
+```bash
+# Base installation (includes all services by default)
+pip install srx-lib-azure
+
+# Or install only what you need - document intelligence is optional
+pip install srx-lib-azure[document]  # Adds Document Intelligence support
+
+# Install with all optional dependencies
+pip install srx-lib-azure[all]
+```
+
+If you import a service without its required Azure SDK, it will log a warning but won't crash.
+
+## Release
+
+Tag `vX.Y.Z` to publish to GitHub Packages via Actions.
+
+## License
+
+Proprietary © SRX

{srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/pyproject.toml
@@ -4,8 +4,8 @@ build-backend = "hatchling.build"
 
 [project]
 name = "srx-lib-azure"
-version = "0.1.8"
-description = "Azure helpers for SRX services: Blob, Email, Table"
+version = "0.3.0"
+description = "Azure helpers for SRX services: Blob, Email, Table, Document Intelligence, Speech Services"
 readme = "README.md"
 requires-python = ">=3.12"
 authors = [{ name = "SRX", email = "dev@srx.id" }]
@@ -14,6 +14,22 @@ dependencies = [
     "azure-storage-blob>=12.22.0",
     "azure-communication-email>=1.0.0",
     "azure-data-tables>=12.7.0",
+    "azure-ai-documentintelligence>=1.0.0",
+]
+
+[project.optional-dependencies]
+# Optional extra for Document Intelligence (OCR)
+document = [
+    "azure-ai-documentintelligence>=1.0.0",
+]
+# Optional extra for Speech Services (audio transcription)
+speech = [
+    "azure-cognitiveservices-speech>=1.41.1",
+]
+# Install all optional dependencies
+all = [
+    "azure-ai-documentintelligence>=1.0.0",
+    "azure-cognitiveservices-speech>=1.41.1",
 ]
 
 [tool.hatch.build.targets.wheel]
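
The new `[project.optional-dependencies]` table only declares which SDKs each extra pulls in; an installed environment does not record the extra name itself. A minimal consumer-side sketch (the `_sdk_available` helper is illustrative, not part of the library) that probes for the backing SDKs before deciding which services to wire up:

```python
from importlib.util import find_spec


def _sdk_available(module: str) -> bool:
    """Return True if the given SDK module can be located without importing it."""
    try:
        return find_spec(module) is not None
    except ModuleNotFoundError:
        return False


# These module names correspond to the SDKs pulled in by the extras declared above.
HAS_DOCUMENT = _sdk_available("azure.ai.documentintelligence")
HAS_SPEECH = _sdk_available("azure.cognitiveservices.speech")

print(f"document extra usable: {HAS_DOCUMENT}, speech extra usable: {HAS_SPEECH}")
```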

srx_lib_azure-0.3.0/src/srx_lib_azure/__init__.py (new file)
@@ -0,0 +1,23 @@
+from .blob import AzureBlobService
+from .document import AzureDocumentIntelligenceService
+from .email import EmailService
+from .table import AzureTableService
+
+# Optional import - only available if speech extra is installed
+try:
+    from .speech import AzureSpeechService
+    __all__ = [
+        "AzureBlobService",
+        "AzureDocumentIntelligenceService",
+        "AzureTableService",
+        "EmailService",
+        "AzureSpeechService",
+    ]
+except ImportError:
+    # Speech SDK not installed - service not available
+    __all__ = [
+        "AzureBlobService",
+        "AzureDocumentIntelligenceService",
+        "AzureTableService",
+        "EmailService",
+    ]
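
Since `__init__.py` only guarantees the `AzureSpeechService` export when the optional speech import succeeds, a consumer that must keep working without the speech extra can guard the import itself. A minimal sketch (the `make_speech_service` helper is hypothetical consumer code, not part of the library):

```python
# Hypothetical consumer module: degrade gracefully when the speech extra is missing.
try:
    from srx_lib_azure import AzureSpeechService
except ImportError:
    AzureSpeechService = None  # type: ignore[assignment, misc]


def make_speech_service():
    if AzureSpeechService is None:
        raise RuntimeError("Transcription requires: pip install srx-lib-azure[speech]")
    # Even when the class is importable, transcription calls still raise RuntimeError
    # via _check_availability() if the Speech SDK or credentials are missing.
    return AzureSpeechService(warn_if_unconfigured=True)
```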

srx_lib_azure-0.3.0/src/srx_lib_azure/document.py (new file)
@@ -0,0 +1,262 @@
+import os
+import io
+import asyncio
+from typing import Dict, Any, Optional
+
+from loguru import logger
+
+try:
+    from azure.ai.documentintelligence import DocumentIntelligenceClient
+    from azure.ai.documentintelligence.models import AnalyzeDocumentRequest, AnalyzeResult
+    from azure.core.credentials import AzureKeyCredential
+    from azure.core.exceptions import (
+        ClientAuthenticationError,
+        HttpResponseError,
+        ServiceRequestError,
+    )
+except Exception:  # pragma: no cover - optional dependency at import time
+    DocumentIntelligenceClient = None  # type: ignore
+    AnalyzeDocumentRequest = None  # type: ignore
+    AnalyzeResult = None  # type: ignore
+    AzureKeyCredential = None  # type: ignore
+    ClientAuthenticationError = None  # type: ignore
+    HttpResponseError = None  # type: ignore
+    ServiceRequestError = None  # type: ignore
+
+
+class AzureDocumentIntelligenceService:
+    """Wrapper for Azure Document Intelligence (OCR/Document Analysis).
+
+    Does not raise on missing configuration to keep the library optional.
+    If not configured, analysis calls return error responses with descriptive messages.
+    """
+
+    def __init__(
+        self,
+        *,
+        endpoint: Optional[str] = None,
+        key: Optional[str] = None,
+        warn_if_unconfigured: bool = False,
+    ):
+        """Initialize Document Intelligence service.
+
+        Args:
+            endpoint: Azure Document Intelligence endpoint URL
+            key: Azure Document Intelligence API key
+            warn_if_unconfigured: Whether to log a warning if not configured
+        """
+        self.endpoint = endpoint or os.getenv("AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT")
+        self.key = key or os.getenv("AZURE_DOCUMENT_INTELLIGENCE_KEY")
+
+        if not self.endpoint or not self.key or DocumentIntelligenceClient is None:
+            self.client = None
+            if warn_if_unconfigured:
+                logger.warning(
+                    "AzureDocumentIntelligenceService not configured "
+                    "(missing endpoint/key or azure-ai-documentintelligence SDK). "
+                    "Calls will return error responses."
+                )
+        else:
+            try:
+                self.client = DocumentIntelligenceClient(
+                    endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
+                )
+            except Exception as e:
+                self.client = None
+                logger.warning("DocumentIntelligenceClient initialization failed: %s", e)
+
+    async def analyze_document_from_url(
+        self, url: str, model_id: str = "prebuilt-read"
+    ) -> Dict[str, Any]:
+        """Analyze a document from a URL using Azure Document Intelligence.
+
+        Args:
+            url: URL of the document to analyze (must be accessible to Azure)
+            model_id: Model to use (default: "prebuilt-read" for OCR)
+                Other options: "prebuilt-layout", "prebuilt-invoice", etc.
+
+        Returns:
+            Dict with analysis results:
+            - success (bool): Whether analysis succeeded
+            - content (str | None): Extracted text content
+            - pages (list[dict] | None): Page information
+            - page_count (int | None): Total number of pages
+            - confidence (float | None): Average OCR confidence (0-1)
+            - model_id (str | None): Model used
+            - metadata (dict | None): Additional metadata
+            - error (str | None): Error message if failed
+        """
+        if not self.client:
+            logger.warning("Document analysis from URL skipped: service not configured")
+            return {
+                "success": False,
+                "error": "Document Intelligence service not configured",
+            }
+
+        try:
+            logger.info(f"Starting document analysis from URL: {url} (model: {model_id})")
+
+            # Run the blocking operation in a thread pool
+            poller = await asyncio.to_thread(
+                self.client.begin_analyze_document,
+                model_id,
+                AnalyzeDocumentRequest(url_source=url),
+            )
+
+            # Wait for the result
+            result: AnalyzeResult = await asyncio.to_thread(poller.result)
+
+            logger.info(
+                f"Document analysis completed (model: {model_id}, pages: {len(result.pages or [])})"
+            )
+
+            return self._format_result(result, model_id)
+
+        except ClientAuthenticationError as e:
+            logger.error(f"Authentication failed for document analysis: {e}")
+            return {"success": False, "error": f"Authentication failed: {e}"}
+        except HttpResponseError as e:
+            logger.error(f"Azure service error analyzing document: {e.status_code} - {e.message}")
+            return {
+                "success": False,
+                "error": f"Azure service error ({e.status_code}): {e.message}",
+            }
+        except ServiceRequestError as e:
+            logger.error(f"Network error analyzing document: {e}")
+            return {"success": False, "error": f"Network error: {e}"}
+        except Exception as e:
+            logger.error(f"Unexpected error analyzing document from URL: {e}")
+            return {"success": False, "error": f"Unexpected error: {e}"}
+
+    async def analyze_document_from_bytes(
+        self, file_content: bytes, model_id: str = "prebuilt-read"
+    ) -> Dict[str, Any]:
+        """Analyze a document from bytes using Azure Document Intelligence.
+
+        Args:
+            file_content: Document content as bytes (PDF, image, etc.)
+            model_id: Model to use (default: "prebuilt-read" for OCR)
+
+        Returns:
+            Dict with analysis results (same format as analyze_document_from_url)
+        """
+        if not self.client:
+            logger.warning("Document analysis from bytes skipped: service not configured")
+            return {
+                "success": False,
+                "error": "Document Intelligence service not configured",
+            }
+
+        try:
+            logger.info(
+                f"Starting document analysis from bytes (size: {len(file_content)} bytes, model: {model_id})"
+            )
+
+            # Create a file-like object from bytes
+            file_stream = io.BytesIO(file_content)
+
+            # Run the blocking operation in a thread pool
+            poller = await asyncio.to_thread(
+                self.client.begin_analyze_document,
+                model_id,
+                body=file_stream,
+            )
+
+            # Wait for the result
+            result: AnalyzeResult = await asyncio.to_thread(poller.result)
+
+            logger.info(
+                f"Document analysis completed (model: {model_id}, pages: {len(result.pages or [])})"
+            )
+
+            return self._format_result(result, model_id)
+
+        except ClientAuthenticationError as e:
+            logger.error(f"Authentication failed for document analysis: {e}")
+            return {"success": False, "error": f"Authentication failed: {e}"}
+        except HttpResponseError as e:
+            logger.error(f"Azure service error analyzing document: {e.status_code} - {e.message}")
+            return {
+                "success": False,
+                "error": f"Azure service error ({e.status_code}): {e.message}",
+            }
+        except ServiceRequestError as e:
+            logger.error(f"Network error analyzing document: {e}")
+            return {"success": False, "error": f"Network error: {e}"}
+        except Exception as e:
+            logger.error(f"Unexpected error analyzing document from bytes: {e}")
+            return {"success": False, "error": f"Unexpected error: {e}"}
+
+    def _format_result(self, result: AnalyzeResult, model_id: str) -> Dict[str, Any]:
+        """Format the AnalyzeResult into a dict response.
+
+        Args:
+            result: Azure Document Intelligence AnalyzeResult
+            model_id: Model ID used for analysis
+
+        Returns:
+            Formatted dict with extracted content and metadata
+        """
+        # Extract all text content
+        content_parts: list[str] = []
+        pages_info: list[Dict[str, Any]] = []
+        total_confidence = 0.0
+        confidence_count = 0
+
+        if result.pages:
+            for page in result.pages:
+                # Collect page info
+                page_info = {
+                    "page_number": page.page_number,
+                    "width": page.width,
+                    "height": page.height,
+                    "unit": page.unit,
+                    "lines_count": len(page.lines or []),
+                    "words_count": len(page.words or []),
+                }
+                pages_info.append(page_info)
+
+                # Extract text from lines
+                if page.lines:
+                    for line in page.lines:
+                        content_parts.append(line.content)
+                        # Track confidence if available
+                        if hasattr(line, "confidence") and line.confidence is not None:
+                            total_confidence += line.confidence
+                            confidence_count += 1
+
+        # Combine all content with newlines
+        full_content = "\n".join(content_parts)
+
+        # Calculate average confidence
+        avg_confidence = total_confidence / confidence_count if confidence_count > 0 else None
+
+        # Build metadata
+        metadata: Dict[str, Any] = {
+            "content_format": (
+                result.content_format if hasattr(result, "content_format") else None
+            ),
+            "api_version": result.api_version if hasattr(result, "api_version") else None,
+        }
+
+        # Add languages if detected
+        if hasattr(result, "languages") and result.languages:
+            metadata["languages"] = [
+                {"locale": lang.locale, "confidence": lang.confidence} for lang in result.languages
+            ]
+
+        # Add styles if detected (e.g., handwriting)
+        if hasattr(result, "styles") and result.styles:
+            metadata["has_handwriting"] = any(
+                style.is_handwritten for style in result.styles if hasattr(style, "is_handwritten")
+            )
+
+        return {
+            "success": True,
+            "content": full_content if full_content else None,
+            "pages": pages_info if pages_info else None,
+            "page_count": len(pages_info) if pages_info else None,
+            "confidence": avg_confidence,
+            "model_id": model_id,
+            "metadata": metadata,
+        }
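
Because the service reports failures through the returned dict instead of raising, callers are expected to branch on `success`. A minimal usage sketch against the code above (the `extract_text` helper and the `document.pdf` path are illustrative, not part of the library):

```python
import asyncio

from srx_lib_azure import AzureDocumentIntelligenceService


async def extract_text(pdf_path: str) -> str:
    # Falls back to AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT / _KEY when not passed explicitly.
    service = AzureDocumentIntelligenceService(warn_if_unconfigured=True)

    with open(pdf_path, "rb") as f:
        result = await service.analyze_document_from_bytes(f.read(), model_id="prebuilt-read")

    # Failures (missing config, auth, network, ...) come back as {"success": False, "error": ...}.
    if not result["success"]:
        raise RuntimeError(f"OCR failed: {result['error']}")
    return result["content"] or ""


if __name__ == "__main__":
    print(asyncio.run(extract_text("document.pdf")))  # placeholder path
```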

srx_lib_azure-0.3.0/src/srx_lib_azure/speech.py (new file)
@@ -0,0 +1,296 @@
+import asyncio
+import os
+import subprocess
+import tempfile
+from pathlib import Path
+from typing import Optional
+
+from loguru import logger
+
+# Optional import - gracefully handle if azure-cognitiveservices-speech is not installed
+try:
+    import azure.cognitiveservices.speech as speechsdk
+    SPEECH_SDK_AVAILABLE = True
+except ImportError:
+    SPEECH_SDK_AVAILABLE = False
+    logger.warning(
+        "azure-cognitiveservices-speech not installed. "
+        "Install with: pip install srx-lib-azure[speech]"
+    )
+
+
+class AzureSpeechService:
+    """Azure Speech Service for audio transcription.
+
+    Provides audio-to-text transcription using Azure Cognitive Services Speech SDK.
+    Supports continuous recognition for longer audio files and language selection.
+
+    Configuration can be passed explicitly via constructor or fallback to environment variables.
+    Operations will error if SDK is not installed or required credentials are missing.
+    """
+
+    def __init__(
+        self,
+        *,
+        speech_key: Optional[str] = None,
+        speech_region: Optional[str] = None,
+        speech_endpoint: Optional[str] = None,
+        warn_if_unconfigured: bool = False,
+    ) -> None:
+        """Initialize Azure Speech Service.
+
+        Args:
+            speech_key: Azure Speech API key (falls back to AZURE_SPEECH_KEY env var)
+            speech_region: Azure region (falls back to AZURE_SPEECH_REGION env var)
+            speech_endpoint: Optional custom endpoint (falls back to AZURE_SPEECH_ENDPOINT env var)
+            warn_if_unconfigured: Whether to warn at initialization if not configured
+        """
+        self.speech_key = speech_key or os.getenv("AZURE_SPEECH_KEY")
+        self.speech_region = speech_region or os.getenv("AZURE_SPEECH_REGION")
+        self.speech_endpoint = speech_endpoint or os.getenv("AZURE_SPEECH_ENDPOINT")
+
+        if warn_if_unconfigured and not self.speech_key:
+            logger.warning(
+                "Azure Speech credentials not configured; transcription operations may fail."
+            )
+
+    def _check_availability(self) -> None:
+        """Check if Speech SDK is available and credentials are configured."""
+        if not SPEECH_SDK_AVAILABLE:
+            raise RuntimeError(
+                "azure-cognitiveservices-speech package not installed. "
+                "Install with: pip install srx-lib-azure[speech]"
+            )
+        if not self.speech_key:
+            raise RuntimeError(
+                "Azure Speech credentials not configured. "
+                "Provide speech_key or set AZURE_SPEECH_KEY environment variable."
+            )
+        if not self.speech_region and not self.speech_endpoint:
+            raise RuntimeError(
+                "Azure Speech region or endpoint not configured. "
+                "Provide speech_region or speech_endpoint, or set AZURE_SPEECH_REGION environment variable."
+            )
+
+    def _preprocess_audio(self, input_path: str) -> str:
+        """Convert audio to 16kHz mono WAV format for optimal Azure Speech processing.
+
+        Args:
+            input_path: Path to input audio file
+
+        Returns:
+            Path to preprocessed WAV file
+
+        Raises:
+            RuntimeError: If ffmpeg is not available or conversion fails
+        """
+        try:
+            # Check if ffmpeg is available
+            subprocess.run(
+                ["ffmpeg", "-version"],
+                capture_output=True,
+                check=True,
+            )
+        except (subprocess.CalledProcessError, FileNotFoundError) as e:
+            raise RuntimeError(
+                "ffmpeg not found. Please install ffmpeg for audio preprocessing."
+            ) from e
+
+        # Create temporary WAV file
+        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tf:
+            output_path = tf.name
+
+        try:
+            # Convert to 16kHz mono WAV
+            subprocess.run(
+                [
+                    "ffmpeg",
+                    "-i",
+                    input_path,
+                    "-ar",
+                    "16000",  # 16kHz sample rate
+                    "-ac",
+                    "1",  # Mono
+                    "-y",  # Overwrite output file
+                    output_path,
+                ],
+                capture_output=True,
+                check=True,
+            )
+            logger.info(f"Preprocessed audio: {input_path} -> {output_path}")
+            return output_path
+        except subprocess.CalledProcessError as e:
+            # Clean up on error
+            if os.path.exists(output_path):
+                os.unlink(output_path)
+            raise RuntimeError(f"Audio preprocessing failed: {e.stderr.decode()}") from e
+
+    async def transcribe_audio_to_markdown(
+        self,
+        audio_path: str,
+        language: str = "id-ID",
+        preprocess: bool = True,
+    ) -> str:
+        """Transcribe audio file to markdown-formatted text.
+
+        Args:
+            audio_path: Path to audio file (mp3, m4a, wav, etc.)
+            language: BCP-47 language code (default: 'id-ID' for Indonesian)
+                Common codes: 'en-US', 'id-ID', 'ms-MY', 'zh-CN', 'ja-JP'
+            preprocess: Whether to preprocess audio to 16kHz mono WAV (recommended)
+
+        Returns:
+            Markdown-formatted transcription text
+
+        Raises:
+            RuntimeError: If SDK not available, credentials missing, or transcription fails
+        """
+        self._check_availability()
+
+        # Preprocess audio if requested
+        wav_path = audio_path
+        cleanup_wav = False
+        if preprocess:
+            wav_path = self._preprocess_audio(audio_path)
+            cleanup_wav = True
+
+        try:
+            # Configure Azure Speech
+            if self.speech_endpoint:
+                speech_config = speechsdk.SpeechConfig(
+                    subscription=self.speech_key,
+                    endpoint=self.speech_endpoint,
+                )
+            else:
+                speech_config = speechsdk.SpeechConfig(
+                    subscription=self.speech_key,
+                    region=self.speech_region,
+                )
+
+            # Configure audio input
+            audio_config = speechsdk.audio.AudioConfig(filename=wav_path)
+
+            # Create speech recognizer with language
+            recognizer = speechsdk.SpeechRecognizer(
+                speech_config=speech_config,
+                audio_config=audio_config,
+                language=language,
+            )
+
+            # Event-driven continuous recognition
+            paragraphs: list[str] = []
+            current: list[str] = []
+            done = asyncio.get_event_loop().create_future()
+
+            def recognizing_handler(evt):
+                """Handle intermediate recognition results."""
+                if evt.result.reason == speechsdk.ResultReason.RecognizingSpeech:
+                    logger.debug(f"Recognizing: {evt.result.text}")
+
+            def recognized_handler(evt):
+                """Handle final recognition results."""
+                if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:
+                    text = evt.result.text.strip()
+                    if text:
+                        current.append(text)
+                        logger.debug(f"Recognized: {text}")
+                elif evt.result.reason == speechsdk.ResultReason.NoMatch:
+                    logger.debug("No speech recognized")
+
+            def session_stopped(evt):
+                """Handle session stop."""
+                logger.info("Session stopped")
+                if current:
+                    paragraphs.append(" ".join(current))
+                if not done.done():
+                    done.set_result(True)
+
+            def canceled(evt):
+                """Handle cancellation."""
+                if evt.reason == speechsdk.CancellationReason.Error:
+                    error_msg = f"Transcription error: {evt.error_details}"
+                    logger.error(error_msg)
+                    if not done.done():
+                        done.set_exception(RuntimeError(error_msg))
+                else:
+                    logger.info("Transcription canceled")
+                    if not done.done():
+                        done.set_result(True)
+
+            # Connect event handlers
+            recognizer.recognizing.connect(recognizing_handler)
+            recognizer.recognized.connect(recognized_handler)
+            recognizer.session_stopped.connect(session_stopped)
+            recognizer.canceled.connect(canceled)
+
+            # Start continuous recognition
+            logger.info(f"Starting transcription for {audio_path} (language: {language})")
+            recognizer.start_continuous_recognition_async().get()
+
+            # Wait for completion (max 15 minutes timeout)
+            try:
+                await asyncio.wait_for(done, timeout=900)
+            except asyncio.TimeoutError:
+                raise RuntimeError("Transcription timeout (15 minutes exceeded)")
+
+            # Stop recognition
+            recognizer.stop_continuous_recognition_async().get()
+
+            # Format as markdown with bullet points
+            if not paragraphs:
+                logger.warning("No transcription results")
+                return ""
+
+            markdown = "\n\n".join(f"- {para}" for para in paragraphs)
+            logger.info(f"Transcription completed: {len(paragraphs)} paragraphs")
+            return markdown
+
+        finally:
+            # Clean up preprocessed WAV file
+            if cleanup_wav and os.path.exists(wav_path):
+                try:
+                    os.unlink(wav_path)
+                    logger.debug(f"Cleaned up temporary file: {wav_path}")
+                except Exception as e:
+                    logger.warning(f"Failed to clean up {wav_path}: {e}")
+
+    async def transcribe_audio_bytes(
+        self,
+        audio_bytes: bytes,
+        file_extension: str = ".mp3",
+        language: str = "id-ID",
+    ) -> str:
+        """Transcribe audio from bytes to markdown-formatted text.
+
+        Args:
+            audio_bytes: Audio file content as bytes
+            file_extension: File extension (for format detection)
+            language: BCP-47 language code (default: 'id-ID' for Indonesian)
+
+        Returns:
+            Markdown-formatted transcription text
+
+        Raises:
+            RuntimeError: If SDK not available, credentials missing, or transcription fails
+        """
+        # Write bytes to temporary file
+        with tempfile.NamedTemporaryFile(
+            suffix=file_extension,
+            delete=False,
+        ) as tf:
+            tf.write(audio_bytes)
+            temp_path = tf.name
+
+        try:
+            return await self.transcribe_audio_to_markdown(
+                temp_path,
+                language=language,
+                preprocess=True,
+            )
+        finally:
+            # Clean up temporary file
+            if os.path.exists(temp_path):
+                try:
+                    os.unlink(temp_path)
+                except Exception as e:
+                    logger.warning(f"Failed to clean up {temp_path}: {e}")
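
A minimal end-to-end sketch of the new speech service, assuming `AZURE_SPEECH_KEY`/`AZURE_SPEECH_REGION` are set and `ffmpeg` is on PATH (the `meeting.mp3` file name is a placeholder):

```python
import asyncio

from srx_lib_azure.speech import AzureSpeechService


async def main() -> None:
    # Credentials fall back to AZURE_SPEECH_KEY / AZURE_SPEECH_REGION when not passed explicitly.
    speech = AzureSpeechService(warn_if_unconfigured=True)

    # preprocess=True converts the input to 16 kHz mono WAV via ffmpeg before recognition.
    transcript_md = await speech.transcribe_audio_to_markdown(
        "meeting.mp3",
        language="en-US",
        preprocess=True,
    )
    print(transcript_md)


asyncio.run(main())
```

The result is one markdown bullet per recognized paragraph; an empty string means the recognizer produced no final results.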

srx_lib_azure-0.1.8/PKG-INFO (DELETED)
@@ -1,70 +0,0 @@
-Metadata-Version: 2.4
-Name: srx-lib-azure
-Version: 0.1.8
-Summary: Azure helpers for SRX services: Blob, Email, Table
-Author-email: SRX <dev@srx.id>
-Requires-Python: >=3.12
-Requires-Dist: azure-communication-email>=1.0.0
-Requires-Dist: azure-data-tables>=12.7.0
-Requires-Dist: azure-storage-blob>=12.22.0
-Requires-Dist: loguru>=0.7.2
-Description-Content-Type: text/markdown
-
-# srx-lib-azure
-
-Lightweight wrappers over Azure SDKs used across SRX services.
-
-What it includes:
-- Blob: upload/download helpers, SAS URL generation
-- Email (Azure Communication Services): simple async sender
-- Table: simple CRUD helpers
-
-## Install
-
-PyPI (public):
-
-- `pip install srx-lib-azure`
-
-uv (pyproject):
-```
-[project]
-dependencies = ["srx-lib-azure>=0.1.0"]
-```
-
-## Usage
-
-Blob:
-```
-from srx_lib_azure.blob import AzureBlobService
-blob = AzureBlobService()
-url = await blob.upload_file(upload_file, "documents/report.pdf")
-```
-
-Email:
-```
-from srx_lib_azure.email import EmailService
-svc = EmailService()
-await svc.send_notification("user@example.com", "Subject", "Hello", html=False)
-```
-
-Table:
-```
-from srx_lib_azure.table import AzureTableService
-store = AzureTableService()
-store.ensure_table("events")
-store.upsert_entity("events", {"PartitionKey":"p","RowKey":"r","EventType":"x"})
-```
-
-## Environment Variables
-
-- Blob & Table: `AZURE_STORAGE_CONNECTION_STRING` (required)
-- Email (ACS): `ACS_CONNECTION_STRING`, `EMAIL_SENDER`
-- Optional: `AZURE_STORAGE_ACCOUNT_KEY`, `AZURE_BLOB_URL`, `AZURE_SAS_TOKEN`
-
-## Release
-
-Tag `vX.Y.Z` to publish to GitHub Packages via Actions.
-
-## License
-
-Proprietary © SRX

srx_lib_azure-0.1.8/README.md (DELETED)
@@ -1,58 +0,0 @@
-# srx-lib-azure
-
-Lightweight wrappers over Azure SDKs used across SRX services.
-
-What it includes:
-- Blob: upload/download helpers, SAS URL generation
-- Email (Azure Communication Services): simple async sender
-- Table: simple CRUD helpers
-
-## Install
-
-PyPI (public):
-
-- `pip install srx-lib-azure`
-
-uv (pyproject):
-```
-[project]
-dependencies = ["srx-lib-azure>=0.1.0"]
-```
-
-## Usage
-
-Blob:
-```
-from srx_lib_azure.blob import AzureBlobService
-blob = AzureBlobService()
-url = await blob.upload_file(upload_file, "documents/report.pdf")
-```
-
-Email:
-```
-from srx_lib_azure.email import EmailService
-svc = EmailService()
-await svc.send_notification("user@example.com", "Subject", "Hello", html=False)
-```
-
-Table:
-```
-from srx_lib_azure.table import AzureTableService
-store = AzureTableService()
-store.ensure_table("events")
-store.upsert_entity("events", {"PartitionKey":"p","RowKey":"r","EventType":"x"})
-```
-
-## Environment Variables
-
-- Blob & Table: `AZURE_STORAGE_CONNECTION_STRING` (required)
-- Email (ACS): `ACS_CONNECTION_STRING`, `EMAIL_SENDER`
-- Optional: `AZURE_STORAGE_ACCOUNT_KEY`, `AZURE_BLOB_URL`, `AZURE_SAS_TOKEN`
-
-## Release
-
-Tag `vX.Y.Z` to publish to GitHub Packages via Actions.
-
-## License
-
-Proprietary © SRX

Files without changes:
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/.github/workflows/publish.yml
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/.gitignore
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/blob.py
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/email.py
- {srx_lib_azure-0.1.8 → srx_lib_azure-0.3.0}/src/srx_lib_azure/table.py