remdb-0.3.7-py3-none-any.whl
- rem/__init__.py +2 -0
- rem/agentic/README.md +650 -0
- rem/agentic/__init__.py +39 -0
- rem/agentic/agents/README.md +155 -0
- rem/agentic/agents/__init__.py +8 -0
- rem/agentic/context.py +148 -0
- rem/agentic/context_builder.py +329 -0
- rem/agentic/mcp/__init__.py +0 -0
- rem/agentic/mcp/tool_wrapper.py +107 -0
- rem/agentic/otel/__init__.py +5 -0
- rem/agentic/otel/setup.py +151 -0
- rem/agentic/providers/phoenix.py +674 -0
- rem/agentic/providers/pydantic_ai.py +572 -0
- rem/agentic/query.py +117 -0
- rem/agentic/query_helper.py +89 -0
- rem/agentic/schema.py +396 -0
- rem/agentic/serialization.py +245 -0
- rem/agentic/tools/__init__.py +5 -0
- rem/agentic/tools/rem_tools.py +231 -0
- rem/api/README.md +420 -0
- rem/api/main.py +324 -0
- rem/api/mcp_router/prompts.py +182 -0
- rem/api/mcp_router/resources.py +536 -0
- rem/api/mcp_router/server.py +213 -0
- rem/api/mcp_router/tools.py +584 -0
- rem/api/routers/auth.py +229 -0
- rem/api/routers/chat/__init__.py +5 -0
- rem/api/routers/chat/completions.py +281 -0
- rem/api/routers/chat/json_utils.py +76 -0
- rem/api/routers/chat/models.py +124 -0
- rem/api/routers/chat/streaming.py +185 -0
- rem/auth/README.md +258 -0
- rem/auth/__init__.py +26 -0
- rem/auth/middleware.py +100 -0
- rem/auth/providers/__init__.py +13 -0
- rem/auth/providers/base.py +376 -0
- rem/auth/providers/google.py +163 -0
- rem/auth/providers/microsoft.py +237 -0
- rem/cli/README.md +455 -0
- rem/cli/__init__.py +8 -0
- rem/cli/commands/README.md +126 -0
- rem/cli/commands/__init__.py +3 -0
- rem/cli/commands/ask.py +566 -0
- rem/cli/commands/configure.py +497 -0
- rem/cli/commands/db.py +493 -0
- rem/cli/commands/dreaming.py +324 -0
- rem/cli/commands/experiments.py +1302 -0
- rem/cli/commands/mcp.py +66 -0
- rem/cli/commands/process.py +245 -0
- rem/cli/commands/schema.py +183 -0
- rem/cli/commands/serve.py +106 -0
- rem/cli/dreaming.py +363 -0
- rem/cli/main.py +96 -0
- rem/config.py +237 -0
- rem/mcp_server.py +41 -0
- rem/models/core/__init__.py +49 -0
- rem/models/core/core_model.py +64 -0
- rem/models/core/engram.py +333 -0
- rem/models/core/experiment.py +628 -0
- rem/models/core/inline_edge.py +132 -0
- rem/models/core/rem_query.py +243 -0
- rem/models/entities/__init__.py +43 -0
- rem/models/entities/file.py +57 -0
- rem/models/entities/image_resource.py +88 -0
- rem/models/entities/message.py +35 -0
- rem/models/entities/moment.py +123 -0
- rem/models/entities/ontology.py +191 -0
- rem/models/entities/ontology_config.py +131 -0
- rem/models/entities/resource.py +95 -0
- rem/models/entities/schema.py +87 -0
- rem/models/entities/user.py +85 -0
- rem/py.typed +0 -0
- rem/schemas/README.md +507 -0
- rem/schemas/__init__.py +6 -0
- rem/schemas/agents/README.md +92 -0
- rem/schemas/agents/core/moment-builder.yaml +178 -0
- rem/schemas/agents/core/rem-query-agent.yaml +226 -0
- rem/schemas/agents/core/resource-affinity-assessor.yaml +99 -0
- rem/schemas/agents/core/simple-assistant.yaml +19 -0
- rem/schemas/agents/core/user-profile-builder.yaml +163 -0
- rem/schemas/agents/examples/contract-analyzer.yaml +317 -0
- rem/schemas/agents/examples/contract-extractor.yaml +134 -0
- rem/schemas/agents/examples/cv-parser.yaml +263 -0
- rem/schemas/agents/examples/hello-world.yaml +37 -0
- rem/schemas/agents/examples/query.yaml +54 -0
- rem/schemas/agents/examples/simple.yaml +21 -0
- rem/schemas/agents/examples/test.yaml +29 -0
- rem/schemas/agents/rem.yaml +128 -0
- rem/schemas/evaluators/hello-world/default.yaml +77 -0
- rem/schemas/evaluators/rem/faithfulness.yaml +219 -0
- rem/schemas/evaluators/rem/lookup-correctness.yaml +182 -0
- rem/schemas/evaluators/rem/retrieval-precision.yaml +199 -0
- rem/schemas/evaluators/rem/retrieval-recall.yaml +211 -0
- rem/schemas/evaluators/rem/search-correctness.yaml +192 -0
- rem/services/__init__.py +16 -0
- rem/services/audio/INTEGRATION.md +308 -0
- rem/services/audio/README.md +376 -0
- rem/services/audio/__init__.py +15 -0
- rem/services/audio/chunker.py +354 -0
- rem/services/audio/transcriber.py +259 -0
- rem/services/content/README.md +1269 -0
- rem/services/content/__init__.py +5 -0
- rem/services/content/providers.py +801 -0
- rem/services/content/service.py +676 -0
- rem/services/dreaming/README.md +230 -0
- rem/services/dreaming/__init__.py +53 -0
- rem/services/dreaming/affinity_service.py +336 -0
- rem/services/dreaming/moment_service.py +264 -0
- rem/services/dreaming/ontology_service.py +54 -0
- rem/services/dreaming/user_model_service.py +297 -0
- rem/services/dreaming/utils.py +39 -0
- rem/services/embeddings/__init__.py +11 -0
- rem/services/embeddings/api.py +120 -0
- rem/services/embeddings/worker.py +421 -0
- rem/services/fs/README.md +662 -0
- rem/services/fs/__init__.py +62 -0
- rem/services/fs/examples.py +206 -0
- rem/services/fs/examples_paths.py +204 -0
- rem/services/fs/git_provider.py +935 -0
- rem/services/fs/local_provider.py +760 -0
- rem/services/fs/parsing-hooks-examples.md +172 -0
- rem/services/fs/paths.py +276 -0
- rem/services/fs/provider.py +460 -0
- rem/services/fs/s3_provider.py +1042 -0
- rem/services/fs/service.py +186 -0
- rem/services/git/README.md +1075 -0
- rem/services/git/__init__.py +17 -0
- rem/services/git/service.py +469 -0
- rem/services/phoenix/EXPERIMENT_DESIGN.md +1146 -0
- rem/services/phoenix/README.md +453 -0
- rem/services/phoenix/__init__.py +46 -0
- rem/services/phoenix/client.py +686 -0
- rem/services/phoenix/config.py +88 -0
- rem/services/phoenix/prompt_labels.py +477 -0
- rem/services/postgres/README.md +575 -0
- rem/services/postgres/__init__.py +23 -0
- rem/services/postgres/migration_service.py +427 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +232 -0
- rem/services/postgres/register_type.py +352 -0
- rem/services/postgres/repository.py +337 -0
- rem/services/postgres/schema_generator.py +379 -0
- rem/services/postgres/service.py +802 -0
- rem/services/postgres/sql_builder.py +354 -0
- rem/services/rem/README.md +304 -0
- rem/services/rem/__init__.py +23 -0
- rem/services/rem/exceptions.py +71 -0
- rem/services/rem/executor.py +293 -0
- rem/services/rem/parser.py +145 -0
- rem/services/rem/queries.py +196 -0
- rem/services/rem/query.py +371 -0
- rem/services/rem/service.py +527 -0
- rem/services/session/README.md +374 -0
- rem/services/session/__init__.py +6 -0
- rem/services/session/compression.py +360 -0
- rem/services/session/reload.py +77 -0
- rem/settings.py +1235 -0
- rem/sql/002_install_models.sql +1068 -0
- rem/sql/background_indexes.sql +42 -0
- rem/sql/install_models.sql +1038 -0
- rem/sql/migrations/001_install.sql +503 -0
- rem/sql/migrations/002_install_models.sql +1202 -0
- rem/utils/AGENTIC_CHUNKING.md +597 -0
- rem/utils/README.md +583 -0
- rem/utils/__init__.py +43 -0
- rem/utils/agentic_chunking.py +622 -0
- rem/utils/batch_ops.py +343 -0
- rem/utils/chunking.py +108 -0
- rem/utils/clip_embeddings.py +276 -0
- rem/utils/dict_utils.py +98 -0
- rem/utils/embeddings.py +423 -0
- rem/utils/examples/embeddings_example.py +305 -0
- rem/utils/examples/sql_types_example.py +202 -0
- rem/utils/markdown.py +16 -0
- rem/utils/model_helpers.py +236 -0
- rem/utils/schema_loader.py +336 -0
- rem/utils/sql_types.py +348 -0
- rem/utils/user_id.py +81 -0
- rem/utils/vision.py +330 -0
- rem/workers/README.md +506 -0
- rem/workers/__init__.py +5 -0
- rem/workers/dreaming.py +502 -0
- rem/workers/engram_processor.py +312 -0
- rem/workers/sqs_file_processor.py +193 -0
- remdb-0.3.7.dist-info/METADATA +1473 -0
- remdb-0.3.7.dist-info/RECORD +187 -0
- remdb-0.3.7.dist-info/WHEEL +4 -0
- remdb-0.3.7.dist-info/entry_points.txt +2 -0
rem/utils/vision.py
ADDED
@@ -0,0 +1,330 @@
+"""
+Vision utility for image analysis using multiple LLM providers.
+
+Lightweight implementation supporting three providers:
+- Anthropic Claude (claude-3-5-sonnet-20241022 or newer)
+- Google Gemini (gemini-2.0-flash-exp or newer)
+- OpenAI-compatible (gpt-4o, gpt-4-turbo, or compatible endpoints)
+
+Handles image encoding and multimodal LLM requests for generating
+markdown descriptions of images.
+"""
+
+import base64
+import os
+from enum import Enum
+from pathlib import Path
+from typing import Optional
+
+import requests
+from loguru import logger
+
+
+class VisionProvider(str, Enum):
+    """Supported vision providers."""
+
+    ANTHROPIC = "anthropic"
+    GEMINI = "gemini"
+    OPENAI = "openai"
+
+
+class VisionResult:
+    """Result from image vision analysis."""
+
+    def __init__(
+        self,
+        description: str,
+        provider: VisionProvider,
+        model: str,
+        confidence: float = 0.9,
+    ):
+        """
+        Initialize vision result.
+
+        Args:
+            description: Markdown description of the image
+            provider: Vision provider used
+            model: Model name used
+            confidence: Confidence score (0.0-1.0)
+        """
+        self.description = description
+        self.provider = provider
+        self.model = model
+        self.confidence = confidence
+
+    def __repr__(self) -> str:
+        return f"VisionResult(provider={self.provider.value}, model={self.model}, chars={len(self.description)})"
+
+
+class ImageAnalyzer:
+    """
+    Analyze images using vision-enabled LLMs.
+
+    Supports three providers with automatic provider selection based on API keys.
+    """
+
+    def __init__(
+        self,
+        provider: VisionProvider = VisionProvider.ANTHROPIC,
+        api_key: Optional[str] = None,
+        model: Optional[str] = None,
+        base_url: Optional[str] = None,
+    ):
+        """
+        Initialize image analyzer.
+
+        Args:
+            provider: Vision provider to use
+            api_key: API key (from env if None)
+            model: Model name (provider default if None)
+            base_url: Custom base URL (for OpenAI-compatible endpoints)
+        """
+        self.provider = provider
+
+        # Get API key from settings if not provided
+        if api_key is None:
+            from ..settings import settings
+            if provider == VisionProvider.ANTHROPIC:
+                api_key = settings.llm.anthropic_api_key
+            elif provider == VisionProvider.GEMINI:
+                # Gemini uses same key as Google
+                api_key = settings.llm.anthropic_api_key  # TODO: Add gemini_api_key to settings
+            elif provider == VisionProvider.OPENAI:
+                api_key = settings.llm.openai_api_key
+
+        if not api_key:
+            logger.warning(f"No API key found for {provider.value} - vision analysis will fail")
+
+        self.api_key = api_key
+
+        # Set default models
+        if model is None:
+            if provider == VisionProvider.ANTHROPIC:
+                model = "claude-3-5-sonnet-20241022"
+            elif provider == VisionProvider.GEMINI:
+                model = "gemini-2.0-flash-exp"
+            elif provider == VisionProvider.OPENAI:
+                model = "gpt-4o"
+
+        self.model = model
+        self.base_url = base_url
+
+    def analyze_image(
+        self,
+        image_path: str | Path,
+        prompt: str = "Describe this image in detail as markdown. Include key visual elements, text, diagrams, and context.",
+    ) -> VisionResult:
+        """
+        Analyze image and generate markdown description.
+
+        Args:
+            image_path: Path to image file
+            prompt: Analysis prompt for the LLM
+
+        Returns:
+            VisionResult with markdown description
+
+        Raises:
+            ValueError: If API key missing or file invalid
+            RuntimeError: If API request fails
+        """
+        if not self.api_key:
+            raise ValueError(f"API key required for {self.provider.value} vision analysis")
+
+        image_path = Path(image_path)
+        if not image_path.exists():
+            raise FileNotFoundError(f"Image file not found: {image_path}")
+
+        # Read and encode image
+        with open(image_path, "rb") as f:
+            image_bytes = f.read()
+
+        # Detect media type
+        suffix = image_path.suffix.lower()
+        media_type_map = {
+            ".png": "image/png",
+            ".jpg": "image/jpeg",
+            ".jpeg": "image/jpeg",
+            ".gif": "image/gif",
+            ".webp": "image/webp",
+        }
+        media_type = media_type_map.get(suffix, "image/png")
+
+        logger.info(f"Analyzing {image_path.name} with {self.provider.value} ({self.model})")
+
+        # Route to provider-specific implementation
+        if self.provider == VisionProvider.ANTHROPIC:
+            description = self._analyze_anthropic(image_bytes, media_type, prompt)
+        elif self.provider == VisionProvider.GEMINI:
+            description = self._analyze_gemini(image_bytes, media_type, prompt)
+        elif self.provider == VisionProvider.OPENAI:
+            description = self._analyze_openai(image_bytes, media_type, prompt)
+        else:
+            raise ValueError(f"Unsupported provider: {self.provider}")
+
+        logger.info(f"✓ Vision analysis complete: {len(description)} characters")
+
+        return VisionResult(
+            description=description,
+            provider=self.provider,
+            model=self.model,
+            confidence=0.9,
+        )
+
+    def _analyze_anthropic(
+        self,
+        image_bytes: bytes,
+        media_type: str,
+        prompt: str,
+    ) -> str:
+        """Analyze image using Anthropic Claude."""
+        # Encode image to base64
+        image_b64 = base64.b64encode(image_bytes).decode("utf-8")
+
+        # Build request
+        headers = {
+            "x-api-key": self.api_key,
+            "anthropic-version": "2023-06-01",
+            "content-type": "application/json",
+        }
+
+        body = {
+            "model": self.model,
+            "max_tokens": 2048,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "image",
+                            "source": {
+                                "type": "base64",
+                                "media_type": media_type,
+                                "data": image_b64,
+                            },
+                        },
+                        {
+                            "type": "text",
+                            "text": prompt,
+                        },
+                    ],
+                }
+            ],
+        }
+
+        response = requests.post(
+            "https://api.anthropic.com/v1/messages",
+            headers=headers,
+            json=body,
+            timeout=60.0,
+        )
+
+        if response.status_code != 200:
+            error_detail = response.text
+            logger.error(f"Anthropic API error: {response.status_code} - {error_detail}")
+            raise RuntimeError(f"Vision analysis failed: {response.status_code} - {error_detail}")
+
+        result = response.json()
+        return result["content"][0]["text"]
+
+    def _analyze_gemini(
+        self,
+        image_bytes: bytes,
+        media_type: str,
+        prompt: str,
+    ) -> str:
+        """Analyze image using Google Gemini."""
+        # Encode image to base64
+        image_b64 = base64.b64encode(image_bytes).decode("utf-8")
+
+        # Build request (Gemini REST API)
+        url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model}:generateContent"
+        params = {"key": self.api_key}
+
+        body = {
+            "contents": [
+                {
+                    "parts": [
+                        {
+                            "inline_data": {
+                                "mime_type": media_type,
+                                "data": image_b64,
+                            }
+                        },
+                        {"text": prompt},
+                    ]
+                }
+            ]
+        }
+
+        response = requests.post(
+            url,
+            params=params,
+            json=body,
+            timeout=60.0,
+        )
+
+        if response.status_code != 200:
+            error_detail = response.text
+            logger.error(f"Gemini API error: {response.status_code} - {error_detail}")
+            raise RuntimeError(f"Vision analysis failed: {response.status_code} - {error_detail}")
+
+        result = response.json()
+        return result["candidates"][0]["content"]["parts"][0]["text"]
+
+    def _analyze_openai(
+        self,
+        image_bytes: bytes,
+        media_type: str,
+        prompt: str,
+    ) -> str:
+        """Analyze image using OpenAI or OpenAI-compatible endpoint."""
+        # Encode image to base64
+        image_b64 = base64.b64encode(image_bytes).decode("utf-8")
+
+        # Build request
+        headers = {
+            "Authorization": f"Bearer {self.api_key}",
+            "Content-Type": "application/json",
+        }
+
+        # Use custom base URL if provided, otherwise use OpenAI
+        base_url = self.base_url or "https://api.openai.com/v1"
+        url = f"{base_url}/chat/completions"
+
+        body = {
+            "model": self.model,
+            "messages": [
+                {
+                    "role": "user",
+                    "content": [
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:{media_type};base64,{image_b64}",
+                            },
+                        },
+                        {
+                            "type": "text",
+                            "text": prompt,
+                        },
+                    ],
+                }
+            ],
+            "max_tokens": 2048,
+        }
+
+        response = requests.post(
+            url,
+            headers=headers,
+            json=body,
+            timeout=60.0,
+        )
+
+        if response.status_code != 200:
+            error_detail = response.text
+            logger.error(f"OpenAI API error: {response.status_code} - {error_detail}")
+            raise RuntimeError(f"Vision analysis failed: {response.status_code} - {error_detail}")
+
+        result = response.json()
+        return result["choices"][0]["message"]["content"]
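
For orientation, a minimal usage sketch of the ImageAnalyzer API added above. The file path, prompt, and inline api_key are hypothetical placeholders; per the constructor, omitting api_key falls back to the keys configured in rem.settings.

    from rem.utils.vision import ImageAnalyzer, VisionProvider

    # Hypothetical values: the path and key below are placeholders.
    analyzer = ImageAnalyzer(
        provider=VisionProvider.ANTHROPIC,
        api_key="sk-ant-...",  # omit to resolve from settings.llm.anthropic_api_key
    )
    result = analyzer.analyze_image(
        "diagram.png",
        prompt="Summarize this architecture diagram as markdown.",
    )
    print(result)              # VisionResult(provider=anthropic, model=claude-3-5-sonnet-20241022, chars=...)
    print(result.description)  # markdown description generated by the model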