pltr-cli 0.11.0-py3-none-any.whl → 0.13.0-py3-none-any.whl

This diff shows the changes between package versions as they were publicly released to their respective registries. It is provided for informational purposes only.
Files changed (46)
  1. pltr/__init__.py +1 -1
  2. pltr/cli.py +40 -0
  3. pltr/commands/admin.py +565 -11
  4. pltr/commands/aip_agents.py +333 -0
  5. pltr/commands/connectivity.py +309 -1
  6. pltr/commands/cp.py +103 -0
  7. pltr/commands/dataset.py +104 -4
  8. pltr/commands/functions.py +503 -0
  9. pltr/commands/language_models.py +515 -0
  10. pltr/commands/mediasets.py +176 -0
  11. pltr/commands/models.py +362 -0
  12. pltr/commands/ontology.py +44 -13
  13. pltr/commands/orchestration.py +167 -11
  14. pltr/commands/project.py +231 -22
  15. pltr/commands/resource.py +416 -17
  16. pltr/commands/space.py +25 -303
  17. pltr/commands/sql.py +54 -7
  18. pltr/commands/streams.py +616 -0
  19. pltr/commands/third_party_applications.py +82 -0
  20. pltr/services/admin.py +331 -3
  21. pltr/services/aip_agents.py +147 -0
  22. pltr/services/base.py +104 -1
  23. pltr/services/connectivity.py +139 -0
  24. pltr/services/copy.py +391 -0
  25. pltr/services/dataset.py +77 -4
  26. pltr/services/folder.py +6 -1
  27. pltr/services/functions.py +223 -0
  28. pltr/services/language_models.py +281 -0
  29. pltr/services/mediasets.py +144 -9
  30. pltr/services/models.py +179 -0
  31. pltr/services/ontology.py +48 -1
  32. pltr/services/orchestration.py +133 -1
  33. pltr/services/project.py +213 -39
  34. pltr/services/resource.py +229 -60
  35. pltr/services/space.py +24 -175
  36. pltr/services/sql.py +44 -20
  37. pltr/services/streams.py +290 -0
  38. pltr/services/third_party_applications.py +53 -0
  39. pltr/utils/formatting.py +195 -1
  40. pltr/utils/pagination.py +325 -0
  41. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/METADATA +55 -4
  42. pltr_cli-0.13.0.dist-info/RECORD +70 -0
  43. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/WHEEL +1 -1
  44. pltr_cli-0.11.0.dist-info/RECORD +0 -55
  45. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/entry_points.txt +0 -0
  46. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/licenses/LICENSE +0 -0
pltr/services/functions.py
@@ -0,0 +1,223 @@
+"""
+Functions service wrapper for Foundry SDK.
+Provides access to Functions query execution and value type operations.
+"""
+
+from typing import Any, Dict, Optional
+from .base import BaseService
+
+
+class FunctionsService(BaseService):
+    """Service wrapper for Foundry Functions operations."""
+
+    def _get_service(self) -> Any:
+        """Get the Foundry Functions service."""
+        return self.client.functions
+
+    # ===== Query Operations =====
+
+    def get_query(
+        self,
+        query_api_name: str,
+        preview: bool = False,
+        version: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Get query metadata by API name.
+
+        Args:
+            query_api_name: Query API name (e.g., "myQuery")
+            preview: Enable preview mode (default: False)
+            version: Optional query version (e.g., "1.0.0")
+                If not specified, returns latest version
+
+        Returns:
+            Query information dictionary containing:
+            - rid: Query resource identifier
+            - apiName: Query API name
+            - version: Query version
+            - parameters: Query parameters with types
+            - output: Output structure definition
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = FunctionsService()
+            >>> query = service.get_query("myQuery")
+            >>> print(query['apiName'])
+        """
+        try:
+            query = self.service.Query.get(
+                query_api_name, preview=preview, version=version
+            )
+            return self._serialize_response(query)
+        except Exception as e:
+            raise RuntimeError(f"Failed to get query '{query_api_name}': {e}")
+
+    def get_query_by_rid(
+        self, query_rid: str, preview: bool = False, version: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Get query metadata by RID.
+
+        Args:
+            query_rid: Query Resource Identifier
+                Format: ri.functions.main.query.<id>
+            preview: Enable preview mode (default: False)
+            version: Optional query version (e.g., "1.0.0")
+                If not specified, returns latest version
+
+        Returns:
+            Query information dictionary containing:
+            - rid: Query resource identifier
+            - apiName: Query API name
+            - version: Query version
+            - parameters: Query parameters with types
+            - output: Output structure definition
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = FunctionsService()
+            >>> query = service.get_query_by_rid("ri.functions.main.query.abc123")
+            >>> print(query['rid'])
+        """
+        try:
+            query = self.service.Query.get_by_rid(
+                query_rid, preview=preview, version=version
+            )
+            return self._serialize_response(query)
+        except Exception as e:
+            raise RuntimeError(f"Failed to get query {query_rid}: {e}")
+
+    def execute_query(
+        self,
+        query_api_name: str,
+        parameters: Optional[Dict[str, Any]] = None,
+        preview: bool = False,
+        version: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Execute a query by API name with parameters.
+
+        Args:
+            query_api_name: Query API name (e.g., "myQuery")
+            parameters: Query parameters as dictionary with DataValue encoding
+                Examples:
+                - Primitives: {"limit": 10, "name": "John"}
+                - Arrays: {"ids": [1, 2, 3]}
+                - Structs: {"config": {"enabled": true}}
+                - Dates: {"date": "2024-01-01"} (ISO 8601)
+                - Timestamps: {"created": "2021-01-04T05:00:00Z"} (ISO 8601)
+            preview: Enable preview mode (default: False)
+            version: Optional query version (e.g., "1.0.0")
+                If not specified, executes latest version
+
+        Returns:
+            Query execution result dictionary
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = FunctionsService()
+            >>> result = service.execute_query(
+            ...     "myQuery",
+            ...     parameters={"limit": 10, "filter": "active"}
+            ... )
+            >>> print(result)
+        """
+        try:
+            result = self.service.Query.execute(
+                query_api_name,
+                parameters=parameters or {},
+                preview=preview,
+                version=version,
+            )
+            return self._serialize_response(result)
+        except Exception as e:
+            raise RuntimeError(f"Failed to execute query '{query_api_name}': {e}")
+
+    def execute_query_by_rid(
+        self,
+        query_rid: str,
+        parameters: Optional[Dict[str, Any]] = None,
+        preview: bool = False,
+        version: Optional[str] = None,
+    ) -> Dict[str, Any]:
+        """
+        Execute a query by RID with parameters.
+
+        Args:
+            query_rid: Query Resource Identifier
+                Format: ri.functions.main.query.<id>
+            parameters: Query parameters as dictionary with DataValue encoding
+                Examples:
+                - Primitives: {"limit": 10, "name": "John"}
+                - Arrays: {"ids": [1, 2, 3]}
+                - Structs: {"config": {"enabled": true}}
+                - Dates: {"date": "2024-01-01"} (ISO 8601)
+                - Timestamps: {"created": "2021-01-04T05:00:00Z"} (ISO 8601)
+            preview: Enable preview mode (default: False)
+            version: Optional query version (e.g., "1.0.0")
+                If not specified, executes latest version
+
+        Returns:
+            Query execution result dictionary
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = FunctionsService()
+            >>> result = service.execute_query_by_rid(
+            ...     "ri.functions.main.query.abc123",
+            ...     parameters={"limit": 10}
+            ... )
+            >>> print(result)
+        """
+        try:
+            result = self.service.Query.execute_by_rid(
+                query_rid,
+                parameters=parameters or {},
+                preview=preview,
+                version=version,
+            )
+            return self._serialize_response(result)
+        except Exception as e:
+            raise RuntimeError(f"Failed to execute query {query_rid}: {e}")
+
+    # ===== Value Type Operations =====
+
+    def get_value_type(
+        self, value_type_rid: str, preview: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Get value type details by RID.
+
+        Args:
+            value_type_rid: Value Type Resource Identifier
+                Format: ri.functions.main.value-type.<id>
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Value type information dictionary containing:
+            - rid: Value type resource identifier
+            - apiName: Value type API name
+            - definition: Type definition and structure
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = FunctionsService()
+            >>> value_type = service.get_value_type("ri.functions.main.value-type.xyz")
+            >>> print(value_type['apiName'])
+        """
+        try:
+            value_type = self.service.ValueType.get(value_type_rid, preview=preview)
+            return self._serialize_response(value_type)
+        except Exception as e:
+            raise RuntimeError(f"Failed to get value type {value_type_rid}: {e}")
pltr/services/language_models.py
@@ -0,0 +1,281 @@
+"""
+LanguageModels service wrapper for Foundry SDK.
+Provides access to Anthropic Claude models and OpenAI embeddings.
+"""
+
+from typing import Any, Dict, List, Optional
+from .base import BaseService
+
+
+class LanguageModelsService(BaseService):
+    """Service wrapper for Foundry LanguageModels operations."""
+
+    def _get_service(self) -> Any:
+        """Get the Foundry LanguageModels service."""
+        return self.client.language_models
+
+    # ===== Anthropic Model Operations =====
+
+    def send_message(
+        self,
+        model_id: str,
+        message: str,
+        max_tokens: int = 1024,
+        system: Optional[str] = None,
+        temperature: Optional[float] = None,
+        stop_sequences: Optional[List[str]] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Send a single message to an Anthropic model (simplified interface).
+
+        Args:
+            model_id: Model Resource Identifier
+                Format: ri.language-models.main.model.<id>
+            message: User message text
+            max_tokens: Maximum tokens to generate (default: 1024)
+            system: Optional system prompt to guide model behavior
+            temperature: Sampling temperature (0.0-1.0)
+                Lower values are more deterministic
+            stop_sequences: Optional list of sequences that stop generation
+            top_k: Sample from top K tokens (Anthropic models only)
+            top_p: Nucleus sampling threshold (0.0-1.0)
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Response dictionary containing:
+            - content: List of content blocks (text, tool use, etc.)
+            - role: Message role (typically "assistant")
+            - model: Model identifier
+            - stopReason: Reason generation stopped
+            - usage: Token usage statistics
+                - inputTokens: Input tokens consumed
+                - outputTokens: Output tokens generated
+                - totalTokens: Total tokens (input + output)
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = LanguageModelsService()
+            >>> response = service.send_message(
+            ...     "ri.language-models.main.model.abc123",
+            ...     "Explain quantum computing",
+            ...     max_tokens=200
+            ... )
+            >>> print(response['content'][0]['text'])
+        """
+        try:
+            # Transform simple message to SDK message format
+            messages = [
+                {
+                    "role": "user",
+                    "content": [{"type": "text", "text": message}],
+                }
+            ]
+
+            # Build request parameters
+            request_params: Dict[str, Any] = {
+                "messages": messages,
+                "maxTokens": max_tokens,
+            }
+
+            # Add optional parameters if provided
+            if system is not None:
+                request_params["system"] = [{"type": "text", "text": system}]
+            if temperature is not None:
+                request_params["temperature"] = temperature
+            if stop_sequences is not None:
+                request_params["stopSequences"] = stop_sequences
+            if top_k is not None:
+                request_params["topK"] = top_k
+            if top_p is not None:
+                request_params["topP"] = top_p
+
+            # Call SDK method
+            response = self.service.AnthropicModel.messages(
+                model_id,
+                request=request_params,
+                preview=preview,  # type: ignore
+            )
+
+            return self._serialize_response(response)
+        except Exception as e:
+            raise RuntimeError(f"Failed to send message to model {model_id}: {e}")
+
+    def send_messages_advanced(
+        self,
+        model_id: str,
+        messages: List[Dict[str, Any]],
+        max_tokens: int,
+        system: Optional[List[Dict[str, Any]]] = None,
+        temperature: Optional[float] = None,
+        thinking: Optional[Dict[str, Any]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Dict[str, Any]] = None,
+        stop_sequences: Optional[List[str]] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Send messages to an Anthropic model with advanced features.
+
+        This method accepts the full SDK request structure, enabling:
+        - Multi-turn conversations
+        - Tool/function calling
+        - Extended thinking mode
+        - Document and image processing
+        - Citations
+
+        Args:
+            model_id: Model Resource Identifier
+                Format: ri.language-models.main.model.<id>
+            messages: List of message objects with role and content
+                Format: [{"role": "user|assistant", "content": [...]}]
+            max_tokens: Maximum tokens to generate
+            system: Optional system prompt blocks
+                Format: [{"type": "text", "text": "..."}]
+            temperature: Sampling temperature (0.0-1.0)
+            thinking: Extended thinking configuration
+                Format: {"type": "enabled", "budget": 10000}
+            tools: Tool definitions for function calling
+            tool_choice: Tool selection strategy
+            stop_sequences: Sequences that stop generation
+            top_k: Sample from top K tokens
+            top_p: Nucleus sampling threshold (0.0-1.0)
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Response dictionary containing:
+            - content: List of content blocks (text, tool use, thinking, etc.)
+            - role: Message role (typically "assistant")
+            - model: Model identifier
+            - stopReason: Reason generation stopped
+            - usage: Token usage statistics
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = LanguageModelsService()
+            >>> messages = [
+            ...     {"role": "user", "content": [{"type": "text", "text": "Hi"}]},
+            ...     {"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]},
+            ...     {"role": "user", "content": [{"type": "text", "text": "Help me"}]}
+            ... ]
+            >>> response = service.send_messages_advanced(
+            ...     "ri.language-models.main.model.abc123",
+            ...     messages=messages,
+            ...     max_tokens=500
+            ... )
+        """
+        try:
+            # Build request parameters
+            request_params: Dict[str, Any] = {
+                "messages": messages,
+                "maxTokens": max_tokens,
+            }
+
+            # Add optional parameters if provided
+            if system is not None:
+                request_params["system"] = system
+            if temperature is not None:
+                request_params["temperature"] = temperature
+            if thinking is not None:
+                request_params["thinking"] = thinking
+            if tools is not None:
+                request_params["tools"] = tools
+            if tool_choice is not None:
+                request_params["toolChoice"] = tool_choice
+            if stop_sequences is not None:
+                request_params["stopSequences"] = stop_sequences
+            if top_k is not None:
+                request_params["topK"] = top_k
+            if top_p is not None:
+                request_params["topP"] = top_p
+
+            # Call SDK method
+            response = self.service.AnthropicModel.messages(
+                model_id,
+                request=request_params,
+                preview=preview,  # type: ignore
+            )
+
+            return self._serialize_response(response)
+        except Exception as e:
+            raise RuntimeError(f"Failed to send messages to model {model_id}: {e}")
+
+    # ===== OpenAI Model Operations =====
+
+    def generate_embeddings(
+        self,
+        model_id: str,
+        input_texts: List[str],
+        dimensions: Optional[int] = None,
+        encoding_format: Optional[str] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Generate embeddings for text using an OpenAI model.
+
+        Args:
+            model_id: Model Resource Identifier
+                Format: ri.language-models.main.model.<id>
+            input_texts: List of text strings to embed
+                Can be a single string or multiple strings
+            dimensions: Optional custom embedding dimensions
+                Not all models support this parameter
+            encoding_format: Output encoding format
+                Options: "float" (default) or "base64"
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Response dictionary containing:
+            - data: List of embedding objects
+                Each object has:
+                - embedding: Vector (list of floats or base64 string)
+                - index: Position in input array
+                - object: Type identifier ("embedding")
+            - model: Model identifier
+            - usage: Token usage statistics
+                - promptTokens: Input tokens consumed
+                - totalTokens: Total tokens
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = LanguageModelsService()
+            >>> response = service.generate_embeddings(
+            ...     "ri.language-models.main.model.xyz789",
+            ...     input_texts=["Machine learning", "Deep learning"]
+            ... )
+            >>> embeddings = [item['embedding'] for item in response['data']]
+        """
+        try:
+            # Build request parameters
+            request_params: Dict[str, Any] = {
+                "input": input_texts,
+            }
+
+            # Add optional parameters if provided
+            if dimensions is not None:
+                request_params["dimensions"] = dimensions
+            if encoding_format is not None:
+                request_params["encodingFormat"] = encoding_format
+
+            # Call SDK method
+            response = self.service.OpenAiModel.embeddings(
+                model_id,
+                request=request_params,
+                preview=preview,  # type: ignore
+            )
+
+            return self._serialize_response(response)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to generate embeddings with model {model_id}: {e}"
+            )
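Reviewer note: both Anthropic entry points funnel into the same AnthropicModel.messages call, so send_message is just a convenience layer over send_messages_advanced. A short sketch of both model families, assuming a configured profile; the model RIDs, prompts, and input texts below are placeholders:

    from pltr.services.language_models import LanguageModelsService

    service = LanguageModelsService()

    # Single-turn chat against an Anthropic model.
    response = service.send_message(
        "ri.language-models.main.model.abc123",  # placeholder RID
        "Summarize this release in one sentence.",
        max_tokens=200,
        system="You are a terse release-notes assistant.",
        temperature=0.2,
    )
    print(response["content"][0]["text"])
    print(response["usage"])  # inputTokens / outputTokens / totalTokens

    # Batch embeddings from an OpenAI model: one vector per input string,
    # returned in input order (see the "index" field on each data item).
    embeddings = service.generate_embeddings(
        "ri.language-models.main.model.xyz789",  # placeholder RID
        input_texts=["Machine learning", "Deep learning"],
    )
    vectors = [item["embedding"] for item in embeddings["data"]]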
pltr/services/mediasets.py
@@ -226,15 +226,7 @@ class MediaSetsService(BaseService):
                 preview=preview,
             )
 
-            with open(output_path_obj, "wb") as file:
-                if hasattr(response, "content"):
-                    file.write(response.content)
-                else:
-                    # Handle streaming response
-                    for chunk in response:
-                        file.write(chunk)
-
-            file_size = output_path_obj.stat().st_size
+            file_size = self._write_response_to_file(response, output_path_obj)
 
             return {
                 "media_set_rid": media_set_rid,
                 "media_item_rid": media_item_rid,
@@ -291,3 +283,146 @@
             "url": getattr(reference_response, "url", "unknown"),
             "expires_at": getattr(reference_response, "expires_at", None),
         }
+
+    def _write_response_to_file(self, response: Any, output_path: Path) -> int:
+        """
+        Write response content to file and return file size.
+
+        Args:
+            response: SDK response object (with .content attribute or iterable)
+            output_path: Path object for the output file
+
+        Returns:
+            File size in bytes
+        """
+        with open(output_path, "wb") as file:
+            if hasattr(response, "content"):
+                file.write(response.content)
+            else:
+                # Handle streaming response
+                for chunk in response:
+                    file.write(chunk)
+        return output_path.stat().st_size
+
+    def calculate_thumbnail(
+        self,
+        media_set_rid: str,
+        media_item_rid: str,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Initiate thumbnail generation for an image.
+
+        Args:
+            media_set_rid: Media Set Resource Identifier
+            media_item_rid: Media Item Resource Identifier
+            preview: Enable preview mode
+
+        Returns:
+            Thumbnail calculation status information
+        """
+        try:
+            response = self.service.MediaSet.calculate(
+                media_set_rid=media_set_rid,
+                media_item_rid=media_item_rid,
+                preview=preview,
+            )
+            return self._format_thumbnail_status(response)
+        except Exception as e:
+            raise RuntimeError(f"Failed to calculate thumbnail: {e}")
+
+    def retrieve_thumbnail(
+        self,
+        media_set_rid: str,
+        media_item_rid: str,
+        output_path: str,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Retrieve a calculated thumbnail (200px wide webp).
+
+        Args:
+            media_set_rid: Media Set Resource Identifier
+            media_item_rid: Media Item Resource Identifier
+            output_path: Local path where thumbnail should be saved
+            preview: Enable preview mode
+
+        Returns:
+            Download response information
+        """
+        try:
+            output_path_obj = Path(output_path)
+            output_path_obj.parent.mkdir(parents=True, exist_ok=True)
+
+            response = self.service.MediaSet.retrieve(
+                media_set_rid=media_set_rid,
+                media_item_rid=media_item_rid,
+                preview=preview,
+            )
+
+            file_size = self._write_response_to_file(response, output_path_obj)
+
+            # Validate that we received actual content
+            if file_size == 0:
+                output_path_obj.unlink(missing_ok=True)
+                raise RuntimeError(
+                    "Downloaded thumbnail is empty - thumbnail may not be ready yet"
+                )
+
+            return {
+                "media_set_rid": media_set_rid,
+                "media_item_rid": media_item_rid,
+                "output_path": str(output_path_obj),
+                "file_size": file_size,
+                "downloaded": True,
+                "format": "image/webp",
+            }
+        except Exception as e:
+            raise RuntimeError(f"Failed to retrieve thumbnail: {e}")
+
+    def upload_temp_media(
+        self,
+        file_path: str,
+        filename: Optional[str] = None,
+        attribution: Optional[str] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Upload temporary media (auto-deleted after 1 hour if not persisted).
+
+        Args:
+            file_path: Local path to the file to upload
+            filename: Optional filename override
+            attribution: Optional attribution string
+            preview: Enable preview mode
+
+        Returns:
+            Media reference information
+        """
+        try:
+            file_path_obj = Path(file_path)
+            if not file_path_obj.exists():
+                raise FileNotFoundError(f"File not found: {file_path}")
+
+            # Use provided filename or default to file name
+            upload_filename = filename or file_path_obj.name
+
+            with open(file_path_obj, "rb") as file:
+                response = self.service.MediaSet.upload_media(
+                    body=file,
+                    filename=upload_filename,
+                    attribution=attribution,
+                    preview=preview,
+                )
+
+            return self._format_media_reference(response)
+        except Exception as e:
+            raise RuntimeError(f"Failed to upload temporary media: {e}")
+
+    def _format_thumbnail_status(self, status_response: Any) -> Dict[str, Any]:
+        """Format thumbnail calculation status response for display."""
+        return {
+            "status": getattr(status_response, "status", "unknown"),
+            "transformation_id": getattr(status_response, "transformation_id", None),
+            "media_item_rid": getattr(status_response, "media_item_rid", None),
+        }
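Reviewer note: thumbnails are a two-step workflow here: calculate_thumbnail initiates generation and retrieve_thumbnail downloads the 200px-wide webp once it exists. Because retrieve_thumbnail deletes an empty download and raises RuntimeError when the thumbnail is not ready, callers can poll. A sketch under those assumptions; the RIDs and output path are placeholders:

    import time

    from pltr.services.mediasets import MediaSetsService

    service = MediaSetsService()
    media_set = "ri.mediasets.main.media-set.abc"    # placeholder RID
    media_item = "ri.mediasets.main.media-item.def"  # placeholder RID

    status = service.calculate_thumbnail(media_set, media_item)
    print(status["status"])

    # Retry until the thumbnail has been generated; the empty-file case
    # surfaces as RuntimeError, the same type as any other SDK failure.
    for attempt in range(5):
        try:
            info = service.retrieve_thumbnail(media_set, media_item, "thumb.webp")
            print(info["file_size"], info["format"])
            break
        except RuntimeError:
            time.sleep(2)  # back off and try again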