pltr-cli 0.11.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. pltr/__init__.py +1 -1
  2. pltr/cli.py +40 -0
  3. pltr/commands/admin.py +565 -11
  4. pltr/commands/aip_agents.py +333 -0
  5. pltr/commands/connectivity.py +309 -1
  6. pltr/commands/cp.py +103 -0
  7. pltr/commands/dataset.py +104 -4
  8. pltr/commands/functions.py +503 -0
  9. pltr/commands/language_models.py +515 -0
  10. pltr/commands/mediasets.py +176 -0
  11. pltr/commands/models.py +362 -0
  12. pltr/commands/ontology.py +44 -13
  13. pltr/commands/orchestration.py +167 -11
  14. pltr/commands/project.py +231 -22
  15. pltr/commands/resource.py +416 -17
  16. pltr/commands/space.py +25 -303
  17. pltr/commands/sql.py +54 -7
  18. pltr/commands/streams.py +616 -0
  19. pltr/commands/third_party_applications.py +82 -0
  20. pltr/services/admin.py +331 -3
  21. pltr/services/aip_agents.py +147 -0
  22. pltr/services/base.py +104 -1
  23. pltr/services/connectivity.py +139 -0
  24. pltr/services/copy.py +391 -0
  25. pltr/services/dataset.py +77 -4
  26. pltr/services/folder.py +6 -1
  27. pltr/services/functions.py +223 -0
  28. pltr/services/language_models.py +281 -0
  29. pltr/services/mediasets.py +144 -9
  30. pltr/services/models.py +179 -0
  31. pltr/services/ontology.py +48 -1
  32. pltr/services/orchestration.py +133 -1
  33. pltr/services/project.py +213 -39
  34. pltr/services/resource.py +229 -60
  35. pltr/services/space.py +24 -175
  36. pltr/services/sql.py +44 -20
  37. pltr/services/streams.py +290 -0
  38. pltr/services/third_party_applications.py +53 -0
  39. pltr/utils/formatting.py +195 -1
  40. pltr/utils/pagination.py +325 -0
  41. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/METADATA +55 -4
  42. pltr_cli-0.13.0.dist-info/RECORD +70 -0
  43. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/WHEEL +1 -1
  44. pltr_cli-0.11.0.dist-info/RECORD +0 -55
  45. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/entry_points.txt +0 -0
  46. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,515 @@
1
+ """
2
+ LanguageModels commands for Foundry.
3
+ Provides commands for Anthropic Claude models and OpenAI embeddings.
4
+ """
5
+
6
+ import typer
7
+ import json
8
+ from typing import Optional, List, Any, TYPE_CHECKING
9
+ from pathlib import Path
10
+ from rich.console import Console
11
+
12
+ # Lazy import to avoid SDK Literal types being processed by typer at module load time
13
+ if TYPE_CHECKING:
14
+ pass
15
+
16
+ from ..utils.formatting import OutputFormatter
17
+ from ..utils.progress import SpinnerProgressTracker
18
+ from ..auth.base import ProfileNotFoundError, MissingCredentialsError
19
+ from ..utils.completion import (
20
+ complete_profile,
21
+ complete_output_format,
22
+ )
23
+
24
+ # Create main app and sub-apps
25
+ app = typer.Typer(help="Interact with language models")
26
+ anthropic_app = typer.Typer(help="Anthropic Claude models")
27
+ openai_app = typer.Typer(help="OpenAI models")
28
+
29
+ # Add sub-apps
30
+ app.add_typer(anthropic_app, name="anthropic")
31
+ app.add_typer(openai_app, name="openai")
32
+
33
+ console = Console()
34
+ formatter = OutputFormatter(console)
35
+
36
+
37
def parse_json_input(input_str: str) -> Any:
    """
    Parse JSON input from string or file.

    Supports:
    - Inline JSON: '{"key": "value"}' or '["item1", "item2"]'
    - File reference: @input.json

    Args:
        input_str: Input string or file reference

    Returns:
        Parsed JSON data, or None when input_str is empty

    Raises:
        FileNotFoundError: If file reference doesn't exist
        json.JSONDecodeError: If JSON is invalid
    """
    if not input_str:
        return None

    # Handle file reference (@path syntax)
    if input_str.startswith("@"):
        file_path = Path(input_str[1:])
        if not file_path.exists():
            raise FileNotFoundError(f"Input file not found: {file_path}")

        # Fix: read with an explicit UTF-8 encoding. The original used
        # open(file_path, "r") with the platform's locale default, which
        # mis-decodes UTF-8 JSON files on e.g. Windows (cp1252).
        return json.loads(file_path.read_text(encoding="utf-8"))

    # Handle inline JSON
    return json.loads(input_str)
69
+
70
+
71
def display_anthropic_response(response: dict, format: str, output: Optional[str]):
    """
    Display an Anthropic model response, surfacing token usage first.

    Args:
        response: Response dictionary from service
        format: Output format
        output: Optional output file path
    """
    # Surface token counts before the (potentially large) response body.
    if "usage" in response:
        usage = response["usage"]
        tokens_in = usage.get("inputTokens", 0)
        tokens_out = usage.get("outputTokens", 0)
        # Fall back to the sum when the API omits an explicit total.
        tokens_total = usage.get("totalTokens", tokens_in + tokens_out)
        formatter.print_info(
            f"Token usage - Input: {tokens_in}, Output: {tokens_out}, Total: {tokens_total}"
        )

    # Either persist the full response to a file or render it inline.
    if not output:
        formatter.display(response, format)
        return
    formatter.save_to_file(response, output, format)
    formatter.print_success(f"Response saved to {output}")
97
+
98
+
99
def display_openai_response(response: dict, format: str, output: Optional[str]):
    """
    Display an OpenAI model response, surfacing token usage first.

    Args:
        response: Response dictionary from service
        format: Output format
        output: Optional output file path
    """
    # Surface token counts before the (potentially large) response body.
    if "usage" in response:
        usage = response["usage"]
        prompt_count = usage.get("promptTokens", 0)
        # Fall back to the prompt count when no explicit total is given.
        total_count = usage.get("totalTokens", prompt_count)
        formatter.print_info(
            f"Token usage - Prompt: {prompt_count}, Total: {total_count}"
        )

    # Either persist the full response to a file or render it inline.
    if not output:
        formatter.display(response, format)
        return
    formatter.save_to_file(response, output, format)
    formatter.print_success(f"Response saved to {output}")
124
+
125
+
126
+ # ===== Anthropic Commands =====
127
+
128
+
129
@anthropic_app.command("messages")
def anthropic_messages(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    message: str = typer.Option(
        ...,
        "--message",
        "-m",
        help="User message text",
    ),
    max_tokens: int = typer.Option(
        1024,
        "--max-tokens",
        help="Maximum tokens to generate",
    ),
    system: Optional[str] = typer.Option(
        None,
        "--system",
        "-s",
        help="System prompt to guide model behavior",
    ),
    temperature: Optional[float] = typer.Option(
        None,
        "--temperature",
        "-t",
        help="Sampling temperature (0.0-1.0). Lower is more deterministic.",
        min=0.0,
        max=1.0,
    ),
    stop: Optional[List[str]] = typer.Option(
        None,
        "--stop",
        help="Stop sequences (can be specified multiple times)",
    ),
    top_k: Optional[int] = typer.Option(
        None,
        "--top-k",
        help="Sample from top K tokens (Anthropic models only)",
        min=1,
    ),
    top_p: Optional[float] = typer.Option(
        None,
        "--top-p",
        help="Nucleus sampling threshold (0.0-1.0)",
        min=0.0,
        max=1.0,
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Send a single message to an Anthropic Claude model.

    Simple interface for single-turn Q&A with Claude models. For multi-turn
    conversations, tool calling, or advanced features, use messages-advanced.

    Examples:

        # Basic message
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Explain quantum computing"

        # With system prompt and custom parameters
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Write a haiku" \\
            --system "You are a poetic assistant" \\
            --temperature 0.8 \\
            --max-tokens 100

        # With stop sequences
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "List three items" \\
            --stop "." --stop "\\n\\n"

        # Save response to file
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Summarize AI trends" \\
            --output response.json
    """
    try:
        # Imported lazily (see module-level note): keeps SDK Literal types
        # out of typer's signature processing at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Sending message..."):
            response = service.send_message(
                model_id=model_id,
                message=message,
                max_tokens=max_tokens,
                system=system,
                temperature=temperature,
                # Normalize an empty --stop list to None so the service
                # omits the field rather than sending an empty array.
                stop_sequences=stop if stop else None,
                top_k=top_k,
                top_p=top_p,
                preview=preview,
            )

        # Prints token usage, then renders or saves the full response.
        display_anthropic_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)
259
+
260
+
261
@anthropic_app.command("messages-advanced")
def anthropic_messages_advanced(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    request: str = typer.Option(
        ...,
        "--request",
        "-r",
        help="Request JSON (inline or @file.json). Must include 'messages' and 'maxTokens'.",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Send messages to Anthropic Claude model with advanced features.

    Accepts full SDK request structure, enabling:
    - Multi-turn conversations
    - Tool/function calling
    - Extended thinking mode
    - Document and image processing
    - Citations

    The request must be a JSON object containing:
    - messages: List of message objects with role and content
    - maxTokens: Maximum tokens to generate

    Optional fields:
    - system: System prompt blocks
    - temperature: Sampling temperature (0.0-1.0)
    - thinking: Extended thinking configuration
    - tools: Tool definitions for function calling
    - toolChoice: Tool selection strategy
    - stopSequences: Sequences that stop generation
    - topK: Sample from top K tokens
    - topP: Nucleus sampling threshold

    Examples:

        # Multi-turn conversation from file
        # conversation.json:
        # {
        #   "messages": [
        #     {"role": "user", "content": [{"type": "text", "text": "Hi"}]},
        #     {"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]},
        #     {"role": "user", "content": [{"type": "text", "text": "Help me"}]}
        #   ],
        #   "maxTokens": 500
        # }
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request @conversation.json

        # Inline JSON with system prompt
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request '{"messages": [{"role": "user", "content": [{"type": "text", "text": "Hi"}]}], "maxTokens": 100, "system": [{"type": "text", "text": "Be concise"}]}'

        # With extended thinking
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request '{"messages": [{"role": "user", "content": [{"type": "text", "text": "Solve this problem"}]}], "maxTokens": 2000, "thinking": {"type": "enabled", "budget": 10000}}'
    """
    try:
        # Parse request JSON (inline string or @file reference)
        request_data = parse_json_input(request)

        # Validate required fields up front so the user gets a clear
        # message instead of a server-side error.
        if not isinstance(request_data, dict):
            raise ValueError("Request must be a JSON object")
        if "messages" not in request_data:
            raise ValueError("Request must include 'messages' field")
        if "maxTokens" not in request_data:
            raise ValueError("Request must include 'maxTokens' field")

        # Imported lazily (see module-level note): keeps SDK Literal types
        # out of typer's signature processing at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Sending messages..."):
            # Map camelCase JSON fields to the service's snake_case
            # keyword arguments; optional fields default to None.
            response = service.send_messages_advanced(
                model_id=model_id,
                messages=request_data["messages"],
                max_tokens=request_data["maxTokens"],
                system=request_data.get("system"),
                temperature=request_data.get("temperature"),
                thinking=request_data.get("thinking"),
                tools=request_data.get("tools"),
                tool_choice=request_data.get("toolChoice"),
                stop_sequences=request_data.get("stopSequences"),
                top_k=request_data.get("topK"),
                top_p=request_data.get("topP"),
                preview=preview,
            )

        # Prints token usage, then renders or saves the full response.
        display_anthropic_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)
386
+
387
+
388
+ # ===== OpenAI Commands =====
389
+
390
+
391
@openai_app.command("embeddings")
def openai_embeddings(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    input: str = typer.Option(
        ...,
        "--input",
        "-i",
        help='Input text(s). Single string, JSON array \'["text1", "text2"]\', or @file.json',
    ),
    dimensions: Optional[int] = typer.Option(
        None,
        "--dimensions",
        "-d",
        help="Custom embedding dimensions (not all models support this)",
        min=1,
    ),
    encoding: Optional[str] = typer.Option(
        None,
        "--encoding",
        "-e",
        help="Output encoding format: 'float' or 'base64'",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Generate embeddings for text using an OpenAI model.

    Accepts single text strings or multiple texts for batch processing.
    Returns embedding vectors that can be used for semantic search,
    clustering, or similarity comparisons.

    Examples:

        # Single text
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input "Machine learning is fascinating"

        # Multiple texts (inline JSON array)
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input '["Document 1", "Document 2", "Document 3"]'

        # Multiple texts from file
        # texts.json: ["Text 1", "Text 2", "Text 3"]
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input @texts.json

        # Custom dimensions and encoding
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input "Sample text" \\
            --dimensions 1024 \\
            --encoding base64

        # Save embeddings to file
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input '["Text 1", "Text 2"]' \\
            --output embeddings.json
    """
    try:
        # Parse input - handle string or array. Only values that look like
        # a file reference (@...) or a JSON array ([...) go through the JSON
        # parser; anything else is treated as a literal single text.
        input_data = (
            parse_json_input(input)
            if input.startswith("@") or input.startswith("[")
            else input
        )

        # Convert to list if single string; the service always takes a list.
        if isinstance(input_data, str):
            input_texts = [input_data]
        elif isinstance(input_data, list):
            input_texts = input_data
        else:
            raise ValueError("Input must be a string or array of strings")

        # Validate encoding format if provided (API accepts only these two).
        if encoding and encoding not in ["float", "base64"]:
            raise ValueError("Encoding format must be 'float' or 'base64'")

        # Imported lazily (see module-level note): keeps SDK Literal types
        # out of typer's signature processing at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Generating embeddings..."):
            response = service.generate_embeddings(
                model_id=model_id,
                input_texts=input_texts,
                dimensions=dimensions,
                encoding_format=encoding,
                preview=preview,
            )

        # Prints token usage, then renders or saves the full response.
        display_openai_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)
@@ -405,6 +405,180 @@ def get_media_reference(
405
405
  raise typer.Exit(1)
406
406
 
407
407
 
408
@app.command("thumbnail-calculate")
def thumbnail_calculate(
    media_set_rid: str = typer.Argument(
        ..., help="Media Set Resource Identifier", autocompletion=complete_rid
    ),
    media_item_rid: str = typer.Argument(
        ..., help="Media Item Resource Identifier", autocompletion=complete_rid
    ),
    profile: Optional[str] = typer.Option(
        None, "--profile", "-p", help="Profile name", autocompletion=complete_profile
    ),
    format: str = typer.Option(
        "table",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(False, "--preview", help="Enable preview mode"),
):
    """Initiate thumbnail generation for an image.

    Kicks off the calculation and prints its status; use
    'thumbnail-retrieve' afterwards to download the result.
    """
    try:
        # Remember the RIDs for future shell autocompletion.
        cache_rid(media_set_rid)
        cache_rid(media_item_rid)
        service = MediaSetsService(profile=profile)

        with SpinnerProgressTracker().track_spinner(
            f"Initiating thumbnail calculation for {media_item_rid}..."
        ):
            status = service.calculate_thumbnail(
                media_set_rid, media_item_rid, preview=preview
            )

        # Renders the status to the console or writes it to `output`.
        formatter.format_thumbnail_status(status, format, output)

        if output:
            formatter.print_success(f"Thumbnail status saved to {output}")
        else:
            formatter.print_info(
                "Use 'thumbnail-retrieve' to download once calculation completes"
            )

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        formatter.print_error(f"Failed to calculate thumbnail: {e}")
        raise typer.Exit(1)
459
+
460
+
461
@app.command("thumbnail-retrieve")
def thumbnail_retrieve(
    media_set_rid: str = typer.Argument(
        ..., help="Media Set Resource Identifier", autocompletion=complete_rid
    ),
    media_item_rid: str = typer.Argument(
        ..., help="Media Item Resource Identifier", autocompletion=complete_rid
    ),
    output_path: str = typer.Argument(
        ..., help="Local path where thumbnail should be saved"
    ),
    profile: Optional[str] = typer.Option(
        None, "--profile", "-p", help="Profile name", autocompletion=complete_profile
    ),
    preview: bool = typer.Option(False, "--preview", help="Enable preview mode"),
    overwrite: bool = typer.Option(
        False, "--overwrite", help="Overwrite existing file"
    ),
):
    """Download a calculated thumbnail from a media set (200px wide webp)."""
    try:
        # Check if output file already exists; refuse to clobber unless
        # the user explicitly passed --overwrite.
        output_path_obj = Path(output_path)
        if output_path_obj.exists() and not overwrite:
            formatter.print_error(f"File already exists: {output_path}")
            formatter.print_info("Use --overwrite to replace existing file")
            raise typer.Exit(1)

        # Remember the RIDs for future shell autocompletion.
        cache_rid(media_set_rid)
        cache_rid(media_item_rid)
        service = MediaSetsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Downloading thumbnail..."):
            result = service.retrieve_thumbnail(
                media_set_rid,
                media_item_rid,
                output_path,
                preview=preview,
            )

        # `result` is a dict from the service with at least these keys.
        formatter.print_success("Successfully downloaded thumbnail")
        formatter.print_info(f"Saved to: {result['output_path']}")
        formatter.print_info(f"File size: {result['file_size']} bytes")
        formatter.print_info(f"Format: {result['format']}")

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        # NOTE: typer.Exit raised above for the existing-file case is also
        # caught here only if it were an Exception subclass outside typer's
        # control flow; typer.Exit derives from click's exit mechanism.
        formatter.print_error(f"Failed to retrieve thumbnail: {e}")
        raise typer.Exit(1)
512
+
513
+
514
@app.command("upload-temp")
def upload_temp(
    file_path: str = typer.Argument(..., help="Local path to the file to upload"),
    profile: Optional[str] = typer.Option(
        None, "--profile", "-p", help="Profile name", autocompletion=complete_profile
    ),
    filename: Optional[str] = typer.Option(
        None, "--filename", help="Override filename for the upload"
    ),
    attribution: Optional[str] = typer.Option(
        None, "--attribution", help="Attribution string for the media"
    ),
    format: str = typer.Option(
        "table",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(False, "--preview", help="Enable preview mode"),
):
    """Upload temporary media (auto-deleted after 1 hour if not persisted)."""
    try:
        # Validate file exists before opening a connection to the service.
        file_path_obj = Path(file_path)
        if not file_path_obj.exists():
            formatter.print_error(f"File not found: {file_path}")
            raise typer.Exit(1)

        service = MediaSetsService(profile=profile)

        # Show the size up front so the user knows what is being sent.
        file_size = file_path_obj.stat().st_size
        formatter.print_info(f"Uploading {file_path} ({file_size} bytes)")

        with SpinnerProgressTracker().track_spinner(
            f"Uploading {file_path_obj.name}..."
        ):
            reference = service.upload_temp_media(
                file_path,
                filename=filename,
                attribution=attribution,
                preview=preview,
            )

        formatter.print_success("Successfully uploaded temporary media")
        # Renders the returned media reference or writes it to `output`.
        formatter.format_media_reference(reference, format, output)

        if output:
            formatter.print_success(f"Media reference saved to {output}")
        else:
            formatter.print_warning(
                "This is a temporary upload. It will be deleted after 1 hour if not persisted."
            )

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except FileNotFoundError as e:
        # Raised if the file disappears between the existence check and
        # the actual upload (or by the service's own validation).
        formatter.print_error(str(e))
        raise typer.Exit(1)
    except Exception as e:
        # Catch-all at the CLI boundary: surface the error, exit non-zero.
        formatter.print_error(f"Failed to upload temporary media: {e}")
        raise typer.Exit(1)
580
+
581
+
408
582
  @app.callback()
409
583
  def main():
410
584
  """
@@ -415,6 +589,8 @@ def main():
415
589
  - Create, commit, and abort upload transactions
416
590
  - Upload media files to media sets
417
591
  - Download media items from media sets
592
+ - Generate and retrieve image thumbnails
593
+ - Upload temporary media
418
594
 
419
595
  All operations require Resource Identifiers (RIDs) like:
420
596
  ri.mediasets.main.media-set.12345678-1234-1234-1234-123456789abc