pltr-cli 0.11.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. pltr/__init__.py +1 -1
  2. pltr/cli.py +40 -0
  3. pltr/commands/admin.py +565 -11
  4. pltr/commands/aip_agents.py +333 -0
  5. pltr/commands/connectivity.py +309 -1
  6. pltr/commands/cp.py +103 -0
  7. pltr/commands/dataset.py +104 -4
  8. pltr/commands/functions.py +503 -0
  9. pltr/commands/language_models.py +515 -0
  10. pltr/commands/mediasets.py +176 -0
  11. pltr/commands/models.py +362 -0
  12. pltr/commands/ontology.py +44 -13
  13. pltr/commands/orchestration.py +167 -11
  14. pltr/commands/project.py +231 -22
  15. pltr/commands/resource.py +416 -17
  16. pltr/commands/space.py +25 -303
  17. pltr/commands/sql.py +54 -7
  18. pltr/commands/streams.py +616 -0
  19. pltr/commands/third_party_applications.py +82 -0
  20. pltr/services/admin.py +331 -3
  21. pltr/services/aip_agents.py +147 -0
  22. pltr/services/base.py +104 -1
  23. pltr/services/connectivity.py +139 -0
  24. pltr/services/copy.py +391 -0
  25. pltr/services/dataset.py +77 -4
  26. pltr/services/folder.py +6 -1
  27. pltr/services/functions.py +223 -0
  28. pltr/services/language_models.py +281 -0
  29. pltr/services/mediasets.py +144 -9
  30. pltr/services/models.py +179 -0
  31. pltr/services/ontology.py +48 -1
  32. pltr/services/orchestration.py +133 -1
  33. pltr/services/project.py +213 -39
  34. pltr/services/resource.py +229 -60
  35. pltr/services/space.py +24 -175
  36. pltr/services/sql.py +44 -20
  37. pltr/services/streams.py +290 -0
  38. pltr/services/third_party_applications.py +53 -0
  39. pltr/utils/formatting.py +195 -1
  40. pltr/utils/pagination.py +325 -0
  41. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/METADATA +55 -4
  42. pltr_cli-0.13.0.dist-info/RECORD +70 -0
  43. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/WHEEL +1 -1
  44. pltr_cli-0.11.0.dist-info/RECORD +0 -55
  45. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/entry_points.txt +0 -0
  46. {pltr_cli-0.11.0.dist-info → pltr_cli-0.13.0.dist-info}/licenses/LICENSE +0 -0
pltr/services/streams.py ADDED
@@ -0,0 +1,290 @@
+"""
+Streams service wrapper for Foundry SDK.
+Provides access to streaming dataset and stream operations.
+"""
+
+from typing import Any, Dict, Optional
+from .base import BaseService
+
+
+class StreamsService(BaseService):
+    """Service wrapper for Foundry Streams operations."""
+
+    def _get_service(self) -> Any:
+        """Get the Foundry Streams service."""
+        return self.client.streams
+
+    # ===== Dataset Operations =====
+
+    def create_dataset(
+        self,
+        name: str,
+        parent_folder_rid: str,
+        schema: Dict[str, Any],
+        branch_name: Optional[str] = None,
+        compressed: Optional[bool] = None,
+        partitions_count: Optional[int] = None,
+        stream_type: Optional[str] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Create a streaming dataset with a stream on the specified branch.
+
+        Args:
+            name: Dataset name
+            parent_folder_rid: Parent folder RID (e.g., ri.compass.main.folder.xxx)
+            schema: Foundry schema for the stream (dict with field definitions)
+            branch_name: Branch to create stream on (default: 'master')
+            compressed: Enable compression for the stream (default: False)
+            partitions_count: Number of partitions (default: 1)
+                Generally, each partition can handle ~5 MB/s
+            stream_type: Stream type ('HIGH_THROUGHPUT' or 'LOW_LATENCY', default: 'LOW_LATENCY')
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Dataset information dictionary containing:
+            - rid: Dataset resource identifier
+            - name: Dataset name
+            - streamRid: Stream resource identifier
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> schema = {"fieldSchemaList": [{"name": "value", "type": "STRING"}]}
+            >>> dataset = service.create_dataset(
+            ...     name="my-stream",
+            ...     parent_folder_rid="ri.compass.main.folder.xxx",
+            ...     schema=schema
+            ... )
+        """
+        try:
+            dataset = self.service.Dataset.create(
+                name=name,
+                parent_folder_rid=parent_folder_rid,
+                schema=schema,
+                branch_name=branch_name,
+                compressed=compressed,
+                partitions_count=partitions_count,
+                stream_type=stream_type,
+                preview=preview,
+            )
+            return self._serialize_response(dataset)
+        except Exception as e:
+            raise RuntimeError(f"Failed to create streaming dataset '{name}': {e}")
+
+    # ===== Stream Operations =====
+
+    def create_stream(
+        self,
+        dataset_rid: str,
+        branch_name: str,
+        schema: Dict[str, Any],
+        compressed: Optional[bool] = None,
+        partitions_count: Optional[int] = None,
+        stream_type: Optional[str] = None,
+        preview: bool = False,
+    ) -> Dict[str, Any]:
+        """
+        Create a new stream on a branch of an existing streaming dataset.
+
+        Args:
+            dataset_rid: Dataset RID (e.g., ri.foundry.main.dataset.xxx)
+            branch_name: Branch name to create stream on
+            schema: Foundry schema for this stream
+            compressed: Enable compression (default: False)
+            partitions_count: Number of partitions (default: 1)
+            stream_type: Stream type ('HIGH_THROUGHPUT' or 'LOW_LATENCY')
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Stream information dictionary containing:
+            - streamRid: Stream resource identifier
+            - branchName: Branch name
+            - schema: Stream schema
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> stream = service.create_stream(
+            ...     dataset_rid="ri.foundry.main.dataset.xxx",
+            ...     branch_name="feature-branch",
+            ...     schema={"fieldSchemaList": [{"name": "id", "type": "INTEGER"}]}
+            ... )
+        """
+        try:
+            stream = self.service.Dataset.Stream.create(
+                dataset_rid=dataset_rid,
+                branch_name=branch_name,
+                schema=schema,
+                compressed=compressed,
+                partitions_count=partitions_count,
+                stream_type=stream_type,
+                preview=preview,
+            )
+            return self._serialize_response(stream)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to create stream on branch '{branch_name}': {e}"
+            )
+
+    def get_stream(
+        self, dataset_rid: str, stream_branch_name: str, preview: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Get information about a stream.
+
+        Args:
+            dataset_rid: Dataset RID
+            stream_branch_name: Branch name of the stream
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Stream information dictionary
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> stream = service.get_stream(
+            ...     dataset_rid="ri.foundry.main.dataset.xxx",
+            ...     stream_branch_name="master"
+            ... )
+        """
+        try:
+            stream = self.service.Dataset.Stream.get(
+                dataset_rid=dataset_rid,
+                stream_branch_name=stream_branch_name,
+                preview=preview,
+            )
+            return self._serialize_response(stream)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to get stream on branch '{stream_branch_name}': {e}"
+            )
+
+    def publish_record(
+        self,
+        dataset_rid: str,
+        stream_branch_name: str,
+        record: Dict[str, Any],
+        view_rid: Optional[str] = None,
+        preview: bool = False,
+    ) -> None:
+        """
+        Publish a single record to a stream.
+
+        Args:
+            dataset_rid: Dataset RID
+            stream_branch_name: Branch name of the stream
+            record: Record data as dictionary matching stream schema
+            view_rid: Optional view RID for partitioning
+            preview: Enable preview mode (default: False)
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> service.publish_record(
+            ...     dataset_rid="ri.foundry.main.dataset.xxx",
+            ...     stream_branch_name="master",
+            ...     record={"id": 123, "name": "test", "timestamp": 1234567890}
+            ... )
+        """
+        try:
+            self.service.Dataset.Stream.publish_record(
+                dataset_rid=dataset_rid,
+                stream_branch_name=stream_branch_name,
+                record=record,
+                view_rid=view_rid,
+                preview=preview,
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to publish record to stream: {e}")
+
+    def publish_records(
+        self,
+        dataset_rid: str,
+        stream_branch_name: str,
+        records: list,
+        view_rid: Optional[str] = None,
+        preview: bool = False,
+    ) -> None:
+        """
+        Publish multiple records to a stream in a batch.
+
+        Args:
+            dataset_rid: Dataset RID
+            stream_branch_name: Branch name of the stream
+            records: List of record dictionaries matching stream schema
+            view_rid: Optional view RID for partitioning
+            preview: Enable preview mode (default: False)
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> records = [
+            ...     {"id": 1, "name": "alice"},
+            ...     {"id": 2, "name": "bob"}
+            ... ]
+            >>> service.publish_records(
+            ...     dataset_rid="ri.foundry.main.dataset.xxx",
+            ...     stream_branch_name="master",
+            ...     records=records
+            ... )
+        """
+        try:
+            self.service.Dataset.Stream.publish_records(
+                dataset_rid=dataset_rid,
+                stream_branch_name=stream_branch_name,
+                records=records,
+                view_rid=view_rid,
+                preview=preview,
+            )
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to publish {len(records)} records to stream: {e}"
+            )
+
+    def reset_stream(
+        self, dataset_rid: str, stream_branch_name: str, preview: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Reset a stream, clearing all existing data.
+
+        Args:
+            dataset_rid: Dataset RID
+            stream_branch_name: Branch name of the stream to reset
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Updated stream information
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = StreamsService()
+            >>> stream = service.reset_stream(
+            ...     dataset_rid="ri.foundry.main.dataset.xxx",
+            ...     stream_branch_name="master"
+            ... )
+        """
+        try:
+            stream = self.service.Dataset.Stream.reset(
+                dataset_rid=dataset_rid,
+                stream_branch_name=stream_branch_name,
+                preview=preview,
+            )
+            return self._serialize_response(stream)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to reset stream on branch '{stream_branch_name}': {e}"
+            )
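Taken together, the new StreamsService is presumably driven by the `pltr streams` commands added in pltr/commands/streams.py. A minimal, hypothetical usage sketch follows (not part of the diff; it assumes BaseService resolves an authenticated Foundry client, and the RIDs are placeholders):

# Hypothetical end-to-end use of StreamsService (illustration only, not from the diff).
from pltr.services.streams import StreamsService

service = StreamsService()  # assumes BaseService supplies credentials/profile

# Create a streaming dataset with a single STRING field (schema shape taken from the docstrings).
schema = {"fieldSchemaList": [{"name": "value", "type": "STRING"}]}
dataset = service.create_dataset(
    name="my-stream",
    parent_folder_rid="ri.compass.main.folder.xxx",  # placeholder RID
    schema=schema,
)

# Publish a small batch to the default branch, then clear the stream.
service.publish_records(
    dataset_rid=dataset["rid"],
    stream_branch_name="master",
    records=[{"value": "a"}, {"value": "b"}],
)
service.reset_stream(dataset_rid=dataset["rid"], stream_branch_name="master")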
pltr/services/third_party_applications.py ADDED
@@ -0,0 +1,53 @@
+"""
+Third-party applications service wrapper for Foundry SDK.
+Provides access to third-party application management operations.
+"""
+
+from typing import Any, Dict
+
+from .base import BaseService
+
+
+class ThirdPartyApplicationsService(BaseService):
+    """Service wrapper for Foundry third-party applications operations."""
+
+    def _get_service(self) -> Any:
+        """Get the Foundry third-party applications service."""
+        return self.client.third_party_applications.ThirdPartyApplication
+
+    def get_application(
+        self, application_rid: str, preview: bool = False
+    ) -> Dict[str, Any]:
+        """
+        Get information about a specific third-party application.
+
+        Args:
+            application_rid: Third-party application Resource Identifier
+                Expected format: ri.third-party-applications.<realm>.third-party-application.<locator>
+                Example: ri.third-party-applications.main.third-party-application.my-app-123
+            preview: Enable preview mode (default: False)
+
+        Returns:
+            Third-party application information dictionary containing:
+            - rid: Application resource identifier
+            - name: Application name
+            - description: Application description (if available)
+            - status: Application status (if available)
+
+        Raises:
+            RuntimeError: If the operation fails
+
+        Example:
+            >>> service = ThirdPartyApplicationsService()
+            >>> app = service.get_application(
+            ...     "ri.third-party-applications.main.third-party-application.my-app"
+            ... )
+            >>> print(app['name'])
+        """
+        try:
+            application = self.service.get(application_rid, preview=preview)
+            return self._serialize_response(application)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to get third-party application {application_rid}: {e}"
+            )
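Callers of this wrapper only need to handle the RuntimeError it raises on failure; a minimal, hypothetical sketch (the RID is a placeholder, not taken from the diff):

# Hypothetical caller-side error handling for ThirdPartyApplicationsService.
from pltr.services.third_party_applications import ThirdPartyApplicationsService

service = ThirdPartyApplicationsService()
try:
    app = service.get_application(
        "ri.third-party-applications.main.third-party-application.my-app"  # placeholder
    )
    print(app["name"])
except RuntimeError as err:
    # SDK exceptions are re-raised as RuntimeError, so one except clause suffices.
    print(f"Lookup failed: {err}")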
pltr/utils/formatting.py CHANGED
@@ -4,7 +4,7 @@ Output formatting utilities for CLI commands.
 
 import json
 import csv
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union, Callable
 from datetime import datetime
 from io import StringIO
 
@@ -12,6 +12,12 @@ from rich.console import Console
 from rich.table import Table
 from rich import print as rich_print
 
+# Type checking import to avoid circular dependencies
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    pass
+
 
 class OutputFormatter:
     """Handles different output formats for CLI commands."""
@@ -460,6 +466,118 @@ class OutputFormatter:
             else:
                 f.write(str(data))
 
+    def format_paginated_output(
+        self,
+        result: Any,  # PaginationResult
+        format_type: str = "table",
+        output_file: Optional[str] = None,
+        formatter_fn: Optional[Callable] = None,
+    ) -> Optional[str]:
+        """
+        Format paginated results with metadata.
+
+        This method handles display of paginated data and automatically
+        includes pagination information based on the output format.
+
+        Args:
+            result: PaginationResult object with .data and .metadata attributes
+            format_type: Output format ('table', 'json', 'csv')
+            output_file: Optional output file path
+            formatter_fn: Optional custom formatter function for the data
+
+        Returns:
+            Formatted string if no output file specified
+
+        Example:
+            >>> result = PaginationResult(data=[...], metadata=metadata)
+            >>> formatter.format_paginated_output(result, "json")
+        """
+        # Extract data and metadata
+        data = result.data if hasattr(result, "data") else result
+        metadata = result.metadata if hasattr(result, "metadata") else None
+
+        # JSON format: include pagination metadata in output
+        if format_type == "json":
+            if metadata:
+                output_data = {
+                    "data": data,
+                    "pagination": {
+                        "page": metadata.current_page,
+                        "items_count": metadata.items_fetched,
+                        "has_more": metadata.has_more,
+                        "total_pages_fetched": metadata.total_pages_fetched,
+                    },
+                }
+                # Include next_page_token if available
+                if metadata.next_page_token:
+                    output_data["pagination"]["next_page_token"] = (
+                        metadata.next_page_token
+                    )
+
+                return self._format_json(output_data, output_file)
+            else:
+                # No metadata, format data directly
+                return self._format_json(data, output_file)
+
+        # Table/CSV format: format data normally, then print pagination info
+        else:
+            # Format the data using custom formatter or default
+            if formatter_fn:
+                formatted_result = formatter_fn(data, format_type, output_file)
+            else:
+                formatted_result = self.format_output(data, format_type, output_file)
+
+            # Print pagination info to console (even when saving to file)
+            # For CSV/table formats, pagination metadata is shown on console
+            # while data is written to file
+            if metadata:
+                self.print_pagination_info(metadata)
+
+            return formatted_result
+
+    def print_pagination_info(self, metadata: Any) -> None:  # PaginationMetadata
+        """
+        Print pagination information to the console.
+
+        This provides users with helpful information about the current
+        pagination state and how to fetch more data.
+
+        Args:
+            metadata: PaginationMetadata object
+
+        Example output:
+            Fetched 20 items (page 1)
+            Next page: --page-token abc123
+            Fetch all: Add --all flag
+        """
+        if not metadata:
+            return
+
+        # Build info message
+        info_lines = []
+
+        # Current state
+        info_lines.append(
+            f"Fetched {metadata.items_fetched} items (page {metadata.current_page})"
+        )
+
+        # Next steps if more data available
+        if metadata.has_more:
+            if metadata.next_page_token:
+                info_lines.append(f"Next page: --page-token {metadata.next_page_token}")
+            else:
+                # Iterator pattern without explicit token
+                info_lines.append(
+                    f"Next page: Use --max-pages {metadata.current_page + 1}"
+                )
+
+            info_lines.append("Fetch all: Add --all flag")
+        else:
+            info_lines.append("No more pages available")
+
+        # Print as info message
+        self.print_info("\n".join(info_lines))
+
     def format_sql_results(
         self,
         results: Any,
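To illustrate how paginated results could flow through the new format_paginated_output helper, here is a hedged sketch: it assumes OutputFormatter can be constructed without arguments and uses SimpleNamespace stand-ins for the PaginationResult/PaginationMetadata objects that the new pltr/utils/pagination.py presumably provides.

# Hypothetical wiring of paginated data into format_paginated_output (illustration only).
from types import SimpleNamespace

from pltr.utils.formatting import OutputFormatter

metadata = SimpleNamespace(  # stand-in for PaginationMetadata
    current_page=1,
    items_fetched=2,
    has_more=True,
    total_pages_fetched=1,
    next_page_token="abc123",
)
result = SimpleNamespace(  # stand-in for PaginationResult
    data=[{"rid": "ri.example.1"}, {"rid": "ri.example.2"}],
    metadata=metadata,
)

formatter = OutputFormatter()  # assumed zero-argument constructor
# JSON output embeds a "pagination" block; table/CSV print the info to the console instead.
print(formatter.format_paginated_output(result, format_type="json"))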
@@ -826,6 +944,38 @@ class OutputFormatter:
 
         return self.format_output(formatted_schedules, format_type, output_file)
 
+    def format_schedule_runs_list(
+        self,
+        runs: List[Dict[str, Any]],
+        format_type: str = "table",
+        output_file: Optional[str] = None,
+    ) -> Optional[str]:
+        """
+        Format list of schedule runs.
+
+        Args:
+            runs: List of schedule run dictionaries
+            format_type: Output format
+            output_file: Optional output file path
+
+        Returns:
+            Formatted string if no output file specified
+        """
+        formatted_runs = []
+        for run in runs:
+            build_rid = run.get("build_rid", "")
+            formatted_run = {
+                "RID": run.get("rid", ""),
+                "Status": run.get("status", ""),
+                "Started": self._format_datetime(run.get("started_time")),
+                "Finished": self._format_datetime(run.get("finished_time")),
+                "Build": build_rid[:40] + "..." if len(build_rid) > 40 else build_rid,
+                "Result": run.get("result", ""),
+            }
+            formatted_runs.append(formatted_run)
+
+        return self.format_output(formatted_runs, format_type, output_file)
+
     # MediaSets formatting methods
 
     def format_media_item_info(
@@ -953,6 +1103,50 @@ class OutputFormatter:
         else:
             return self.format_output(reference, format_type, output_file)
 
+    def format_thumbnail_status(
+        self,
+        status: Dict[str, Any],
+        format_type: str = "table",
+        output_file: Optional[str] = None,
+    ) -> Optional[str]:
+        """
+        Format thumbnail calculation status for display.
+
+        Args:
+            status: Thumbnail status dictionary
+            format_type: Output format
+            output_file: Optional output file path
+
+        Returns:
+            Formatted string if no output file specified
+        """
+        if format_type == "table":
+            details = []
+
+            property_order = [
+                ("status", "Status"),
+                ("transformation_id", "Transformation ID"),
+                ("media_item_rid", "Media Item RID"),
+            ]
+
+            for key, label in property_order:
+                if status.get(key) is not None:
+                    details.append({"Property": label, "Value": str(status[key])})
+
+            # Add any remaining properties
+            for key, value in status.items():
+                if (
+                    key not in [prop[0] for prop in property_order]
+                    and value is not None
+                ):
+                    details.append(
+                        {"Property": key.replace("_", " ").title(), "Value": str(value)}
+                    )
+
+            return self.format_output(details, format_type, output_file)
+        else:
+            return self.format_output(status, format_type, output_file)
+
     # Dataset formatting methods
 
     def format_branches(