pltr-cli 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pltr/__init__.py +1 -1
- pltr/cli.py +24 -0
- pltr/commands/admin.py +12 -2
- pltr/commands/functions.py +503 -0
- pltr/commands/language_models.py +515 -0
- pltr/commands/models.py +362 -0
- pltr/commands/project.py +21 -61
- pltr/commands/resource.py +0 -53
- pltr/commands/space.py +25 -303
- pltr/commands/streams.py +616 -0
- pltr/services/admin.py +15 -4
- pltr/services/dataset.py +2 -3
- pltr/services/folder.py +6 -1
- pltr/services/functions.py +223 -0
- pltr/services/language_models.py +281 -0
- pltr/services/models.py +179 -0
- pltr/services/project.py +87 -49
- pltr/services/resource.py +14 -72
- pltr/services/space.py +24 -175
- pltr/services/streams.py +290 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/METADATA +51 -2
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/RECORD +25 -17
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/WHEEL +0 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/entry_points.txt +0 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/licenses/LICENSE +0 -0
pltr/commands/streams.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Streams management commands for Foundry.
|
|
3
|
+
Provides commands for managing streaming datasets and publishing records.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import json
from pathlib import Path
from typing import Any, Optional

import typer
from rich.console import Console

from ..services.streams import StreamsService
from ..utils.formatting import OutputFormatter
from ..utils.progress import SpinnerProgressTracker
from ..auth.base import ProfileNotFoundError, MissingCredentialsError
from ..utils.completion import (
    complete_rid,
    complete_profile,
    complete_output_format,
)
|
|
21
|
+
|
|
22
|
+
# Create main app and sub-apps.
# `app` is the `streams` command group (see the `pltr streams ...` examples in
# the command docstrings below); `dataset` and `stream` are nested groups.
app = typer.Typer(help="Manage streaming datasets and streams")
dataset_app = typer.Typer(help="Manage streaming datasets")
stream_app = typer.Typer(help="Manage streams and publish records")

# Add sub-apps
app.add_typer(dataset_app, name="dataset")
app.add_typer(stream_app, name="stream")

# Shared Rich console and output formatter used by every command in this module.
console = Console()
formatter = OutputFormatter(console)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def parse_json_or_file(data_str: Optional[str]) -> Optional[Any]:
    """
    Parse JSON from string or file.

    Supports:
    - Inline JSON: '{"key": "value"}'
    - File reference: @data.json

    Args:
        data_str: JSON string or file reference (``@``-prefixed path), or None

    Returns:
        The parsed JSON value, or None if data_str is empty/None.
        Note: the value may be a dict *or* a list -- callers such as
        ``publish-batch`` pass JSON arrays through this helper.

    Raises:
        FileNotFoundError: If file reference doesn't exist
        json.JSONDecodeError: If JSON is invalid
    """
    if not data_str:
        return None

    # Handle file reference: "@path" means read the payload from disk.
    if data_str.startswith("@"):
        file_path = Path(data_str[1:])
        # Explicit existence check so the user gets a clear message instead of
        # a raw OSError traceback from open().
        if not file_path.exists():
            raise FileNotFoundError(f"File not found: {file_path}")

        # Explicit encoding so parsing doesn't depend on the platform default.
        with open(file_path, "r", encoding="utf-8") as f:
            return json.load(f)

    # Handle inline JSON
    return json.loads(data_str)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
@dataset_app.command("create")
def create_dataset(
    name: str = typer.Argument(
        ...,
        help="Dataset name",
    ),
    parent_folder_rid: str = typer.Option(
        ...,
        "--folder",
        "-f",
        help="Parent folder RID (e.g., ri.compass.main.folder.xxx)",
        autocompletion=complete_rid,
    ),
    schema: str = typer.Option(
        ...,
        "--schema",
        "-s",
        help="Stream schema as JSON or @file.json. Format: {'fieldSchemaList': [{'name': 'field', 'type': 'STRING'}]}",
    ),
    branch: Optional[str] = typer.Option(
        None,
        "--branch",
        "-b",
        help="Branch name (default: master)",
    ),
    compressed: Optional[bool] = typer.Option(
        None,
        "--compressed",
        help="Enable compression",
    ),
    partitions: Optional[int] = typer.Option(
        None,
        "--partitions",
        help="Number of partitions (default: 1). Each partition handles ~5 MB/s.",
    ),
    stream_type: Optional[str] = typer.Option(
        None,
        "--type",
        help="Stream type: HIGH_THROUGHPUT or LOW_LATENCY (default: LOW_LATENCY)",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "table",
        "--format",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Create a new streaming dataset with an initial stream.

    The schema defines the structure of records in the stream.
    Each field must have a 'name' and 'type' (STRING, INTEGER, DOUBLE, BOOLEAN, etc.).

    Examples:

        # Create basic streaming dataset
        pltr streams dataset create my-stream \\
            --folder ri.compass.main.folder.xxx \\
            --schema '{"fieldSchemaList": [{"name": "value", "type": "STRING"}]}'

        # Create from schema file
        pltr streams dataset create sensor-data \\
            --folder ri.compass.main.folder.xxx \\
            --schema @schema.json \\
            --partitions 5 \\
            --type HIGH_THROUGHPUT

        # With specific branch
        pltr streams dataset create my-stream \\
            --folder ri.compass.main.folder.xxx \\
            --schema @schema.json \\
            --branch develop
    """
    try:
        # Parse schema (inline JSON or @file reference).
        schema_dict = parse_json_or_file(schema)
        if schema_dict is None:
            console.print("[red]Error: Schema is required[/red]")
            raise typer.Exit(1)

        # Service construction may raise auth errors; keep it inside both the
        # spinner and the try block.
        with SpinnerProgressTracker().track_spinner("Creating streaming dataset"):
            service = StreamsService(profile=profile)
            result = service.create_dataset(
                name=name,
                parent_folder_rid=parent_folder_rid,
                schema=schema_dict,
                branch_name=branch,
                compressed=compressed,
                partitions_count=partitions,
                stream_type=stream_type,
                preview=preview,
            )

        # Summarize the new identifiers, then emit the full payload in the
        # requested output format.
        console.print(
            f"[green]✓[/green] Created streaming dataset: {result.get('name')}"
        )
        console.print(f"  Dataset RID: {result.get('rid')}")
        console.print(f"  Stream RID: {result.get('streamRid')}")

        formatter.format_output(result, format)

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click), so without this
        # re-raise the Exit raised above would be caught by `except Exception`
        # and print a spurious empty "Error:" message.
        raise
    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        console.print(f"[red]Error parsing schema: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
@stream_app.command("create")
def create_stream(
    dataset_rid: str = typer.Argument(
        ...,
        help="Dataset RID (e.g., ri.foundry.main.dataset.xxx)",
        autocompletion=complete_rid,
    ),
    branch: str = typer.Option(
        ...,
        "--branch",
        "-b",
        help="Branch name to create stream on",
    ),
    schema: str = typer.Option(
        ...,
        "--schema",
        "-s",
        help="Stream schema as JSON or @file.json",
    ),
    compressed: Optional[bool] = typer.Option(
        None,
        "--compressed",
        help="Enable compression",
    ),
    partitions: Optional[int] = typer.Option(
        None,
        "--partitions",
        help="Number of partitions (default: 1)",
    ),
    stream_type: Optional[str] = typer.Option(
        None,
        "--type",
        help="Stream type: HIGH_THROUGHPUT or LOW_LATENCY",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "table",
        "--format",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Create a new stream on a branch of an existing streaming dataset.

    Creates a new branch and stream in one operation.

    Examples:

        # Create stream on new branch
        pltr streams stream create ri.foundry.main.dataset.xxx \\
            --branch feature-branch \\
            --schema '{"fieldSchemaList": [{"name": "id", "type": "INTEGER"}]}'

        # High-throughput stream
        pltr streams stream create ri.foundry.main.dataset.xxx \\
            --branch production \\
            --schema @schema.json \\
            --partitions 10 \\
            --type HIGH_THROUGHPUT
    """
    try:
        # Parse schema (inline JSON or @file reference).
        schema_dict = parse_json_or_file(schema)
        if schema_dict is None:
            console.print("[red]Error: Schema is required[/red]")
            raise typer.Exit(1)

        with SpinnerProgressTracker().track_spinner("Creating stream"):
            service = StreamsService(profile=profile)
            result = service.create_stream(
                dataset_rid=dataset_rid,
                branch_name=branch,
                schema=schema_dict,
                compressed=compressed,
                partitions_count=partitions,
                stream_type=stream_type,
                preview=preview,
            )

        console.print(f"[green]✓[/green] Created stream on branch: {branch}")
        console.print(f"  Stream RID: {result.get('streamRid')}")

        formatter.format_output(result, format)

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click); re-raise so the
        # broad `except Exception` below doesn't print a second, empty error.
        raise
    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        console.print(f"[red]Error parsing schema: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
@stream_app.command("get")
def get_stream(
    dataset_rid: str = typer.Argument(
        ...,
        help="Dataset RID",
        autocompletion=complete_rid,
    ),
    branch: str = typer.Option(
        ...,
        "--branch",
        "-b",
        help="Stream branch name",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "table",
        "--format",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Get information about a stream.

    Retrieves stream metadata including schema and configuration.

    Examples:

        # Get stream on master branch
        pltr streams stream get ri.foundry.main.dataset.xxx --branch master

        # Get stream as JSON
        pltr streams stream get ri.foundry.main.dataset.xxx \\
            --branch feature-branch \\
            --format json
    """
    try:
        # Fetch the stream metadata behind a spinner; service construction can
        # raise auth errors, so it stays inside the try block.
        with SpinnerProgressTracker().track_spinner("Fetching stream information"):
            streams_service = StreamsService(profile=profile)
            stream_info = streams_service.get_stream(
                dataset_rid=dataset_rid,
                stream_branch_name=branch,
                preview=preview,
            )

        # Render the metadata in the requested output format.
        formatter.format_output(stream_info, format)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
@stream_app.command("publish")
def publish_record(
    dataset_rid: str = typer.Argument(
        ...,
        help="Dataset RID",
        autocompletion=complete_rid,
    ),
    branch: str = typer.Option(
        ...,
        "--branch",
        "-b",
        help="Stream branch name",
    ),
    record: str = typer.Option(
        ...,
        "--record",
        "-r",
        help="Record data as JSON or @file.json",
    ),
    view_rid: Optional[str] = typer.Option(
        None,
        "--view",
        help="View RID for partitioning",
        autocompletion=complete_rid,
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Publish a single record to a stream.

    The record must match the stream's schema.

    Examples:

        # Publish inline record
        pltr streams stream publish ri.foundry.main.dataset.xxx \\
            --branch master \\
            --record '{"id": 123, "name": "test", "timestamp": 1234567890}'

        # Publish from file
        pltr streams stream publish ri.foundry.main.dataset.xxx \\
            --branch master \\
            --record @record.json
    """
    try:
        # Parse record (inline JSON or @file reference).
        record_dict = parse_json_or_file(record)
        if record_dict is None:
            console.print("[red]Error: Record is required[/red]")
            raise typer.Exit(1)

        with SpinnerProgressTracker().track_spinner("Publishing record"):
            service = StreamsService(profile=profile)
            # No value is displayed on success; publishing is fire-and-forget.
            service.publish_record(
                dataset_rid=dataset_rid,
                stream_branch_name=branch,
                record=record_dict,
                view_rid=view_rid,
                preview=preview,
            )

        console.print("[green]✓[/green] Record published successfully")

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click); re-raise so the
        # broad `except Exception` below doesn't print a second, empty error.
        raise
    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        console.print(f"[red]Error parsing record: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
@stream_app.command("publish-batch")
def publish_records(
    dataset_rid: str = typer.Argument(
        ...,
        help="Dataset RID",
        autocompletion=complete_rid,
    ),
    branch: str = typer.Option(
        ...,
        "--branch",
        "-b",
        help="Stream branch name",
    ),
    records: str = typer.Option(
        ...,
        "--records",
        "-r",
        help="Records as JSON array or @file.json",
    ),
    view_rid: Optional[str] = typer.Option(
        None,
        "--view",
        help="View RID for partitioning",
        autocompletion=complete_rid,
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Publish multiple records to a stream in a batch.

    More efficient than publishing records individually.

    Examples:

        # Publish multiple records inline
        pltr streams stream publish-batch ri.foundry.main.dataset.xxx \\
            --branch master \\
            --records '[{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]'

        # Publish from file
        pltr streams stream publish-batch ri.foundry.main.dataset.xxx \\
            --branch master \\
            --records @records.json
    """
    try:
        # Parse records; a non-empty JSON *array* is required (empty arrays
        # are rejected here too, since there would be nothing to publish).
        records_list = parse_json_or_file(records)
        if not records_list or not isinstance(records_list, list):
            console.print("[red]Error: Records must be a JSON array[/red]")
            raise typer.Exit(1)

        with SpinnerProgressTracker().track_spinner("Publishing records"):
            service = StreamsService(profile=profile)
            service.publish_records(
                dataset_rid=dataset_rid,
                stream_branch_name=branch,
                records=records_list,
                view_rid=view_rid,
                preview=preview,
            )

        console.print(
            f"[green]✓[/green] Published {len(records_list)} records successfully"
        )

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click); re-raise so the
        # broad `except Exception` below doesn't print a second, empty error.
        raise
    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        console.print(f"[red]Error parsing records: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
@stream_app.command("reset")
def reset_stream(
    dataset_rid: str = typer.Argument(
        ...,
        help="Dataset RID",
        autocompletion=complete_rid,
    ),
    branch: str = typer.Option(
        ...,
        "--branch",
        "-b",
        help="Stream branch name to reset",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "table",
        "--format",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
    confirm: bool = typer.Option(
        False,
        "--confirm",
        help="Skip confirmation prompt",
    ),
):
    """
    Reset a stream, clearing all existing data.

    WARNING: This operation is irreversible and will delete all records.

    Examples:

        # Reset with confirmation
        pltr streams stream reset ri.foundry.main.dataset.xxx --branch master

        # Skip confirmation
        pltr streams stream reset ri.foundry.main.dataset.xxx \\
            --branch master \\
            --confirm
    """
    # Destructive operation: prompt interactively unless --confirm was given.
    # The prompt happens before the try block so declining exits cleanly (0).
    if not confirm:
        confirmed = typer.confirm(
            f"⚠️ This will delete all data in stream on branch '{branch}'. Continue?"
        )
        if not confirmed:
            console.print("Operation cancelled")
            raise typer.Exit(0)

    try:
        with SpinnerProgressTracker().track_spinner("Resetting stream"):
            streams_service = StreamsService(profile=profile)
            reset_info = streams_service.reset_stream(
                dataset_rid=dataset_rid,
                stream_branch_name=branch,
                preview=preview,
            )

        console.print(f"[green]✓[/green] Stream reset successfully on branch: {branch}")

        # Emit the service response in the requested output format.
        formatter.format_output(reset_info, format)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        console.print(f"[red]Authentication Error: {e}[/red]")
        raise typer.Exit(1)
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
|
pltr/services/admin.py
CHANGED
|
@@ -67,11 +67,16 @@ class AdminService(BaseService):
|
|
|
67
67
|
|
|
68
68
|
def fetch_page(page_token: Optional[str]) -> Dict[str, Any]:
|
|
69
69
|
"""Fetch a single page of users."""
|
|
70
|
-
|
|
70
|
+
iterator = self.service.User.list(
|
|
71
71
|
page_size=config.page_size or settings.get("page_size", 20),
|
|
72
72
|
page_token=page_token,
|
|
73
73
|
)
|
|
74
|
-
|
|
74
|
+
# ResourceIterator has .data and .next_page_token attributes
|
|
75
|
+
# Extract them properly for the pagination handler
|
|
76
|
+
return {
|
|
77
|
+
"data": [self._serialize_response(user) for user in iterator.data],
|
|
78
|
+
"next_page_token": iterator.next_page_token,
|
|
79
|
+
}
|
|
75
80
|
|
|
76
81
|
# Use response pagination handler
|
|
77
82
|
return self._paginate_response(fetch_page, config, progress_callback)
|
|
@@ -219,10 +224,16 @@ class AdminService(BaseService):
|
|
|
219
224
|
Dictionary containing group list and pagination info
|
|
220
225
|
"""
|
|
221
226
|
try:
|
|
222
|
-
|
|
227
|
+
iterator = self.service.Group.list(
|
|
223
228
|
page_size=page_size, page_token=page_token
|
|
224
229
|
)
|
|
225
|
-
|
|
230
|
+
# ResourceIterator has .data and .next_page_token attributes
|
|
231
|
+
# Return structure compatible with formatter.display()
|
|
232
|
+
groups = [self._serialize_response(group) for group in iterator.data]
|
|
233
|
+
return {
|
|
234
|
+
"data": groups,
|
|
235
|
+
"next_page_token": iterator.next_page_token,
|
|
236
|
+
}
|
|
226
237
|
except Exception as e:
|
|
227
238
|
raise RuntimeError(f"Failed to list groups: {str(e)}")
|
|
228
239
|
|
pltr/services/dataset.py
CHANGED
|
@@ -18,8 +18,6 @@ class DatasetService(BaseService):
|
|
|
18
18
|
"""Get the Foundry datasets service."""
|
|
19
19
|
return self.client.datasets
|
|
20
20
|
|
|
21
|
-
# list_datasets method removed - not supported by foundry-platform-sdk v1.27.0
|
|
22
|
-
|
|
23
21
|
def get_dataset(self, dataset_rid: str) -> Dict[str, Any]:
|
|
24
22
|
"""
|
|
25
23
|
Get information about a specific dataset.
|
|
@@ -254,8 +252,9 @@ class DatasetService(BaseService):
|
|
|
254
252
|
# Clean column name (remove special characters for field name)
|
|
255
253
|
clean_name = col.strip().replace(" ", "_").replace("-", "_")
|
|
256
254
|
|
|
255
|
+
# SDK 1.69.0 expects FieldType enum but accepts strings at runtime
|
|
257
256
|
fields.append(
|
|
258
|
-
DatasetFieldSchema(name=clean_name, type=field_type, nullable=nullable)
|
|
257
|
+
DatasetFieldSchema(name=clean_name, type=field_type, nullable=nullable) # type: ignore[arg-type]
|
|
259
258
|
)
|
|
260
259
|
|
|
261
260
|
return DatasetSchema(field_schema_list=fields)
|
pltr/services/folder.py
CHANGED
|
@@ -4,6 +4,8 @@ Folder service wrapper for Foundry SDK filesystem API.
|
|
|
4
4
|
|
|
5
5
|
from typing import Any, Optional, Dict, List
|
|
6
6
|
|
|
7
|
+
from foundry_sdk.v2.filesystem.models import GetFoldersBatchRequestElement
|
|
8
|
+
|
|
7
9
|
from .base import BaseService
|
|
8
10
|
|
|
9
11
|
|
|
@@ -95,7 +97,10 @@ class FolderService(BaseService):
|
|
|
95
97
|
raise ValueError("Maximum batch size is 1000 folders")
|
|
96
98
|
|
|
97
99
|
try:
|
|
98
|
-
|
|
100
|
+
elements = [
|
|
101
|
+
GetFoldersBatchRequestElement(folder_rid=rid) for rid in folder_rids
|
|
102
|
+
]
|
|
103
|
+
response = self.service.Folder.get_batch(body=elements, preview=True)
|
|
99
104
|
folders = []
|
|
100
105
|
for folder in response.folders:
|
|
101
106
|
folders.append(self._format_folder_info(folder))
|