pltr-cli 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,515 @@
1
+ """
2
+ LanguageModels commands for Foundry.
3
+ Provides commands for Anthropic Claude models and OpenAI embeddings.
4
+ """
5
+
6
+ import typer
7
+ import json
8
+ from typing import Optional, List, Any, TYPE_CHECKING
9
+ from pathlib import Path
10
+ from rich.console import Console
11
+
12
+ # Lazy import to avoid SDK Literal types being processed by typer at module load time
13
+ if TYPE_CHECKING:
14
+ pass
15
+
16
+ from ..utils.formatting import OutputFormatter
17
+ from ..utils.progress import SpinnerProgressTracker
18
+ from ..auth.base import ProfileNotFoundError, MissingCredentialsError
19
+ from ..utils.completion import (
20
+ complete_profile,
21
+ complete_output_format,
22
+ )
23
+
24
# Create main app and sub-apps
app = typer.Typer(help="Interact with language models")
anthropic_app = typer.Typer(help="Anthropic Claude models")
openai_app = typer.Typer(help="OpenAI models")

# Add sub-apps: commands are exposed as `... anthropic <cmd>` and `... openai <cmd>`
app.add_typer(anthropic_app, name="anthropic")
app.add_typer(openai_app, name="openai")

# Shared Rich console and output formatter used by every command in this module.
console = Console()
formatter = OutputFormatter(console)
35
+
36
+
37
def parse_json_input(input_str: str) -> Any:
    """
    Parse JSON input from string or file.

    Supports:
    - Inline JSON: '{"key": "value"}' or '["item1", "item2"]'
    - File reference: @input.json

    Args:
        input_str: Input string or file reference

    Returns:
        Parsed JSON data, or None when input_str is empty

    Raises:
        FileNotFoundError: If file reference doesn't exist
        json.JSONDecodeError: If JSON is invalid
    """
    if not input_str:
        return None

    # Handle file reference (leading "@" means "read JSON from this path")
    if input_str.startswith("@"):
        file_path = Path(input_str[1:])
        if not file_path.exists():
            raise FileNotFoundError(f"Input file not found: {file_path}")

        # Read with an explicit encoding so parsing doesn't depend on the
        # platform default (JSON files are conventionally UTF-8).
        return json.loads(file_path.read_text(encoding="utf-8"))

    # Handle inline JSON
    return json.loads(input_str)
69
+
70
+
71
def display_anthropic_response(response: dict, format: str, output: Optional[str]):
    """
    Render an Anthropic model response, surfacing token usage first.

    Args:
        response: Response dictionary from service
        format: Output format
        output: Optional output file path
    """
    # Token usage is shown prominently before the body of the response.
    if "usage" in response:
        usage = response["usage"]
        in_tokens = usage.get("inputTokens", 0)
        out_tokens = usage.get("outputTokens", 0)
        # Fall back to summing the parts when the API omits totalTokens.
        total = usage.get("totalTokens", in_tokens + out_tokens)
        formatter.print_info(
            f"Token usage - Input: {in_tokens}, Output: {out_tokens}, Total: {total}"
        )

    # Then either persist the full response or print it to the console.
    if output is None:
        formatter.display(response, format)
    else:
        formatter.save_to_file(response, output, format)
        formatter.print_success(f"Response saved to {output}")
97
+
98
+
99
def display_openai_response(response: dict, format: str, output: Optional[str]):
    """
    Render an OpenAI model response, surfacing token usage first.

    Args:
        response: Response dictionary from service
        format: Output format
        output: Optional output file path
    """
    # Token usage is shown prominently before the body of the response.
    if "usage" in response:
        usage = response["usage"]
        prompt = usage.get("promptTokens", 0)
        # Fall back to the prompt count when the API omits totalTokens.
        total = usage.get("totalTokens", prompt)
        formatter.print_info(
            f"Token usage - Prompt: {prompt}, Total: {total}"
        )

    # Then either persist the full response or print it to the console.
    if output is None:
        formatter.display(response, format)
    else:
        formatter.save_to_file(response, output, format)
        formatter.print_success(f"Response saved to {output}")
124
+
125
+
126
+ # ===== Anthropic Commands =====
127
+
128
+
129
@anthropic_app.command("messages")
def anthropic_messages(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    message: str = typer.Option(
        ...,
        "--message",
        "-m",
        help="User message text",
    ),
    max_tokens: int = typer.Option(
        1024,
        "--max-tokens",
        help="Maximum tokens to generate",
    ),
    system: Optional[str] = typer.Option(
        None,
        "--system",
        "-s",
        help="System prompt to guide model behavior",
    ),
    temperature: Optional[float] = typer.Option(
        None,
        "--temperature",
        "-t",
        help="Sampling temperature (0.0-1.0). Lower is more deterministic.",
        min=0.0,
        max=1.0,
    ),
    stop: Optional[List[str]] = typer.Option(
        None,
        "--stop",
        help="Stop sequences (can be specified multiple times)",
    ),
    top_k: Optional[int] = typer.Option(
        None,
        "--top-k",
        help="Sample from top K tokens (Anthropic models only)",
        min=1,
    ),
    top_p: Optional[float] = typer.Option(
        None,
        "--top-p",
        help="Nucleus sampling threshold (0.0-1.0)",
        min=0.0,
        max=1.0,
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Send a single message to an Anthropic Claude model.

    Simple interface for single-turn Q&A with Claude models. For multi-turn
    conversations, tool calling, or advanced features, use messages-advanced.

    Examples:

        # Basic message
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Explain quantum computing"

        # With system prompt and custom parameters
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Write a haiku" \\
            --system "You are a poetic assistant" \\
            --temperature 0.8 \\
            --max-tokens 100

        # With stop sequences
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "List three items" \\
            --stop "." --stop "\\n\\n"

        # Save response to file
        pltr language-models anthropic messages ri.language-models.main.model.abc123 \\
            --message "Summarize AI trends" \\
            --output response.json
    """
    try:
        # Lazy import (see module-level note): keeps the SDK's Literal types
        # from being processed by typer at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Sending message..."):
            response = service.send_message(
                model_id=model_id,
                message=message,
                max_tokens=max_tokens,
                system=system,
                temperature=temperature,
                # Normalize an empty stop list to None so the service sees
                # "no stop sequences" rather than an empty collection.
                stop_sequences=stop if stop else None,
                top_k=top_k,
                top_p=top_p,
                preview=preview,
            )

        display_anthropic_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        # NOTE(review): this command parses no JSON itself; presumably this
        # guards file errors raised when saving output — confirm.
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)
259
+
260
+
261
@anthropic_app.command("messages-advanced")
def anthropic_messages_advanced(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    request: str = typer.Option(
        ...,
        "--request",
        "-r",
        help="Request JSON (inline or @file.json). Must include 'messages' and 'maxTokens'.",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Send messages to Anthropic Claude model with advanced features.

    Accepts full SDK request structure, enabling:
    - Multi-turn conversations
    - Tool/function calling
    - Extended thinking mode
    - Document and image processing
    - Citations

    The request must be a JSON object containing:
    - messages: List of message objects with role and content
    - maxTokens: Maximum tokens to generate

    Optional fields:
    - system: System prompt blocks
    - temperature: Sampling temperature (0.0-1.0)
    - thinking: Extended thinking configuration
    - tools: Tool definitions for function calling
    - toolChoice: Tool selection strategy
    - stopSequences: Sequences that stop generation
    - topK: Sample from top K tokens
    - topP: Nucleus sampling threshold

    Examples:

        # Multi-turn conversation from file
        # conversation.json:
        # {
        #   "messages": [
        #     {"role": "user", "content": [{"type": "text", "text": "Hi"}]},
        #     {"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]},
        #     {"role": "user", "content": [{"type": "text", "text": "Help me"}]}
        #   ],
        #   "maxTokens": 500
        # }
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request @conversation.json

        # Inline JSON with system prompt
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request '{"messages": [{"role": "user", "content": [{"type": "text", "text": "Hi"}]}], "maxTokens": 100, "system": [{"type": "text", "text": "Be concise"}]}'

        # With extended thinking
        pltr language-models anthropic messages-advanced ri.language-models.main.model.abc123 \\
            --request '{"messages": [{"role": "user", "content": [{"type": "text", "text": "Solve this problem"}]}], "maxTokens": 2000, "thinking": {"type": "enabled", "budget": 10000}}'
    """
    try:
        # Parse request JSON (inline string or @file reference)
        request_data = parse_json_input(request)

        # Validate required fields before touching the service so the user
        # gets an "Invalid input" error rather than an opaque API failure.
        if not isinstance(request_data, dict):
            raise ValueError("Request must be a JSON object")
        if "messages" not in request_data:
            raise ValueError("Request must include 'messages' field")
        if "maxTokens" not in request_data:
            raise ValueError("Request must include 'maxTokens' field")

        # Lazy import (see module-level note): keeps the SDK's Literal types
        # from being processed by typer at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Sending messages..."):
            # Optional fields use .get() so absent keys are forwarded as None.
            response = service.send_messages_advanced(
                model_id=model_id,
                messages=request_data["messages"],
                max_tokens=request_data["maxTokens"],
                system=request_data.get("system"),
                temperature=request_data.get("temperature"),
                thinking=request_data.get("thinking"),
                tools=request_data.get("tools"),
                tool_choice=request_data.get("toolChoice"),
                stop_sequences=request_data.get("stopSequences"),
                top_k=request_data.get("topK"),
                top_p=request_data.get("topP"),
                preview=preview,
            )

        display_anthropic_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)
386
+
387
+
388
+ # ===== OpenAI Commands =====
389
+
390
+
391
@openai_app.command("embeddings")
def openai_embeddings(
    model_id: str = typer.Argument(
        ...,
        help="Model Resource Identifier (ri.language-models.main.model.<id>)",
    ),
    input: str = typer.Option(
        ...,
        "--input",
        "-i",
        help='Input text(s). Single string, JSON array \'["text1", "text2"]\', or @file.json',
    ),
    dimensions: Optional[int] = typer.Option(
        None,
        "--dimensions",
        "-d",
        help="Custom embedding dimensions (not all models support this)",
        min=1,
    ),
    encoding: Optional[str] = typer.Option(
        None,
        "--encoding",
        "-e",
        help="Output encoding format: 'float' or 'base64'",
    ),
    profile: Optional[str] = typer.Option(
        None,
        "--profile",
        "-p",
        help="Profile name",
        autocompletion=complete_profile,
    ),
    format: str = typer.Option(
        "json",
        "--format",
        "-f",
        help="Output format (table, json, csv)",
        autocompletion=complete_output_format,
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o", help="Output file path"
    ),
    preview: bool = typer.Option(
        False,
        "--preview",
        help="Enable preview mode",
    ),
):
    """
    Generate embeddings for text using an OpenAI model.

    Accepts single text strings or multiple texts for batch processing.
    Returns embedding vectors that can be used for semantic search,
    clustering, or similarity comparisons.

    Examples:

        # Single text
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input "Machine learning is fascinating"

        # Multiple texts (inline JSON array)
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input '["Document 1", "Document 2", "Document 3"]'

        # Multiple texts from file
        # texts.json: ["Text 1", "Text 2", "Text 3"]
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input @texts.json

        # Custom dimensions and encoding
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input "Sample text" \\
            --dimensions 1024 \\
            --encoding base64

        # Save embeddings to file
        pltr language-models openai embeddings ri.language-models.main.model.xyz789 \\
            --input '["Text 1", "Text 2"]' \\
            --output embeddings.json
    """
    try:
        # Parse input - only treat it as JSON when it is a file reference or
        # looks like a JSON array; any other value is a single literal text.
        input_data = (
            parse_json_input(input)
            if input.startswith("@") or input.startswith("[")
            else input
        )

        # Convert to list if single string
        if isinstance(input_data, str):
            input_texts = [input_data]
        elif isinstance(input_data, list):
            input_texts = input_data
        else:
            raise ValueError("Input must be a string or array of strings")

        # Fail fast on malformed payloads instead of surfacing an opaque
        # service error: every array element must be a string, and at least
        # one text is required.
        if not input_texts:
            raise ValueError("Input must contain at least one text")
        if not all(isinstance(text, str) for text in input_texts):
            raise ValueError("Input must be a string or array of strings")

        # Validate encoding format if provided
        if encoding and encoding not in ["float", "base64"]:
            raise ValueError("Encoding format must be 'float' or 'base64'")

        # Lazy import (see module-level note): keeps the SDK's Literal types
        # from being processed by typer at module load time.
        from ..services.language_models import LanguageModelsService

        service = LanguageModelsService(profile=profile)

        with SpinnerProgressTracker().track_spinner("Generating embeddings..."):
            response = service.generate_embeddings(
                model_id=model_id,
                input_texts=input_texts,
                dimensions=dimensions,
                encoding_format=encoding,
                preview=preview,
            )

        display_openai_response(response, format, output)

    except (ProfileNotFoundError, MissingCredentialsError) as e:
        formatter.print_error(f"Authentication error: {e}")
        raise typer.Exit(1)
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        formatter.print_error(f"Invalid input: {e}")
        raise typer.Exit(1)
    except Exception as e:
        formatter.print_error(f"Operation failed: {e}")
        raise typer.Exit(1)