pltr-cli 0.12.0__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pltr/__init__.py +1 -1
- pltr/cli.py +24 -0
- pltr/commands/admin.py +12 -2
- pltr/commands/functions.py +503 -0
- pltr/commands/language_models.py +515 -0
- pltr/commands/models.py +362 -0
- pltr/commands/project.py +21 -61
- pltr/commands/resource.py +0 -53
- pltr/commands/space.py +25 -303
- pltr/commands/streams.py +616 -0
- pltr/services/admin.py +15 -4
- pltr/services/dataset.py +2 -3
- pltr/services/folder.py +6 -1
- pltr/services/functions.py +223 -0
- pltr/services/language_models.py +281 -0
- pltr/services/models.py +179 -0
- pltr/services/project.py +87 -49
- pltr/services/resource.py +14 -72
- pltr/services/space.py +24 -175
- pltr/services/streams.py +290 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/METADATA +51 -2
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/RECORD +25 -17
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/WHEEL +0 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/entry_points.txt +0 -0
- {pltr_cli-0.12.0.dist-info → pltr_cli-0.13.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Functions service wrapper for Foundry SDK.
|
|
3
|
+
Provides access to Functions query execution and value type operations.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from typing import Any, Dict, Optional
|
|
7
|
+
from .base import BaseService
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class FunctionsService(BaseService):
    """Service wrapper for Foundry Functions operations.

    Exposes query lookup/execution and value-type retrieval through the
    Foundry SDK's ``functions`` client.
    """

    def _get_service(self) -> Any:
        """Return the Foundry Functions service from the SDK client."""
        return self.client.functions

    # ===== Query Operations =====

    def get_query(
        self,
        query_api_name: str,
        preview: bool = False,
        version: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Get query metadata by API name.

        Args:
            query_api_name: Query API name (e.g., "myQuery")
            preview: Enable preview mode (default: False)
            version: Optional query version (e.g., "1.0.0").
                If not specified, returns the latest version.

        Returns:
            Query information dictionary containing:
            - rid: Query resource identifier
            - apiName: Query API name
            - version: Query version
            - parameters: Query parameters with types
            - output: Output structure definition

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = FunctionsService()
            >>> query = service.get_query("myQuery")
            >>> print(query['apiName'])
        """
        try:
            query = self.service.Query.get(
                query_api_name, preview=preview, version=version
            )
            return self._serialize_response(query)
        except Exception as e:
            # ``from e`` preserves the SDK's original traceback for debugging.
            raise RuntimeError(
                f"Failed to get query '{query_api_name}': {e}"
            ) from e

    def get_query_by_rid(
        self, query_rid: str, preview: bool = False, version: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Get query metadata by RID.

        Args:
            query_rid: Query Resource Identifier.
                Format: ri.functions.main.query.<id>
            preview: Enable preview mode (default: False)
            version: Optional query version (e.g., "1.0.0").
                If not specified, returns the latest version.

        Returns:
            Query information dictionary containing:
            - rid: Query resource identifier
            - apiName: Query API name
            - version: Query version
            - parameters: Query parameters with types
            - output: Output structure definition

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = FunctionsService()
            >>> query = service.get_query_by_rid("ri.functions.main.query.abc123")
            >>> print(query['rid'])
        """
        try:
            query = self.service.Query.get_by_rid(
                query_rid, preview=preview, version=version
            )
            return self._serialize_response(query)
        except Exception as e:
            raise RuntimeError(f"Failed to get query {query_rid}: {e}") from e

    def execute_query(
        self,
        query_api_name: str,
        parameters: Optional[Dict[str, Any]] = None,
        preview: bool = False,
        version: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Execute a query by API name with parameters.

        Args:
            query_api_name: Query API name (e.g., "myQuery")
            parameters: Query parameters as dictionary with DataValue encoding.
                Examples:
                - Primitives: {"limit": 10, "name": "John"}
                - Arrays: {"ids": [1, 2, 3]}
                - Structs: {"config": {"enabled": true}}
                - Dates: {"date": "2024-01-01"} (ISO 8601)
                - Timestamps: {"created": "2021-01-04T05:00:00Z"} (ISO 8601)
            preview: Enable preview mode (default: False)
            version: Optional query version (e.g., "1.0.0").
                If not specified, executes the latest version.

        Returns:
            Query execution result dictionary.

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = FunctionsService()
            >>> result = service.execute_query(
            ...     "myQuery",
            ...     parameters={"limit": 10, "filter": "active"}
            ... )
            >>> print(result)
        """
        try:
            result = self.service.Query.execute(
                query_api_name,
                # SDK expects a dict; normalize None to an empty parameter set.
                parameters=parameters or {},
                preview=preview,
                version=version,
            )
            return self._serialize_response(result)
        except Exception as e:
            raise RuntimeError(
                f"Failed to execute query '{query_api_name}': {e}"
            ) from e

    def execute_query_by_rid(
        self,
        query_rid: str,
        parameters: Optional[Dict[str, Any]] = None,
        preview: bool = False,
        version: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Execute a query by RID with parameters.

        Args:
            query_rid: Query Resource Identifier.
                Format: ri.functions.main.query.<id>
            parameters: Query parameters as dictionary with DataValue encoding.
                Examples:
                - Primitives: {"limit": 10, "name": "John"}
                - Arrays: {"ids": [1, 2, 3]}
                - Structs: {"config": {"enabled": true}}
                - Dates: {"date": "2024-01-01"} (ISO 8601)
                - Timestamps: {"created": "2021-01-04T05:00:00Z"} (ISO 8601)
            preview: Enable preview mode (default: False)
            version: Optional query version (e.g., "1.0.0").
                If not specified, executes the latest version.

        Returns:
            Query execution result dictionary.

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = FunctionsService()
            >>> result = service.execute_query_by_rid(
            ...     "ri.functions.main.query.abc123",
            ...     parameters={"limit": 10}
            ... )
            >>> print(result)
        """
        try:
            result = self.service.Query.execute_by_rid(
                query_rid,
                parameters=parameters or {},
                preview=preview,
                version=version,
            )
            return self._serialize_response(result)
        except Exception as e:
            raise RuntimeError(f"Failed to execute query {query_rid}: {e}") from e

    # ===== Value Type Operations =====

    def get_value_type(
        self, value_type_rid: str, preview: bool = False
    ) -> Dict[str, Any]:
        """
        Get value type details by RID.

        Args:
            value_type_rid: Value Type Resource Identifier.
                Format: ri.functions.main.value-type.<id>
            preview: Enable preview mode (default: False)

        Returns:
            Value type information dictionary containing:
            - rid: Value type resource identifier
            - apiName: Value type API name
            - definition: Type definition and structure

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = FunctionsService()
            >>> value_type = service.get_value_type("ri.functions.main.value-type.xyz")
            >>> print(value_type['apiName'])
        """
        try:
            value_type = self.service.ValueType.get(value_type_rid, preview=preview)
            return self._serialize_response(value_type)
        except Exception as e:
            raise RuntimeError(
                f"Failed to get value type {value_type_rid}: {e}"
            ) from e
|
@@ -0,0 +1,281 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LanguageModels service wrapper for Foundry SDK.
|
|
3
|
+
Provides access to Anthropic Claude models and OpenAI embeddings.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from typing import Any, Dict, List, Optional
|
|
7
|
+
from .base import BaseService
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class LanguageModelsService(BaseService):
    """Service wrapper for Foundry LanguageModels operations.

    Provides Anthropic chat completions and OpenAI embeddings via the
    SDK's ``language_models`` client.
    """

    def _get_service(self) -> Any:
        """Return the Foundry LanguageModels service from the SDK client."""
        return self.client.language_models

    # ===== Anthropic Model Operations =====

    def send_message(
        self,
        model_id: str,
        message: str,
        max_tokens: int = 1024,
        system: Optional[str] = None,
        temperature: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Send a single message to an Anthropic model (simplified interface).

        Args:
            model_id: Model Resource Identifier.
                Format: ri.language-models.main.model.<id>
            message: User message text
            max_tokens: Maximum tokens to generate (default: 1024)
            system: Optional system prompt to guide model behavior
            temperature: Sampling temperature (0.0-1.0).
                Lower values are more deterministic.
            stop_sequences: Optional list of sequences that stop generation
            top_k: Sample from top K tokens (Anthropic models only)
            top_p: Nucleus sampling threshold (0.0-1.0)
            preview: Enable preview mode (default: False)

        Returns:
            Response dictionary containing:
            - content: List of content blocks (text, tool use, etc.)
            - role: Message role (typically "assistant")
            - model: Model identifier
            - stopReason: Reason generation stopped
            - usage: Token usage statistics
              (inputTokens, outputTokens, totalTokens)

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = LanguageModelsService()
            >>> response = service.send_message(
            ...     "ri.language-models.main.model.abc123",
            ...     "Explain quantum computing",
            ...     max_tokens=200
            ... )
            >>> print(response['content'][0]['text'])
        """
        try:
            # Wrap the plain string in the SDK's message/content-block shape.
            messages = [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": message}],
                }
            ]

            request_params: Dict[str, Any] = {
                "messages": messages,
                "maxTokens": max_tokens,
            }

            # Only include optional fields the caller actually supplied.
            if system is not None:
                # System prompt is also expressed as a list of text blocks.
                request_params["system"] = [{"type": "text", "text": system}]
            if temperature is not None:
                request_params["temperature"] = temperature
            if stop_sequences is not None:
                request_params["stopSequences"] = stop_sequences
            if top_k is not None:
                request_params["topK"] = top_k
            if top_p is not None:
                request_params["topP"] = top_p

            response = self.service.AnthropicModel.messages(
                model_id,
                request=request_params,
                preview=preview,  # type: ignore
            )

            return self._serialize_response(response)
        except Exception as e:
            # ``from e`` preserves the SDK's original traceback for debugging.
            raise RuntimeError(
                f"Failed to send message to model {model_id}: {e}"
            ) from e

    def send_messages_advanced(
        self,
        model_id: str,
        messages: List[Dict[str, Any]],
        max_tokens: int,
        system: Optional[List[Dict[str, Any]]] = None,
        temperature: Optional[float] = None,
        thinking: Optional[Dict[str, Any]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Dict[str, Any]] = None,
        stop_sequences: Optional[List[str]] = None,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Send messages to an Anthropic model with advanced features.

        This method accepts the full SDK request structure, enabling:
        - Multi-turn conversations
        - Tool/function calling
        - Extended thinking mode
        - Document and image processing
        - Citations

        Args:
            model_id: Model Resource Identifier.
                Format: ri.language-models.main.model.<id>
            messages: List of message objects with role and content.
                Format: [{"role": "user|assistant", "content": [...]}]
            max_tokens: Maximum tokens to generate
            system: Optional system prompt blocks.
                Format: [{"type": "text", "text": "..."}]
            temperature: Sampling temperature (0.0-1.0)
            thinking: Extended thinking configuration.
                Format: {"type": "enabled", "budget": 10000}
            tools: Tool definitions for function calling
            tool_choice: Tool selection strategy
            stop_sequences: Sequences that stop generation
            top_k: Sample from top K tokens
            top_p: Nucleus sampling threshold (0.0-1.0)
            preview: Enable preview mode (default: False)

        Returns:
            Response dictionary containing:
            - content: List of content blocks (text, tool use, thinking, etc.)
            - role: Message role (typically "assistant")
            - model: Model identifier
            - stopReason: Reason generation stopped
            - usage: Token usage statistics

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = LanguageModelsService()
            >>> messages = [
            ...     {"role": "user", "content": [{"type": "text", "text": "Hi"}]},
            ...     {"role": "assistant", "content": [{"type": "text", "text": "Hello!"}]},
            ...     {"role": "user", "content": [{"type": "text", "text": "Help me"}]}
            ... ]
            >>> response = service.send_messages_advanced(
            ...     "ri.language-models.main.model.abc123",
            ...     messages=messages,
            ...     max_tokens=500
            ... )
        """
        try:
            request_params: Dict[str, Any] = {
                "messages": messages,
                "maxTokens": max_tokens,
            }

            # Only include optional fields the caller actually supplied.
            if system is not None:
                request_params["system"] = system
            if temperature is not None:
                request_params["temperature"] = temperature
            if thinking is not None:
                request_params["thinking"] = thinking
            if tools is not None:
                request_params["tools"] = tools
            if tool_choice is not None:
                request_params["toolChoice"] = tool_choice
            if stop_sequences is not None:
                request_params["stopSequences"] = stop_sequences
            if top_k is not None:
                request_params["topK"] = top_k
            if top_p is not None:
                request_params["topP"] = top_p

            response = self.service.AnthropicModel.messages(
                model_id,
                request=request_params,
                preview=preview,  # type: ignore
            )

            return self._serialize_response(response)
        except Exception as e:
            raise RuntimeError(
                f"Failed to send messages to model {model_id}: {e}"
            ) from e

    # ===== OpenAI Model Operations =====

    def generate_embeddings(
        self,
        model_id: str,
        input_texts: List[str],
        dimensions: Optional[int] = None,
        encoding_format: Optional[str] = None,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Generate embeddings for text using an OpenAI model.

        Args:
            model_id: Model Resource Identifier.
                Format: ri.language-models.main.model.<id>
            input_texts: List of text strings to embed
            dimensions: Optional custom embedding dimensions.
                Not all models support this parameter.
            encoding_format: Output encoding format.
                Options: "float" (default) or "base64"
            preview: Enable preview mode (default: False)

        Returns:
            Response dictionary containing:
            - data: List of embedding objects, each with:
              - embedding: Vector (list of floats or base64 string)
              - index: Position in input array
              - object: Type identifier ("embedding")
            - model: Model identifier
            - usage: Token usage statistics (promptTokens, totalTokens)

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = LanguageModelsService()
            >>> response = service.generate_embeddings(
            ...     "ri.language-models.main.model.xyz789",
            ...     input_texts=["Machine learning", "Deep learning"]
            ... )
            >>> embeddings = [item['embedding'] for item in response['data']]
        """
        try:
            request_params: Dict[str, Any] = {
                "input": input_texts,
            }

            # Only include optional fields the caller actually supplied.
            if dimensions is not None:
                request_params["dimensions"] = dimensions
            if encoding_format is not None:
                request_params["encodingFormat"] = encoding_format

            response = self.service.OpenAiModel.embeddings(
                model_id,
                request=request_params,
                preview=preview,  # type: ignore
            )

            return self._serialize_response(response)
        except Exception as e:
            raise RuntimeError(
                f"Failed to generate embeddings with model {model_id}: {e}"
            ) from e
pltr/services/models.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Models service wrapper for Foundry SDK.
|
|
3
|
+
Provides access to ML model registry operations (model lifecycle, versioning, metadata).
|
|
4
|
+
|
|
5
|
+
Note: This is distinct from LanguageModels, which handles LLM chat/embeddings operations.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Any, Dict, Optional
|
|
9
|
+
from .base import BaseService
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ModelsService(BaseService):
    """Service wrapper for Foundry Models operations.

    Covers ML model registry operations (model lifecycle, versioning,
    metadata). Distinct from LanguageModelsService, which handles LLM
    chat/embeddings operations.
    """

    def _get_service(self) -> Any:
        """Return the Foundry Models service from the SDK client."""
        return self.client.models

    # ===== Model Operations =====

    def create_model(
        self,
        name: str,
        parent_folder_rid: str,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Create a new ML model in the registry.

        Args:
            name: Model name
            parent_folder_rid: Parent folder RID (e.g., ri.compass.main.folder.xxx)
            preview: Enable preview mode (default: False)

        Returns:
            Model information dictionary containing:
            - rid: Model resource identifier
            - name: Model name
            - parentFolderRid: Parent folder RID

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = ModelsService()
            >>> model = service.create_model(
            ...     name="fraud-detector",
            ...     parent_folder_rid="ri.compass.main.folder.xxx"
            ... )
        """
        try:
            model = self.service.Model.create(
                name=name,
                parent_folder_rid=parent_folder_rid,
                preview=preview,
            )
            return self._serialize_response(model)
        except Exception as e:
            # ``from e`` preserves the SDK's original traceback for debugging.
            raise RuntimeError(f"Failed to create model '{name}': {e}") from e

    def get_model(
        self,
        model_rid: str,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Get information about a model.

        Args:
            model_rid: Model RID (e.g., ri.foundry.main.model.xxx)
            preview: Enable preview mode (default: False)

        Returns:
            Model information dictionary.

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = ModelsService()
            >>> model = service.get_model(
            ...     model_rid="ri.foundry.main.model.abc123"
            ... )
        """
        try:
            model = self.service.Model.get(
                model_rid=model_rid,
                preview=preview,
            )
            return self._serialize_response(model)
        except Exception as e:
            raise RuntimeError(f"Failed to get model '{model_rid}': {e}") from e

    # ===== ModelVersion Operations =====

    def get_model_version(
        self,
        model_rid: str,
        model_version_rid: str,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        Get information about a specific model version.

        Args:
            model_rid: Model RID (e.g., ri.foundry.main.model.xxx)
            model_version_rid: Version identifier (e.g., v1.0.0 or version RID)
            preview: Enable preview mode (default: False)

        Returns:
            Model version information dictionary.

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = ModelsService()
            >>> version = service.get_model_version(
            ...     model_rid="ri.foundry.main.model.abc123",
            ...     model_version_rid="v1.0.0"
            ... )
        """
        try:
            version = self.service.ModelVersion.get(
                model_rid=model_rid,
                model_version_rid=model_version_rid,
                preview=preview,
            )
            return self._serialize_response(version)
        except Exception as e:
            raise RuntimeError(
                f"Failed to get model version '{model_version_rid}' for model '{model_rid}': {e}"
            ) from e

    def list_model_versions(
        self,
        model_rid: str,
        page_size: Optional[int] = None,
        page_token: Optional[str] = None,
        preview: bool = False,
    ) -> Dict[str, Any]:
        """
        List all versions of a model with pagination support.

        Args:
            model_rid: Model RID (e.g., ri.foundry.main.model.xxx)
            page_size: Maximum number of versions to return per page
            page_token: Token for fetching next page of results
            preview: Enable preview mode (default: False)

        Returns:
            Dictionary containing:
            - data: List of model version information dictionaries
            - nextPageToken: Token for next page (if available)

        Raises:
            RuntimeError: If the operation fails.

        Example:
            >>> service = ModelsService()
            >>> result = service.list_model_versions(
            ...     model_rid="ri.foundry.main.model.abc123",
            ...     page_size=50
            ... )
            >>> versions = result['data']
            >>> next_token = result.get('nextPageToken')
        """
        try:
            response = self.service.ModelVersion.list(
                model_rid=model_rid,
                page_size=page_size,
                page_token=page_token,
                preview=preview,
            )
            return self._serialize_response(response)
        except Exception as e:
            raise RuntimeError(
                f"Failed to list model versions for model '{model_rid}': {e}"
            ) from e