pragmatiks-gcp-provider 0.64.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gcp_provider/__init__.py +25 -0
- gcp_provider/py.typed +0 -0
- gcp_provider/resources/__init__.py +24 -0
- gcp_provider/resources/gke.py +435 -0
- gcp_provider/resources/secret.py +172 -0
- pragmatiks_gcp_provider-0.64.0.dist-info/METADATA +98 -0
- pragmatiks_gcp_provider-0.64.0.dist-info/RECORD +8 -0
- pragmatiks_gcp_provider-0.64.0.dist-info/WHEEL +4 -0
gcp_provider/__init__.py
ADDED
@@ -0,0 +1,25 @@
"""GCP provider for Pragmatiks.

Provides GCP resources for managing infrastructure in Google Cloud Platform
using user-provided credentials (multi-tenant SaaS pattern).
"""

from pragma_sdk import Provider

from gcp_provider.resources import GKE, GKEConfig, GKEOutputs, Secret, SecretConfig, SecretOutputs

gcp = Provider(name="gcp")

# Register resources
gcp.resource("gke")(GKE)
gcp.resource("secret")(Secret)

__all__ = [
    "gcp",
    "GKE",
    "GKEConfig",
    "GKEOutputs",
    "Secret",
    "SecretConfig",
    "SecretOutputs",
]
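The module builds a single `Provider` instance and registers both resource classes against it at import time; `gcp.resource("gke")(GKE)` simply applies the decorator returned by `gcp.resource("gke")` in call form. A minimal sketch of what a consumer can rely on after importing the package (illustrative only, not part of the wheel):

```python
# Importing the package runs the __init__.py above, which constructs the
# Provider and registers the GKE and Secret resource classes.
import gcp_provider

# The ClassVar identifiers on each resource class match the names used in
# the gcp.resource(...) registration calls.
assert gcp_provider.GKE.provider == "gcp" and gcp_provider.GKE.resource == "gke"
assert gcp_provider.Secret.provider == "gcp" and gcp_provider.Secret.resource == "secret"
```
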
gcp_provider/py.typed
ADDED
File without changes

gcp_provider/resources/__init__.py
ADDED

@@ -0,0 +1,24 @@
"""Resource definitions for gcp provider.

Import and export your Resource classes here for discovery by the runtime.
"""

from gcp_provider.resources.gke import (
    GKE,
    GKEConfig,
    GKEOutputs,
)
from gcp_provider.resources.secret import (
    Secret,
    SecretConfig,
    SecretOutputs,
)

__all__ = [
    "GKE",
    "GKEConfig",
    "GKEOutputs",
    "Secret",
    "SecretConfig",
    "SecretOutputs",
]

gcp_provider/resources/gke.py
ADDED

@@ -0,0 +1,435 @@
"""GCP GKE cluster resource supporting both Autopilot and Standard modes."""

from __future__ import annotations

import asyncio
import json
import re
from collections.abc import AsyncIterator
from datetime import datetime
from typing import Annotated, Any, ClassVar, Literal, Self

from google.api_core.exceptions import AlreadyExists, NotFound
from google.cloud.container_v1 import ClusterManagerAsyncClient
from google.cloud.container_v1.types import (
    Cluster,
    CreateClusterRequest,
    DeleteClusterRequest,
    GetClusterRequest,
    NodeConfig,
    NodePool,
)
from google.cloud.logging_v2 import Client as LoggingClient
from google.oauth2 import service_account
from pydantic import Field, field_validator, model_validator
from pragma_sdk import Config, HealthStatus, LogEntry, Outputs, Resource

_CLUSTER_NAME_PATTERN = re.compile(r"^[a-z][a-z0-9-]{0,38}[a-z0-9]$|^[a-z]$")


class GKEConfig(Config):
    """Configuration for a GKE cluster.

    Attributes:
        project_id: GCP project ID where the cluster will be created.
        credentials: GCP service account credentials JSON object or string.
        location: GCP location - either a region (e.g., europe-west4) for regional
            clusters or a zone (e.g., europe-west4-a) for zonal clusters.
        name: Name of the GKE cluster.
        autopilot: Whether to create an Autopilot cluster. Defaults to True.
        network: VPC network name. Defaults to "default".
        subnetwork: VPC subnetwork name. If not specified, uses network default.
        release_channel: Release channel for cluster updates.
        initial_node_count: Number of nodes in default pool (standard clusters only).
        machine_type: Machine type for nodes (standard clusters only).
        disk_size_gb: Boot disk size in GB (standard clusters only).
    """

    project_id: str
    credentials: dict[str, Any] | str
    location: str
    name: str
    autopilot: bool = True
    network: str = "default"
    subnetwork: str | None = None
    release_channel: Literal["RAPID", "REGULAR", "STABLE"] = "REGULAR"
    initial_node_count: Annotated[int, Field(ge=1)] = 1
    machine_type: str = "e2-medium"
    disk_size_gb: Annotated[int, Field(ge=10)] = 100

    @field_validator("name")
    @classmethod
    def validate_cluster_name(cls, v: str) -> str:
        """Validate cluster name follows GCP naming rules."""
        if not _CLUSTER_NAME_PATTERN.match(v):
            msg = (
                "Cluster name must start with a lowercase letter, contain only "
                "lowercase letters, numbers, and hyphens, and be 1-40 characters"
            )
            raise ValueError(msg)
        return v

    @model_validator(mode="after")
    def validate_standard_cluster_config(self) -> Self:
        """Validate node configuration for standard clusters."""
        if not self.autopilot and self.initial_node_count < 1:
            msg = "Standard clusters require initial_node_count >= 1"
            raise ValueError(msg)
        return self


class GKEOutputs(Outputs):
    """Outputs from GKE cluster creation.

    Attributes:
        name: Cluster name.
        endpoint: Cluster API server endpoint URL.
        cluster_ca_certificate: Base64-encoded cluster CA certificate.
        location: Cluster location (region or zone).
        status: Cluster status (RUNNING, PROVISIONING, etc.).
        console_url: URL to view cluster in GCP Console.
        logs_url: URL to view cluster logs in Cloud Logging.
    """

    name: str
    endpoint: str
    cluster_ca_certificate: str
    location: str
    status: str
    console_url: str
    logs_url: str


# Polling configuration for cluster operations
_POLL_INTERVAL_SECONDS = 30
_MAX_POLL_ATTEMPTS = 40  # 40 * 30s = 20 minutes max wait


class GKE(Resource[GKEConfig, GKEOutputs]):
    """GCP GKE cluster resource supporting Autopilot and Standard modes.

    Creates and manages GKE clusters using user-provided service account
    credentials (multi-tenant SaaS pattern).

    Modes:
    - Autopilot (default): Fully managed, no node configuration needed
    - Standard: Manual node pool with configurable machine type and count

    Lifecycle:
    - on_create: Creates cluster, waits for RUNNING state
    - on_update: Limited updates (some require recreation)
    - on_delete: Deletes cluster, waits for completion
    """

    provider: ClassVar[str] = "gcp"
    resource: ClassVar[str] = "gke"

    def _get_client(self) -> ClusterManagerAsyncClient:
        """Get Cluster Manager async client with user-provided credentials.

        Returns:
            Configured Cluster Manager async client.

        Raises:
            ValueError: If credentials format is invalid.
        """
        creds_data = self.config.credentials

        if isinstance(creds_data, str):
            creds_data = json.loads(creds_data)

        credentials = service_account.Credentials.from_service_account_info(creds_data)
        return ClusterManagerAsyncClient(credentials=credentials)

    def _cluster_path(self) -> str:
        """Build cluster resource path.

        Returns:
            Full GCP resource path for this cluster.
        """
        return f"projects/{self.config.project_id}/locations/{self.config.location}/clusters/{self.config.name}"

    def _parent_path(self) -> str:
        """Build parent resource path for cluster creation.

        Returns:
            Parent path (project/location).
        """
        return f"projects/{self.config.project_id}/locations/{self.config.location}"

    def _build_outputs(self, cluster: Cluster) -> GKEOutputs:
        """Build outputs from cluster object."""
        project = self.config.project_id
        location = self.config.location
        name = self.config.name

        console_url = (
            f"https://console.cloud.google.com/kubernetes/clusters/details/"
            f"{location}/{name}/details?project={project}"
        )
        logs_url = (
            f"https://console.cloud.google.com/logs/query;query="
            f"resource.type%3D%22k8s_cluster%22%0A"
            f"resource.labels.cluster_name%3D%22{name}%22%0A"
            f"resource.labels.location%3D%22{location}%22"
            f"?project={project}"
        )

        return GKEOutputs(
            name=cluster.name,
            endpoint=cluster.endpoint,
            cluster_ca_certificate=cluster.master_auth.cluster_ca_certificate,
            location=cluster.location,
            status=Cluster.Status(cluster.status).name,
            console_url=console_url,
            logs_url=logs_url,
        )

    async def _wait_for_running(self, client: ClusterManagerAsyncClient) -> Cluster:
        """Poll cluster until it reaches RUNNING state.

        Args:
            client: Cluster Manager client.

        Returns:
            Cluster in RUNNING state.

        Raises:
            TimeoutError: If cluster doesn't reach RUNNING in time.
            RuntimeError: If cluster enters ERROR state.
        """
        for _ in range(_MAX_POLL_ATTEMPTS):
            cluster = await client.get_cluster(request=GetClusterRequest(name=self._cluster_path()))

            if cluster.status == Cluster.Status.RUNNING:
                return cluster

            if cluster.status == Cluster.Status.ERROR:
                msg = f"Cluster entered ERROR state: {cluster.status_message}"
                raise RuntimeError(msg)

            if cluster.status in (
                Cluster.Status.STOPPING,
                Cluster.Status.DEGRADED,
            ):
                msg = f"Cluster in unexpected state: {cluster.status.name}"
                raise RuntimeError(msg)

            await asyncio.sleep(_POLL_INTERVAL_SECONDS)

        msg = f"Cluster did not reach RUNNING state within {_MAX_POLL_ATTEMPTS * _POLL_INTERVAL_SECONDS} seconds"
        raise TimeoutError(msg)

    async def _wait_for_deletion(self, client: ClusterManagerAsyncClient) -> None:
        """Poll until cluster is deleted.

        Args:
            client: Cluster Manager client.

        Raises:
            TimeoutError: If cluster doesn't delete in time.
        """
        for _ in range(_MAX_POLL_ATTEMPTS):
            try:
                await client.get_cluster(request=GetClusterRequest(name=self._cluster_path()))
                await asyncio.sleep(_POLL_INTERVAL_SECONDS)
            except NotFound:
                return

        msg = f"Cluster was not deleted within {_MAX_POLL_ATTEMPTS * _POLL_INTERVAL_SECONDS} seconds"
        raise TimeoutError(msg)

    def _build_cluster_config(self) -> Cluster:
        """Build cluster configuration object.

        Returns:
            Cluster configuration for create request.
        """
        cluster = Cluster(
            name=self.config.name,
            network=self.config.network,
            release_channel={"channel": self.config.release_channel},
        )

        if self.config.subnetwork:
            cluster.subnetwork = self.config.subnetwork

        if self.config.autopilot:
            cluster.autopilot = {"enabled": True}
        else:
            cluster.node_pools = [
                NodePool(
                    name="default-pool",
                    initial_node_count=self.config.initial_node_count,
                    config=NodeConfig(
                        machine_type=self.config.machine_type,
                        disk_size_gb=self.config.disk_size_gb,
                    ),
                )
            ]

        return cluster

    async def on_create(self) -> GKEOutputs:
        """Create GKE cluster and wait for RUNNING state.

        Idempotent: If cluster already exists, returns its current state.

        Returns:
            GKEOutputs with cluster details.
        """
        client = self._get_client()

        try:
            await client.create_cluster(
                request=CreateClusterRequest(
                    parent=self._parent_path(),
                    cluster=self._build_cluster_config(),
                )
            )
        except AlreadyExists:
            pass

        cluster = await self._wait_for_running(client)

        return self._build_outputs(cluster)

    async def on_update(self, previous_config: GKEConfig) -> GKEOutputs:
        """Update cluster configuration.

        Most GKE cluster properties require recreation. This method validates
        that immutable fields haven't changed and returns current state.

        Args:
            previous_config: The previous configuration before update.

        Returns:
            GKEOutputs with current cluster state.

        Raises:
            ValueError: If immutable fields changed (requires delete + create).
        """
        if previous_config.project_id != self.config.project_id:
            msg = "Cannot change project_id; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.location != self.config.location:
            msg = "Cannot change location; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.name != self.config.name:
            msg = "Cannot change name; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.network != self.config.network:
            msg = "Cannot change network; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.autopilot != self.config.autopilot:
            msg = "Cannot change autopilot mode; delete and recreate resource"
            raise ValueError(msg)

        # For unchanged configs, return existing outputs if available
        if self.outputs is not None:
            return self.outputs

        # Fetch current cluster state
        client = self._get_client()
        cluster = await client.get_cluster(request=GetClusterRequest(name=self._cluster_path()))

        return self._build_outputs(cluster)

    async def on_delete(self) -> None:
        """Delete cluster and wait for completion.

        Idempotent: Succeeds if cluster doesn't exist.
        """
        client = self._get_client()

        try:
            await client.delete_cluster(request=DeleteClusterRequest(name=self._cluster_path()))
            await self._wait_for_deletion(client)
        except NotFound:
            pass

    async def health(self) -> HealthStatus:
        """Check cluster health by querying cluster status."""
        client = self._get_client()

        try:
            cluster = await client.get_cluster(
                request=GetClusterRequest(name=self._cluster_path())
            )
        except NotFound:
            return HealthStatus(
                status="unhealthy",
                message="Cluster not found",
            )

        status = Cluster.Status(cluster.status)

        if status == Cluster.Status.RUNNING:
            return HealthStatus(
                status="healthy",
                message="Cluster is running",
                details={"node_count": sum(np.initial_node_count for np in cluster.node_pools)},
            )

        if status in (Cluster.Status.PROVISIONING, Cluster.Status.RECONCILING):
            return HealthStatus(
                status="degraded",
                message=f"Cluster is {status.name.lower()}",
            )

        return HealthStatus(
            status="unhealthy",
            message=f"Cluster status: {status.name}",
            details={"status_message": cluster.status_message} if cluster.status_message else None,
        )

    async def logs(
        self,
        since: datetime | None = None,
        tail: int = 100,
    ) -> AsyncIterator[LogEntry]:
        """Fetch cluster logs from Cloud Logging."""
        creds_data = self.config.credentials
        if isinstance(creds_data, str):
            creds_data = json.loads(creds_data)

        credentials = service_account.Credentials.from_service_account_info(creds_data)
        logging_client = LoggingClient(credentials=credentials, project=self.config.project_id)

        filter_parts = [
            'resource.type="k8s_cluster"',
            f'resource.labels.cluster_name="{self.config.name}"',
            f'resource.labels.location="{self.config.location}"',
        ]
        if since:
            filter_parts.append(f'timestamp>="{since.isoformat()}Z"')

        filter_str = " AND ".join(filter_parts)

        entries = logging_client.list_entries(
            filter_=filter_str,
            order_by="timestamp desc",
            max_results=tail,
        )

        for entry in entries:
            level = "info"
            if hasattr(entry, "severity"):
                severity = str(entry.severity).lower()
                if "error" in severity or "critical" in severity:
                    level = "error"
                elif "warn" in severity:
                    level = "warn"
                elif "debug" in severity:
                    level = "debug"

            yield LogEntry(
                timestamp=entry.timestamp,
                level=level,
                message=str(entry.payload) if entry.payload else "",
                metadata={"log_name": entry.log_name} if entry.log_name else None,
            )
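For reference, a configuration sketch for the GKE resource above, mirroring the Secret example in the README further down. The `GKE(name=..., config=...)` constructor pattern is assumed to match that Secret example, and the project, cluster name, and credentials values are hypothetical; all `GKEConfig` fields come from the class definition:

```python
from gcp_provider import GKE, GKEConfig

# Placeholder service-account key; GKEConfig also accepts the raw JSON string.
service_account_info: dict = {"type": "service_account"}

cluster = GKE(
    name="demo-cluster",  # hypothetical Pragmatiks resource name
    config=GKEConfig(
        project_id="my-gcp-project",       # hypothetical project
        credentials=service_account_info,
        location="europe-west4",           # region -> regional; "europe-west4-a" -> zonal
        name="demo-cluster",
        autopilot=True,                    # default; set False for a Standard cluster
        release_channel="REGULAR",
    ),
)
```

With `autopilot=False`, the `initial_node_count`, `machine_type`, and `disk_size_gb` fields control the default node pool that `_build_cluster_config` creates.
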

gcp_provider/resources/secret.py
ADDED

@@ -0,0 +1,172 @@
"""GCP Secret Manager resource."""

from __future__ import annotations

import json
from typing import Any, ClassVar

from google.api_core.exceptions import AlreadyExists, NotFound
from google.cloud.secretmanager_v1 import SecretManagerServiceAsyncClient
from google.oauth2 import service_account
from pragma_sdk import Config, Outputs, Resource


class SecretConfig(Config):
    """Configuration for a GCP Secret Manager secret.

    Attributes:
        project_id: GCP project ID where the secret will be created.
        secret_id: Identifier for the secret within GCP (must be unique per project).
        data: Secret payload data to store.
        credentials: GCP service account credentials JSON object or string.
            Required for multi-tenant SaaS - no ADC fallback.
            Use a pragma/secret resource with a FieldReference to provide credentials.
    """

    project_id: str
    secret_id: str
    data: str
    credentials: dict[str, Any] | str


class SecretOutputs(Outputs):
    """Outputs from GCP Secret Manager secret creation.

    Attributes:
        resource_name: Full GCP resource name (projects/{project}/secrets/{id}).
        version_name: Full version resource name including version number.
        version_id: The version number as a string.
    """

    resource_name: str
    version_name: str
    version_id: str


class Secret(Resource[SecretConfig, SecretOutputs]):
    """GCP Secret Manager secret resource.

    Creates and manages secrets in GCP Secret Manager using user-provided
    service account credentials (multi-tenant SaaS pattern).

    Lifecycle:
    - on_create: Creates secret and initial version
    - on_update: Creates new version if data changed
    - on_delete: Deletes secret and all versions
    """

    provider: ClassVar[str] = "gcp"
    resource: ClassVar[str] = "secret"

    def _get_client(self) -> SecretManagerServiceAsyncClient:
        """Get Secret Manager async client with user-provided credentials.

        Creates a client authenticated with the user's GCP service account
        credentials rather than using ADC/Workload Identity. This is required
        for multi-tenant SaaS where each user operates in their own GCP project.

        Returns:
            Configured Secret Manager async client using user's credentials.

        Raises:
            ValueError: If credentials format is invalid.
        """
        creds_data = self.config.credentials

        if isinstance(creds_data, str):
            creds_data = json.loads(creds_data)

        credentials = service_account.Credentials.from_service_account_info(creds_data)
        return SecretManagerServiceAsyncClient(credentials=credentials)

    def _secret_path(self) -> str:
        """Build secret resource path.

        Returns:
            Full GCP resource path for this secret.
        """
        return f"projects/{self.config.project_id}/secrets/{self.config.secret_id}"

    async def on_create(self) -> SecretOutputs:
        """Create GCP secret with initial version.

        Idempotent: If secret already exists, adds a new version.

        Returns:
            SecretOutputs with resource name and version info.
        """
        client = self._get_client()
        parent = f"projects/{self.config.project_id}"

        try:
            secret = await client.create_secret(
                request={
                    "parent": parent,
                    "secret_id": self.config.secret_id,
                    "secret": {"replication": {"automatic": {}}},
                }
            )
        except AlreadyExists:
            secret = await client.get_secret(name=self._secret_path())

        version = await client.add_secret_version(
            request={
                "parent": secret.name,
                "payload": {"data": self.config.data.encode("utf-8")},
            }
        )

        return SecretOutputs(
            resource_name=secret.name,
            version_name=version.name,
            version_id=version.name.split("/")[-1],
        )

    async def on_update(self, previous_config: SecretConfig) -> SecretOutputs:
        """Update secret by creating new version if data changed.

        Args:
            previous_config: The previous configuration before update.

        Returns:
            SecretOutputs with updated version info.

        Raises:
            ValueError: If project_id or secret_id changed (requires delete + create).
        """
        if previous_config.project_id != self.config.project_id:
            msg = "Cannot change project_id; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.secret_id != self.config.secret_id:
            msg = "Cannot change secret_id; delete and recreate resource"
            raise ValueError(msg)

        if previous_config.data == self.config.data and self.outputs is not None:
            return self.outputs

        client = self._get_client()
        version = await client.add_secret_version(
            request={
                "parent": self._secret_path(),
                "payload": {"data": self.config.data.encode("utf-8")},
            }
        )

        return SecretOutputs(
            resource_name=self._secret_path(),
            version_name=version.name,
            version_id=version.name.split("/")[-1],
        )

    async def on_delete(self) -> None:
        """Delete secret and all versions.

        Idempotent: Succeeds if secret doesn't exist.
        """
        client = self._get_client()

        try:
            await client.delete_secret(name=self._secret_path())
        except NotFound:
            pass
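The `version_name` output above is a full Secret Manager version path, so the stored payload can be read back with the same google-cloud-secret-manager client the resource uses. A minimal sketch (illustrative only; the helper name and example values are hypothetical):

```python
import asyncio

from google.cloud.secretmanager_v1 import SecretManagerServiceAsyncClient
from google.oauth2 import service_account


async def read_back(version_name: str, creds_info: dict) -> str:
    """Fetch the payload of a version created by the Secret resource.

    version_name is the SecretOutputs.version_name value, e.g.
    "projects/my-gcp-project/secrets/api-key/versions/1" (hypothetical).
    """
    creds = service_account.Credentials.from_service_account_info(creds_info)
    client = SecretManagerServiceAsyncClient(credentials=creds)
    response = await client.access_secret_version(request={"name": version_name})
    return response.payload.data.decode("utf-8")


# Example invocation (assumes outputs and creds from the resource above):
# asyncio.run(read_back(outputs.version_name, service_account_info))
```
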

pragmatiks_gcp_provider-0.64.0.dist-info/METADATA
ADDED

@@ -0,0 +1,98 @@
Metadata-Version: 2.3
Name: pragmatiks-gcp-provider
Version: 0.64.0
Summary: GCP provider for Pragmatiks
Author: Pragmatiks
Author-email: Pragmatiks <team@pragmatiks.io>
Requires-Dist: pragmatiks-sdk>=0.6.0
Requires-Dist: google-cloud-container>=2.50.0
Requires-Dist: google-cloud-logging>=3.10.0
Requires-Dist: google-cloud-secret-manager>=2.20.0
Requires-Python: >=3.13
Description-Content-Type: text/markdown

# GCP Provider

GCP provider for Pragmatiks - manage Google Cloud resources declaratively.

## Available Resources

### Secret (gcp/secret)

Manages secrets in GCP Secret Manager using user-provided service account credentials.

```python
from gcp_provider import Secret, SecretConfig

# Define a secret
secret = Secret(
    name="my-api-key",
    config=SecretConfig(
        project_id="my-gcp-project",
        secret_id="api-key",
        data="super-secret-value",
        credentials={"type": "service_account", ...}, # or JSON string
    ),
)
```

**Config:**
- `project_id` - GCP project ID where the secret will be created
- `secret_id` - Identifier for the secret (must be unique per project)
- `data` - Secret payload data to store
- `credentials` - GCP service account credentials (JSON object or string)

**Outputs:**
- `resource_name` - Full GCP resource name (`projects/{project}/secrets/{id}`)
- `version_name` - Full version resource name including version number
- `version_id` - The version number as a string

## Installation

```bash
pip install pragmatiks-gcp-provider
```

## Development

### Testing

```bash
# Install dependencies
uv sync --dev

# Run tests
uv run pytest tests/
```

### Writing Tests

Use `ProviderHarness` to test lifecycle methods:

```python
from pragma_sdk.provider import ProviderHarness
from gcp_provider import Secret, SecretConfig

async def test_create_secret():
    harness = ProviderHarness()
    result = await harness.invoke_create(
        Secret,
        name="test-secret",
        config=SecretConfig(
            project_id="test-project",
            secret_id="my-secret",
            data="secret-value",
            credentials=mock_credentials,
        ),
    )
    assert result.success
    assert result.outputs.resource_name is not None
```

## Deployment

Push your provider to Pragmatiks platform:

```bash
pragma provider push
```

pragmatiks_gcp_provider-0.64.0.dist-info/RECORD
ADDED

@@ -0,0 +1,8 @@
gcp_provider/__init__.py,sha256=b8pjwnaNe5nJnNXp03jkENxsDXgID0rLheLF0JjgAD8,540
gcp_provider/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
gcp_provider/resources/__init__.py,sha256=ghouF-RCSEMHy6ZwJ29ZgG963unNLHc6z_1grlu-gAg,417
gcp_provider/resources/gke.py,sha256=KQdQ8Vmm24OuKl6gYuQri9vcJDx7aEG7lCMBhZihHQk,15248
gcp_provider/resources/secret.py,sha256=pZMMRRG_79VKh6sEA5i6FA5jnmy7-eKWNQWrR1u4-wc,5678
pragmatiks_gcp_provider-0.64.0.dist-info/WHEEL,sha256=e_m4S054HL0hyR3CpOk-b7Q7fDX6BuFkgL5OjAExXas,80
pragmatiks_gcp_provider-0.64.0.dist-info/METADATA,sha256=gkopH3XYTtmOhtSoio-SPcId5Gg0U3LAPIpwqtRrs1A,2317
pragmatiks_gcp_provider-0.64.0.dist-info/RECORD,,