peak-sdk 1.13.0__py3-none-any.whl → 1.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. peak/_metadata.py +4 -1
  2. peak/_version.py +2 -2
  3. peak/cli/press/apps/specs.py +14 -1
  4. peak/cli/press/blocks/specs.py +13 -1
  5. peak/cli/press/deployments.py +58 -2
  6. peak/cli/resources/alerts/emails.py +8 -4
  7. peak/cli/resources/artifacts.py +6 -0
  8. peak/handler.py +56 -16
  9. peak/press/apps.py +39 -1
  10. peak/press/blocks.py +39 -1
  11. peak/press/deployments.py +63 -1
  12. peak/resources/alerts.py +10 -2
  13. peak/resources/artifacts.py +39 -3
  14. peak/sample_yaml/press/apps/specs/create_app_spec.yaml +31 -0
  15. peak/sample_yaml/press/apps/specs/create_app_spec_release.yaml +31 -0
  16. peak/sample_yaml/press/blocks/specs/service/api/create_block_spec.yaml +31 -0
  17. peak/sample_yaml/press/blocks/specs/service/api/create_block_spec_release.yaml +31 -0
  18. peak/sample_yaml/press/blocks/specs/service/webapp/create_block_spec.yaml +31 -0
  19. peak/sample_yaml/press/blocks/specs/service/webapp/create_block_spec_release.yaml +31 -0
  20. peak/sample_yaml/press/blocks/specs/workflow/create_block_spec.yaml +31 -0
  21. peak/sample_yaml/press/blocks/specs/workflow/create_block_spec_release.yaml +31 -0
  22. peak/sample_yaml/press/deployments/patch_app_parameters_v2.yaml +15 -0
  23. peak/sample_yaml/press/deployments/patch_block_parameters_v2.yaml +10 -0
  24. peak/sample_yaml/resources/artifacts/create_artifact.yaml +3 -0
  25. peak/sample_yaml/resources/artifacts/create_artifact_version.yaml +2 -0
  26. peak/sample_yaml/resources/emails/send_email.yaml +3 -0
  27. peak/session.py +11 -7
  28. peak/telemetry.py +4 -1
  29. peak/tools/logging/logger.py +2 -2
  30. {peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/METADATA +6 -6
  31. {peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/RECORD +34 -32
  32. {peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/LICENSE +0 -0
  33. {peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/WHEEL +0 -0
  34. {peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/entry_points.txt +0 -0
peak/resources/alerts.py CHANGED
@@ -23,6 +23,7 @@
 
 from __future__ import annotations
 
+import json
 from typing import Any, Dict, Iterator, List, Literal, Optional, overload
 
 from peak.base_client import BaseClient
@@ -126,6 +127,7 @@ class Alert(BaseClient):
     def send_email(
         self,
         body: Dict[str, Any],
+        attachments: Optional[list[str]] = None,
     ) -> Dict[str, Any]:
         """Send an email to the specified recipients using the specified template.
 
@@ -134,6 +136,8 @@ class Alert(BaseClient):
 
         Args:
             body (Dict[str, Any]): A dictionary containing the details of the email to send.
+            attachments (Optional[list[str]]): A list of file paths to attach to the email.
+                When a directory is provided, a zip is automatically created for the same.
 
         Returns:
             Dict[str, Any]: A dictionary containing the ID of the email sent.
@@ -161,11 +165,15 @@ class Alert(BaseClient):
         """
         method, endpoint = HttpMethods.POST, f"{self.BASE_ENDPOINT}/emails"
 
+        parsed_body = {k: json.dumps(v) if not isinstance(v, str) else v for (k, v) in body.items()}
+
         return self.session.create_request(  # type: ignore[no-any-return]
             endpoint,
             method,
-            body=body,
-            content_type=ContentType.APPLICATION_JSON,
+            body=parsed_body,
+            content_type=ContentType.MULTIPART_FORM_DATA,
+            path=attachments,
+            file_key="attachments",
         )
 
     def describe_email(self, email_id: int) -> Dict[str, Any]:
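For SDK users, the upshot is that `send_email` now sends a multipart request and accepts an optional list of attachment paths. A minimal usage sketch, assuming the module-level `get_client()` factory in `peak.resources.alerts`; the body keys and file paths are illustrative:

```python
# Sketch only: body keys and paths are illustrative, not prescriptive.
from peak.resources import alerts

client = alerts.get_client()

response = client.send_email(
    body={
        "recipients": ["someone@example.com"],
        "templateName": "my-template",
        "templateParameters": {"name": "John", "company": "Peak"},
    },
    # Uploaded under the "attachments" form field; a directory such as
    # "outputs" is zipped automatically before upload.
    attachments=["model.txt", "outputs"],
)
print(response)  # a dict containing the ID of the email sent
```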
peak/resources/artifacts.py CHANGED
@@ -19,12 +19,14 @@
 # # along with this program. If not, see <https://apache.org/licenses/LICENSE-2.0>
 #
 """Artifact client module."""
+
 from __future__ import annotations
 
 from typing import Any, Dict, Iterator, List, Literal, Optional, overload
 
 from peak.base_client import BaseClient
 from peak.constants import ArtifactInfo, ContentType, HttpMethods
+from peak.helpers import parse_body_for_multipart_request
 from peak.session import Session
 
 
@@ -104,6 +106,9 @@ class Artifact(BaseClient):
         name: str,
         artifact: ArtifactInfo,
         description: Optional[str] = None,
+        source: Optional[str] = None,
+        scan: Optional[bool] = None,
+        validate: Optional[bool] = None,
     ) -> Dict[str, Any]:
         """Create a new artifact.
 
@@ -115,6 +120,9 @@ class Artifact(BaseClient):
             artifact (ArtifactInfo): Mapping of artifact attributes that specifies how the artifact will be generated,
                 it accepts two keys `path`, which is required and `ignore_files` which is optional, and defaults to `.dockerignore`, it is strongly advised that users use `ignore_files` when generating artifacts to avoid copying any extra files in artifact.
             description (str | None): A brief description of the artifact.
+            source (str | None): The source of the artifact.
+            scan (bool | None): Whether to scan the artifact for vulnerabilities.
+            validate (bool | None): Whether to validate the artifact. Source needs to be provided for the validation to work.
 
         Returns:
             Dict[str, Any]: `Id` and `Version` of the created artifact.
@@ -127,13 +135,27 @@ class Artifact(BaseClient):
             InternalServerErrorException: The server failed to process the request.
         """
         method, endpoint = HttpMethods.POST, f"{self.BASE_ENDPOINT}/artifacts/"
-        body: Dict[str, Any] = {"name": name, "description": description}
+        body: Dict[str, Any] = {
+            "name": name,
+            "description": description,
+        }
+
+        if source is not None:
+            body["source"] = source
+
+        if scan is not None:
+            body["scan"] = scan
+
+        if validate is not None:
+            body["validate"] = validate
+
+        updated_body = parse_body_for_multipart_request(body)
 
         return self.session.create_request(  # type: ignore[no-any-return]
             endpoint,
             method,
             content_type=ContentType.MULTIPART_FORM_DATA,
-            body=body,
+            body=updated_body,
             path=artifact["path"],
             ignore_files=artifact.get("ignore_files"),
         )
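The new `source`, `scan`, and `validate` options are forwarded as multipart form fields. A sketch of calling the updated method, assuming the `get_client()` factory in `peak.resources.artifacts`; the values mirror the updated `create_artifact.yaml` sample further down:

```python
# Sketch: create an artifact with the new optional flags.
from peak.resources import artifacts

client = artifacts.get_client()

result = client.create_artifact(
    name="new-artifact",
    artifact={"path": ".", "ignore_files": [".dockerignore"]},
    description="Creating a new artifact",
    source="email",  # a source must be provided for validation to work
    scan=True,       # scan the artifact for vulnerabilities
    validate=True,
)
print(result)  # `Id` and `Version` of the created artifact
```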
@@ -205,6 +227,8 @@ class Artifact(BaseClient):
         self,
         artifact_id: str,
         artifact: ArtifactInfo,
+        scan: Optional[bool] = None,
+        validate: Optional[bool] = None,
     ) -> Dict[str, int]:
         """Create a new version of the artifact.
 
@@ -215,6 +239,8 @@ class Artifact(BaseClient):
             artifact_id (str): ID of the artifact for which a new version is to be created.
             artifact (ArtifactInfo): Mapping of artifact attributes that specifies how the artifact will be generated,
                 it accepts two keys `path`, which is required and `ignore_files` which is optional, and defaults to `.dockerignore`, it is strongly advised that users use `ignore_files` when generating artifacts to avoid copying any extra files in artifact.
+            scan (bool | None): Whether to scan the artifact for vulnerabilities.
+            validate (bool | None): Whether to validate the artifact. Source needs to be present in the artifact for the validation to work.
 
         Returns:
             Dict[str, int]: version number.
@@ -229,11 +255,21 @@ class Artifact(BaseClient):
         """
         method, endpoint = HttpMethods.PUT, f"{self.BASE_ENDPOINT}/artifacts/{artifact_id}"
 
+        body = {}
+
+        if scan is not None:
+            body["scan"] = scan
+
+        if validate is not None:
+            body["validate"] = validate
+
+        updated_body = parse_body_for_multipart_request(body)
+
         return self.session.create_request(  # type: ignore[no-any-return]
             endpoint,
             method,
             content_type=ContentType.MULTIPART_FORM_DATA,
-            body={},
+            body=updated_body,
             path=artifact["path"],
             ignore_files=artifact.get("ignore_files"),
         )
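The version-creation path gains the same `scan`/`validate` flags. A sketch, assuming the method shown here is `create_artifact_version` (matching the `create_artifact_version.yaml` sample; the hunk above does not show the method name) and a placeholder artifact ID:

```python
# Sketch: publish a new artifact version with the scan/validate flags.
from peak.resources import artifacts

client = artifacts.get_client()

version = client.create_artifact_version(
    artifact_id="<artifact-id>",  # placeholder
    artifact={"path": "."},
    scan=True,
    validate=False,  # validation needs a source on the artifact
)
print(version)  # version number of the new artifact version
```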
peak/sample_yaml/press/apps/specs/create_app_spec.yaml CHANGED
@@ -87,6 +87,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
   featured: true
   scope: shared
   tenants:
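The new `conditions` key gates a parameter on the values of other parameters: above, `aggregation_method` applies only when `max_retries` equals 3 AND `agg_type` equals "AVG", while `filter_condition` uses a single bare condition. The same block recurs in the seven other spec samples below. A hypothetical evaluator, written only to pin down that reading (this helper is not part of the SDK):

```python
# Hypothetical: evaluates a `conditions` list against current parameter values.
from typing import Any, Dict, List

def evaluate(conditions: List[Dict[str, Any]], values: Dict[str, Any]) -> bool:
    results = []
    for cond in conditions:
        if "conditionType" in cond:  # nested group, e.g. conditionType: "AND"
            nested = [evaluate([c], values) for c in cond["conditions"]]
            results.append(all(nested) if cond["conditionType"] == "AND" else any(nested))
        elif cond.get("operator") == "equals":  # bare condition
            results.append(values.get(cond["dependsOn"]) == cond["value"])
    return all(results)

# aggregation_method is active only when both sub-conditions hold:
print(evaluate(
    [{"conditionType": "AND", "conditions": [
        {"dependsOn": "max_retries", "operator": "equals", "value": 3},
        {"dependsOn": "agg_type", "operator": "equals", "value": "AVG"},
    ]}],
    {"max_retries": 3, "agg_type": "AVG"},
))  # True
```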
peak/sample_yaml/press/apps/specs/create_app_spec_release.yaml CHANGED
@@ -76,3 +76,34 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
peak/sample_yaml/press/blocks/specs/service/api/create_block_spec.yaml CHANGED
@@ -90,6 +90,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/blocks/specs/service/api/create_block_spec_release.yaml CHANGED
@@ -81,6 +81,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/blocks/specs/service/webapp/create_block_spec.yaml CHANGED
@@ -92,6 +92,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/blocks/specs/service/webapp/create_block_spec_release.yaml CHANGED
@@ -83,6 +83,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/blocks/specs/workflow/create_block_spec.yaml CHANGED
@@ -172,6 +172,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/blocks/specs/workflow/create_block_spec_release.yaml CHANGED
@@ -161,6 +161,37 @@ parameters:
       defaultValue:
         - input.csv
         - output.csv
+    - name: max_retries
+      type: number
+      required: false
+      title: Maximum Retries
+      description: Set maximum number of retry attempts
+      defaultValue: 3
+    - description: "specify the method to aggregate the data"
+      hideValue: true
+      required: true
+      title: "Aggregation Method"
+      name: "aggregation_method"
+      type: "string"
+      conditions:
+        - conditionType: "AND"
+          conditions:
+            - dependsOn: "max_retries"
+              operator: "equals"
+              value: 3
+            - dependsOn: "agg_type"
+              operator: "equals"
+              value: "AVG"
+    - description: "Specify the filtering criteria for data processing"
+      hideValue: true
+      required: true
+      title: "Filter Condition"
+      name: "filter_condition"
+      type: "string"
+      conditions:
+        - dependsOn: "max_retries"
+          operator: "equals"
+          value: 3
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/press/deployments/patch_app_parameters_v2.yaml ADDED
@@ -0,0 +1,15 @@
+# parameters.yaml
+
+body:
+  appParameters:
+    agg_type: "MAX"
+    num_iterations: 50
+    email_notifications: true
+    file_names:
+      - input.csv
+      - output.csv
+  parameters:
+    block1:
+      email_notifications: false
+    block2:
+      num_iterations: 10
peak/sample_yaml/press/deployments/patch_block_parameters_v2.yaml ADDED
@@ -0,0 +1,10 @@
+# parameters.yaml
+
+body:
+  parameters:
+    agg_type: "MAX"
+    num_iterations: 50
+    email_notifications: true
+    file_names:
+      - input.csv
+      - output.csv
peak/sample_yaml/resources/artifacts/create_artifact.yaml CHANGED
@@ -2,6 +2,9 @@
 
 name: new-artifact
 description: Creating a new artifact
+source: "email"
+scan: True
+validate: True
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/resources/artifacts/create_artifact_version.yaml CHANGED
@@ -1,5 +1,7 @@
 # artifact_version.yaml
 
+scan: True
+validate: False
 artifact:
   path: "."
   ignore_files:
peak/sample_yaml/resources/emails/send_email.yaml CHANGED
@@ -13,3 +13,6 @@ body:
   templateParameters:
     name: John
     company: Peak
+  attachments:
+    - model.txt
+    - outputs
peak/session.py CHANGED
@@ -1,5 +1,5 @@
 #
-# # Copyright © 2024 Peak AI Limited. or its affiliates. All Rights Reserved.
+# # Copyright © 2025 Peak AI Limited. or its affiliates. All Rights Reserved.
 # #
 # # Licensed under the Apache License, Version 2.0 (the "License"). You
 # # may not use this file except in compliance with the License. A copy of
@@ -19,6 +19,7 @@
 # # along with this program. If not, see <https://apache.org/licenses/LICENSE-2.0>
 #
 """Session module for Peak API."""
+
 from __future__ import annotations
 
 import os
@@ -87,9 +88,10 @@ class Session:
         *,
         params: Optional[Dict[str, Any]] = None,
         body: Optional[Dict[str, Any]] = None,
-        path: Optional[str] = None,
+        path: Optional[str | list[str]] = None,
         ignore_files: Optional[list[str]] = None,
         subdomain: Optional[str] = "service",
+        file_key: Optional[str] = "artifact",
     ) -> Any:
         """Prepares a request to be sent over the network.
 
@@ -105,6 +107,7 @@ class Session:
             path (Optional[str] optional): path to the file or folder that will be compressed and used as artifact, required for multipart requests.
             ignore_files(Optional[list[str]]): Ignore files to be used when creating artifact, used only for multipart requests.
             subdomain (Optional[str]): Subdomain for the endpoint. Defaults to `service`.
+            file_key (Optional[str]): the field in which the files must be uploaded
 
         Returns:
             Any: response dict object.
@@ -124,6 +127,7 @@ class Session:
             session_meta={
                 "stage": self.stage,
             },
+            file_key=file_key,
         ).json()
 
     def create_generator_request(
@@ -240,7 +244,7 @@ class Session:
 
         logger.info("auth_token not given, searching for 'PEAK_AUTH_TOKEN' or 'API_KEY' in env variables")
         if not os.environ.get("API_KEY") and not os.environ.get("PEAK_AUTH_TOKEN"):
-            raise exceptions.MissingEnvironmentVariableException(env_var="PEAK_AUTH_TOKEN or API_KEY")
+            raise exceptions.MissingEnvironmentVariableException(env_var="PEAK_AUTH_TOKEN")
         if os.environ.get("PEAK_AUTH_TOKEN"):
             self.auth_token = os.environ["PEAK_AUTH_TOKEN"]
             return
@@ -251,12 +255,12 @@ class Session:
             self.stage = Stage(stage)
             return
 
-        logger.info("stage not given, searching for STAGE in env variables")
-        if not os.environ.get("STAGE"):
-            logger.info("STAGE environment variable is not set, defaulting to PROD")
+        logger.info("stage not given, searching for PEAK__STAGE in env variables")
+        if not os.environ.get("PEAK__STAGE"):
+            logger.info("PEAK__STAGE environment variable is not set, defaulting to PROD")
             self.stage = Stage.PROD
             return
-        self.stage = Stage(os.environ["STAGE"])
+        self.stage = Stage(os.environ["PEAK__STAGE"])
 
 
 __all__: List[str] = ["Session", "_get_default_session"]
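Two behavioural changes to note here: the stage is now read from `PEAK__STAGE` rather than `STAGE`, and the missing-credentials error names only `PEAK_AUTH_TOKEN`. A minimal sketch of relying on the environment-based resolution shown above (values are illustrative placeholders; assumes the no-argument `Session()` constructor resolves both, as the hunks suggest):

```python
# Sketch: configure the SDK via the renamed environment variables.
import os

os.environ["PEAK_AUTH_TOKEN"] = "<personal-access-token>"  # placeholder
os.environ["PEAK__STAGE"] = "prod"  # formerly STAGE; PROD is the default when unset

from peak.session import Session

session = Session()  # picks up auth token and stage from the environment
```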
peak/telemetry.py CHANGED
@@ -103,10 +103,11 @@ def telemetry(make_request: F) -> F:
         headers: Optional[Dict[str, str]] = None,
         params: Optional[Dict[str, Any]] = None,
         body: Optional[Dict[str, Any]] = None,
-        path: Optional[str] = None,
+        path: Optional[str | list[str]] = None,
         request_kwargs: Optional[Dict[str, int | bool | str | float]] = None,
         ignore_files: Optional[list[str]] = None,
         session_meta: Optional[Dict[str, Any]] = None,
+        file_key: Optional[str] = "artifact",
     ) -> requests.Response:
         """A decorator that wraps over the make_request function to send telemetry requests as required.
 
@@ -122,6 +123,7 @@ def telemetry(make_request: F) -> F:
             request_kwargs(Dict[str, int | bool | str | float] | None): extra arguments to be passed when making the request.
             ignore_files(Optional[list[str]]): Ignore files to be used when creating artifact
             session_meta(Dict[str, Any]): Metadata about the session object, like - stage
+            file_key (Optional[str]): the field in which the files must be uploaded
 
         Returns:
             requests.Response: response json
@@ -192,6 +194,7 @@ def telemetry(make_request: F) -> F:
             path=path,
             ignore_files=ignore_files,
             request_kwargs=request_kwargs,
+            file_key=file_key,
         )
 
         trigger_usage_collection(res=res)
peak/tools/logging/logger.py CHANGED
@@ -1,5 +1,5 @@
 #
-# # Copyright © 2024 Peak AI Limited. or its affiliates. All Rights Reserved.
+# # Copyright © 2025 Peak AI Limited. or its affiliates. All Rights Reserved.
 # #
 # # Licensed under the Apache License, Version 2.0 (the "License"). You
 # # may not use this file except in compliance with the License. A copy of
@@ -84,7 +84,7 @@ def peak_contexts_processor(
         "press_deployment_id": os.getenv("PRESS_DEPLOYMENT_ID"),
         "run_id": os.getenv("PEAK_RUN_ID"),
         "exec_id": os.getenv("PEAK_EXEC_ID"),
-        "stage": os.getenv("STAGE"),
+        "stage": os.getenv("PEAK__STAGE"),
         "tenant_name": os.getenv("TENANT_NAME", os.getenv("TENANT")),
         "tenant_id": os.getenv("TENANT_ID"),
         "api_name": os.getenv("PEAK_API_NAME"),
{peak_sdk-1.13.0.dist-info → peak_sdk-1.15.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: peak-sdk
-Version: 1.13.0
+Version: 1.15.0
 Summary: Python SDK for interacting with the Peak platform
 Home-page: https://docs.peak.ai/sdk/latest/
 License: Apache-2.0
@@ -106,7 +106,7 @@ Follow these steps to create a virtual environment using Python's built-in `venv
    This should return a response of the following format
 
    ```bash
-   peak-cli==1.13.0
+   peak-cli==1.15.0
    Python==3.12.3
    System==Darwin(23.6.0)
    ```
@@ -122,14 +122,14 @@ Follow these steps to create a virtual environment using Python's built-in `venv
    This should print the version of the SDK
 
    ```
-   1.13.0
+   1.15.0
    ```
 
 ### Using the SDK and CLI
 
-- To start using the SDK and CLI, you'll need either an API Key or a Personal Access Token (PAT).
-- If you don't have one yet, sign up for an account on the Peak platform to obtain your API key or Personal Access token (PAT).
-- To export it, run the following command in your terminal and replace <peak_auth_token> with your actual API key or PAT:
+- To start using the SDK and CLI, you'll need a Personal Access Token (PAT).
+- If you don't have one yet, sign up for an account on the Peak platform to obtain your Personal Access token (PAT).
+- To export it, run the following command in your terminal and replace <peak_auth_token> with your actual PAT:
   ```
   export PEAK_AUTH_TOKEN=<peak_auth_token>
   ```