dagster-k8s 0.24.13__tar.gz → 0.25.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dagster-k8s might be problematic. Click here for more details.

Files changed (28) hide show
  1. {dagster-k8s-0.24.13/dagster_k8s.egg-info → dagster-k8s-0.25.0}/PKG-INFO +2 -3
  2. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/pipes.py +324 -19
  3. dagster-k8s-0.25.0/dagster_k8s/version.py +1 -0
  4. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0/dagster_k8s.egg-info}/PKG-INFO +2 -3
  5. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s.egg-info/requires.txt +1 -1
  6. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/setup.py +2 -3
  7. dagster-k8s-0.24.13/dagster_k8s/version.py +0 -1
  8. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/LICENSE +0 -0
  9. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/MANIFEST.in +0 -0
  10. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/README.md +0 -0
  11. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/__init__.py +0 -0
  12. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/client.py +0 -0
  13. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/container_context.py +0 -0
  14. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/executor.py +0 -0
  15. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/job.py +0 -0
  16. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/kubernetes_version.py +0 -0
  17. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/launcher.py +0 -0
  18. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/models.py +0 -0
  19. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/ops/__init__.py +0 -0
  20. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/ops/k8s_job_op.py +0 -0
  21. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/py.typed +0 -0
  22. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/test.py +0 -0
  23. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s/utils.py +0 -0
  24. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s.egg-info/SOURCES.txt +0 -0
  25. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s.egg-info/dependency_links.txt +0 -0
  26. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s.egg-info/not-zip-safe +0 -0
  27. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/dagster_k8s.egg-info/top_level.txt +0 -0
  28. {dagster-k8s-0.24.13 → dagster-k8s-0.25.0}/setup.cfg +0 -0
@@ -1,17 +1,16 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dagster-k8s
3
- Version: 0.24.13
3
+ Version: 0.25.0
4
4
  Summary: A Dagster integration for k8s
5
5
  Home-page: https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-k8s
6
6
  Author: Dagster Labs
7
7
  Author-email: hello@dagsterlabs.com
8
8
  License: Apache-2.0
9
- Classifier: Programming Language :: Python :: 3.8
10
9
  Classifier: Programming Language :: Python :: 3.9
11
10
  Classifier: Programming Language :: Python :: 3.10
12
11
  Classifier: Programming Language :: Python :: 3.11
13
12
  Classifier: Programming Language :: Python :: 3.12
14
13
  Classifier: License :: OSI Approved :: Apache Software License
15
14
  Classifier: Operating System :: OS Independent
16
- Requires-Python: >=3.8,<3.13
15
+ Requires-Python: >=3.9,<3.13
17
16
  License-File: LICENSE
@@ -1,8 +1,13 @@
1
+ import logging
1
2
  import os
2
3
  import random
3
4
  import re
4
5
  import string
6
+ import threading
7
+ import time
8
+ from collections.abc import Callable, Generator
5
9
  from contextlib import contextmanager
10
+ from datetime import datetime
6
11
  from pathlib import Path
7
12
  from typing import Any, Iterator, Mapping, Optional, Sequence, Set, Union
8
13
 
@@ -14,6 +19,7 @@ from dagster import (
14
19
  from dagster._annotations import public
15
20
  from dagster._core.definitions.resource_annotation import TreatAsResourceParam
16
21
  from dagster._core.errors import DagsterInvariantViolationError
22
+ from dagster._core.execution.context.asset_execution_context import AssetExecutionContext
17
23
  from dagster._core.pipes.client import (
18
24
  PipesClient,
19
25
  PipesClientCompletedInvocation,
@@ -22,6 +28,7 @@ from dagster._core.pipes.client import (
22
28
  PipesParams,
23
29
  )
24
30
  from dagster._core.pipes.context import PipesMessageHandler
31
+ from dagster._core.pipes.merge_streams import LogItem, merge_streams
25
32
  from dagster._core.pipes.utils import (
26
33
  PipesEnvContextInjector,
27
34
  extract_message_or_forward_to_stdout,
@@ -43,6 +50,10 @@ from dagster_k8s.client import (
43
50
  from dagster_k8s.models import k8s_model_from_dict, k8s_snake_case_dict
44
51
  from dagster_k8s.utils import get_common_labels
45
52
 
53
+ INIT_WAIT_TIMEOUT_FOR_READY = 1800.0 # 30mins
54
+ INIT_WAIT_TIMEOUT_FOR_TERMINATE = 10.0 # 10s
55
+ WAIT_TIMEOUT_FOR_READY = 18000.0 # 5hrs
56
+
46
57
 
47
58
  def get_pod_name(run_id: str, op_name: str):
48
59
  clean_op_name = re.sub("[^a-z0-9-]", "", op_name.lower().replace("_", "-"))
@@ -88,6 +99,166 @@ class PipesK8sPodLogsMessageReader(PipesMessageReader):
88
99
  for log_line in log_chunk.split("\n"):
89
100
  extract_message_or_forward_to_stdout(handler, log_line)
90
101
 
102
+ @contextmanager
103
+ def async_consume_pod_logs(
104
+ self,
105
+ context: Union[OpExecutionContext, AssetExecutionContext],
106
+ core_api: kubernetes.client.CoreV1Api,
107
+ pod_name: str,
108
+ namespace: str,
109
+ ) -> Generator:
110
+ """Consume all logs from all containers within the pod.
111
+
112
+ Args:
113
+ context (Union[OpExecutionContext, AssetExecutionContext]): The execution context.
114
+ core_api: The k8s core API.
115
+ pod_name: The pod to collect logs from.
116
+ namespace: The namespace to collect logs from.
117
+
118
+ """
119
+ handler = check.not_none(
120
+ self._handler, "can only consume logs within scope of context manager"
121
+ )
122
+ pods = core_api.list_namespaced_pod(
123
+ namespace=namespace, field_selector=f"metadata.name={pod_name}"
124
+ ).items
125
+
126
+ containers = []
127
+ # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#containerstatus-v1-core
128
+ for pod in pods:
129
+ if pod.status.init_container_statuses:
130
+ containers.extend(
131
+ [
132
+ container_status.name
133
+ for container_status in pod.status.init_container_statuses
134
+ ]
135
+ )
136
+
137
+ if pod.status.container_statuses:
138
+ containers.extend(
139
+ [container_status.name for container_status in pod.status.container_statuses]
140
+ )
141
+
142
+ pod_exit_event = threading.Event()
143
+
144
+ logger = context.log.getChild("consume_pod_logs")
145
+ logger.setLevel(logging.WARNING)
146
+
147
+ with merge_streams(
148
+ streams={
149
+ f"{pod_name}:{container}": self._extract_logs(
150
+ pod_exit_event=pod_exit_event,
151
+ read_namespaced_pod_log=core_api.read_namespaced_pod_log,
152
+ list_namespaced_pod=core_api.list_namespaced_pod,
153
+ pod_name=pod_name,
154
+ namespace=namespace,
155
+ container=container,
156
+ logger=logger.getChild(f"_extract_logs({container})"),
157
+ )
158
+ for container in containers
159
+ },
160
+ log_handler=lambda log_line: extract_message_or_forward_to_stdout(handler, log_line),
161
+ stream_processor=_process_log_stream,
162
+ logger=logger,
163
+ ):
164
+ yield
165
+ logger.info("Setting the pod exit event to do the cleanup of the streams")
166
+ pod_exit_event.set()
167
+
168
+ def _extract_logs(
169
+ self,
170
+ pod_exit_event: threading.Event,
171
+ read_namespaced_pod_log: Callable,
172
+ list_namespaced_pod: Callable,
173
+ pod_name: str,
174
+ namespace: str,
175
+ container: str,
176
+ logger: logging.Logger,
177
+ max_attempts: int = 3,
178
+ sleep_between_attempts: float = 0.5,
179
+ sleeper: Callable = time.sleep,
180
+ ) -> Generator:
181
+ """Return the streams of the Kubernetes logs with the appropriate buffer time.
182
+
183
+ Args:
184
+ pod_exit_event (threading.Event): The threading event that indicates to the
185
+ log reading thread that the pod has exited
186
+ read_namespaced_pod_log (kubernetes.client.CoreV1Api): The Kubernetes CoreV1Api client function for reading
187
+ logs.
188
+ list_namespaced_pod (kubernetes.client.CoreV1Api): The Kubernetes CoreV1Api client function for listing
189
+ pods and their state.
190
+ pod_name (str): The name of the Pipes Pod
191
+ namespace (str): The namespace the pod lives in.
192
+ container (str): The container to read logs from.
193
+ logger (logging.Logger): A logger instance for diagnostic logs.
194
+ max_attempts (int): The number of attempts to read logs in the beginning in
195
+ case we get a failure due to pod still starting.
196
+ sleep_between_attempts (float): Sleep between attempts in the beginning.
197
+ sleeper (Callable): The time.sleep equivalent.
198
+
199
+ Yields:
200
+ The Kubernetes pod log stream generator
201
+
202
+ """
203
+ # Yield the actual stream here to hide implementation detail from caller
204
+ # If readiness/liveness probes aren't configured
205
+ # pods can reach the "Ready" state from the API perspective
206
+ # but still reject incoming communication
207
+ attempt = 0
208
+ common_args = {
209
+ "name": pod_name,
210
+ "namespace": namespace,
211
+ "container": container,
212
+ "_preload_content": False, # avoid JSON processing
213
+ "timestamps": True, # Include timestamps for ordering and deduplication
214
+ "follow": True,
215
+ }
216
+
217
+ # Attempt to get the stream for the first time
218
+ while attempt < max_attempts:
219
+ try:
220
+ yield read_namespaced_pod_log(since_seconds=3600, **common_args).stream()
221
+ break
222
+ except kubernetes.client.ApiException as e:
223
+ if e.status in ["400", 400] and "PodInitializing" in str(e):
224
+ # PodInitializing cannot accept log consumption
225
+ sleeper(sleep_between_attempts)
226
+ sleep_between_attempts *= 2 # exponential backoff
227
+ attempt += 1
228
+ continue
229
+
230
+ # After stream is initially yielded in above loop this while loop is a safeguard against the
231
+ # stream ending while the pod has not exited. If so, we need to refresh the stream.
232
+ while not pod_exit_event.is_set():
233
+ # List the pods now and then use the status to decide whether we should exit
234
+ pods = list_namespaced_pod(
235
+ namespace=namespace, field_selector=f"metadata.name={pod_name}"
236
+ ).items
237
+
238
+ try:
239
+ yield read_namespaced_pod_log(since_seconds=5, **common_args).stream()
240
+ except Exception:
241
+ logger.exception(f"{container}: exception in getting logs")
242
+ break
243
+
244
+ # The logs are still available once the pod has exited and the above call will succeed, we add this extra
245
+ # statement where we will exit if the status of the container was terminated before we read the logs. That
246
+ # ensures that we get all of the logs (the merge_streams will deduplicate them) and we don't waste CPU
247
+ # cycles whilst trying to get more logs.
248
+ pod = pods[0] if pods else None
249
+ if pod is None:
250
+ break
251
+
252
+ all_statuses = []
253
+ all_statuses.extend(pod.status.init_container_statuses or [])
254
+ all_statuses.extend(pod.status.container_statuses or [])
255
+ if not all_statuses:
256
+ break
257
+
258
+ state_by_name = {status.name: status.state for status in all_statuses}
259
+ if state_by_name[container].terminated is not None:
260
+ break
261
+
91
262
  def no_messages_debug_text(self) -> str:
92
263
  return "Attempted to read messages by extracting them from kubernetes pod logs directly."
93
264
 
@@ -191,7 +362,7 @@ class PipesK8sClient(PipesClient, TreatAsResourceParam):
191
362
  def run(
192
363
  self,
193
364
  *,
194
- context: OpExecutionContext,
365
+ context: Union[OpExecutionContext, AssetExecutionContext],
195
366
  extras: Optional[PipesExtras] = None,
196
367
  image: Optional[str] = None,
197
368
  command: Optional[Union[str, Sequence[str]]] = None,
@@ -200,10 +371,13 @@ class PipesK8sClient(PipesClient, TreatAsResourceParam):
200
371
  base_pod_meta: Optional[Mapping[str, Any]] = None,
201
372
  base_pod_spec: Optional[Mapping[str, Any]] = None,
202
373
  ignore_containers: Optional[Set] = None,
374
+ enable_multi_container_logs: bool = False,
203
375
  ) -> PipesClientCompletedInvocation:
204
376
  """Publish a kubernetes pod and wait for it to complete, enriched with the pipes protocol.
205
377
 
206
378
  Args:
379
+ context (Union[OpExecutionContext, AssetExecutionContext]):
380
+ The execution context.
207
381
  image (Optional[str]):
208
382
  The image to set the first container in the pod spec to use.
209
383
  command (Optional[Union[str, Sequence[str]]]):
@@ -214,11 +388,11 @@ class PipesK8sClient(PipesClient, TreatAsResourceParam):
214
388
  env (Optional[Mapping[str,str]]):
215
389
  A mapping of environment variable names to values to set on the first
216
390
  container in the pod spec, on top of those configured on resource.
217
- base_pod_meta (Optional[Mapping[str, Any]]:
391
+ base_pod_meta (Optional[Mapping[str, Any]]):
218
392
  Raw k8s config for the k8s pod's metadata
219
393
  (https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/object-meta/#ObjectMeta)
220
394
  Keys can either snake_case or camelCase. The name value will be overridden.
221
- base_pod_spec (Optional[Mapping[str, Any]]:
395
+ base_pod_spec (Optional[Mapping[str, Any]]):
222
396
  Raw k8s config for the k8s pod's pod spec
223
397
  (https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec).
224
398
  Keys can either snake_case or camelCase. The dagster context will be readable
@@ -232,10 +406,12 @@ class PipesK8sClient(PipesClient, TreatAsResourceParam):
232
406
  Override the default ext protocol message reader.
233
407
  ignore_containers (Optional[Set]): Ignore certain containers from waiting for termination. Defaults to
234
408
  None.
409
+ enable_multi_container_logs (bool): Whether or not to enable multi-container log consumption.
235
410
 
236
411
  Returns:
237
412
  PipesClientCompletedInvocation: Wrapper containing results reported by the external
238
413
  process.
414
+
239
415
  """
240
416
  self._load_k8s_config()
241
417
  client = DagsterKubernetesClient.production_client()
@@ -262,31 +438,79 @@ class PipesK8sClient(PipesClient, TreatAsResourceParam):
262
438
  )
263
439
  client.core_api.create_namespaced_pod(namespace, pod_body)
264
440
  try:
265
- # if were doing direct pod reading, wait for pod to start and then stream logs out
266
- if isinstance(self.message_reader, PipesK8sPodLogsMessageReader):
441
+ # Consume pod logs if possible
442
+ with self.consume_pod_logs(
443
+ context=context,
444
+ client=client,
445
+ namespace=namespace,
446
+ pod_name=pod_name,
447
+ enable_multi_container_logs=enable_multi_container_logs,
448
+ ):
449
+ # We need to wait for the pod to start up so that the log streaming is successful afterwards.
267
450
  client.wait_for_pod(
268
451
  pod_name,
269
452
  namespace,
270
- wait_for_state=WaitForPodState.Ready,
453
+ wait_for_state=WaitForPodState.Terminated,
454
+ ignore_containers=ignore_containers,
271
455
  wait_time_between_attempts=self.poll_interval,
272
456
  )
273
- self.message_reader.consume_pod_logs(
274
- core_api=client.core_api,
275
- pod_name=pod_name,
276
- namespace=namespace,
277
- )
278
-
279
- client.wait_for_pod(
280
- pod_name,
281
- namespace,
282
- wait_for_state=WaitForPodState.Terminated,
283
- ignore_containers=ignore_containers,
284
- wait_time_between_attempts=self.poll_interval,
285
- )
286
457
  finally:
287
458
  client.core_api.delete_namespaced_pod(pod_name, namespace)
459
+
288
460
  return PipesClientCompletedInvocation(pipes_session)
289
461
 
462
+ @contextmanager
463
+ def consume_pod_logs(
464
+ self,
465
+ context: Union[OpExecutionContext, AssetExecutionContext],
466
+ client: DagsterKubernetesClient,
467
+ namespace: str,
468
+ pod_name: str,
469
+ enable_multi_container_logs: bool = False,
470
+ ) -> Iterator:
471
+ """Simple context manager that sets up background pod log consumption when possible.
472
+
473
+ This will be a no-op if the message_reader is of the wrong type.
474
+
475
+ Args:
476
+ context (Union[OpExecutionContext, AssetExecutionContext]): The execution context.
477
+ client (DagsterKubernetesClient): The Dagster Kubernetes client used to wait on and read from the pod.
478
+ namespace (str): The namespace the pod lives in
479
+ pod_name (str): The name of the Pipes Pod
480
+ enable_multi_container_logs (bool): Whether or not to enable multi-container log consumption
481
+
482
+ """
483
+ if isinstance(self.message_reader, PipesK8sPodLogsMessageReader):
484
+ # We need to wait for the pod to start up so that the log streaming is successful afterwards.
485
+ client.wait_for_pod(
486
+ pod_name,
487
+ namespace,
488
+ wait_for_state=WaitForPodState.Ready,
489
+ wait_time_between_attempts=self.poll_interval,
490
+ # After init container gains a status in the first while loop, there is still a check for
491
+ # the ready state in the second while loop, which respects the below timeout only.
492
+ # Very rarely, the pod will be Evicted there and we have to wait the default, unless set.
493
+ wait_timeout=WAIT_TIMEOUT_FOR_READY,
494
+ )
495
+
496
+ if enable_multi_container_logs:
497
+ with self.message_reader.async_consume_pod_logs(
498
+ context=context,
499
+ core_api=client.core_api,
500
+ namespace=namespace,
501
+ pod_name=pod_name,
502
+ ):
503
+ yield
504
+ return
505
+ else:
506
+ self.message_reader.consume_pod_logs(
507
+ core_api=client.core_api,
508
+ namespace=namespace,
509
+ pod_name=pod_name,
510
+ )
511
+
512
+ yield
513
+
290
514
 
291
515
  def _detect_current_namespace(
292
516
  kubeconfig_file: Optional[str], namespace_secret_path: Path = _NAMESPACE_SECRET_PATH
@@ -423,3 +647,84 @@ def build_pod_body(
423
647
  "spec": spec,
424
648
  },
425
649
  )
650
+
651
+
652
+ def _process_log_stream(stream: Iterator[bytes]) -> Iterator[LogItem]:
653
+ """This expects the logs to be of the format b'<timestamp> <msg>' and only the
654
+ '<msg>' is forwarded to Dagster. If the <timestamp> is not there then the lines
655
+ will be joined together. There is a limitation that the first item in the stream
656
+ needs to always contain a timestamp as the first element.
657
+
658
+ The timestamp is expected to be in '2024-03-22T02:17:29.885548Z' format and
659
+ the subsecond part will be truncated to microseconds.
660
+
661
+ If we fail to parse the timestamp, the priority will be set to zero in
662
+ order to not drop any log items.
663
+
664
+ Args:
665
+ stream (Iterator[bytes]): A stream of log chunks
666
+
667
+ Yields:
668
+ Iterator[LogItem]: A log containing the timestamp and msg
669
+ """
670
+ timestamp = ""
671
+ log = ""
672
+
673
+ for log_chunk in stream:
674
+ for line in log_chunk.decode("utf-8").split("\n"):
675
+ maybe_timestamp, _, tail = line.partition(" ")
676
+ if not timestamp:
677
+ # The first item in the stream will always have a timestamp.
678
+ timestamp = maybe_timestamp
679
+ log = tail
680
+ elif maybe_timestamp == timestamp:
681
+ # We have multiple messages with the same timestamp in this chunk, add them separated
682
+ # with a new line
683
+ log += f"\n{tail}"
684
+ elif not (
685
+ len(maybe_timestamp) == len(timestamp) and _is_kube_timestamp(maybe_timestamp)
686
+ ):
687
+ # The line is continuation of a long line that got truncated and thus doesn't
688
+ # have a timestamp in the beginning of the line.
689
+ # Since all timestamps in the RFC format returned by Kubernetes have the same
690
+ # length (when represented as strings) we know that the value won't be a timestamp
691
+ # if the string lengths differ, however if they do not differ, we need to parse the
692
+ # timestamp.
693
+ log += line
694
+ else:
695
+ # New log line has been observed, send in the next cycle
696
+ yield LogItem(timestamp=timestamp, log=log)
697
+ timestamp = maybe_timestamp
698
+ log = tail
699
+
700
+ # Send the last message that we were building
701
+ if log or timestamp:
702
+ yield LogItem(timestamp=timestamp, log=log)
703
+
704
+
705
+ def _is_kube_timestamp(maybe_timestamp: str) -> bool:
706
+ # This extra stripping logic is necessary, as Python's strptime fn doesn't
707
+ # handle valid ISO 8601 timestamps with nanoseconds which we receive in k8s
708
+ # e.g. 2024-03-22T02:17:29.185548486Z
709
+
710
+ # This is likely fine. We're just trying to confirm whether or not it's a
711
+ # valid timestamp, not trying to parse it with full correctness.
712
+ if maybe_timestamp.endswith("Z"):
713
+ maybe_timestamp = maybe_timestamp[:-1] # Strip the "Z"
714
+ if "." in maybe_timestamp:
715
+ # Split at the decimal point to isolate the fractional seconds
716
+ date_part, frac_part = maybe_timestamp.split(".")
717
+ maybe_timestamp = f"{date_part}.{frac_part[:6]}Z"
718
+ else:
719
+ maybe_timestamp = f"{maybe_timestamp}Z" # Add the "Z" back if no fractional part
720
+ try:
721
+ datetime.strptime(maybe_timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
722
+ return True
723
+ except ValueError:
724
+ return False
725
+ else:
726
+ try:
727
+ datetime.strptime(maybe_timestamp, "%Y-%m-%dT%H:%M:%S%z")
728
+ return True
729
+ except ValueError:
730
+ return False
@@ -0,0 +1 @@
1
+ __version__ = "0.25.0"
@@ -1,17 +1,16 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: dagster-k8s
3
- Version: 0.24.13
3
+ Version: 0.25.0
4
4
  Summary: A Dagster integration for k8s
5
5
  Home-page: https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-k8s
6
6
  Author: Dagster Labs
7
7
  Author-email: hello@dagsterlabs.com
8
8
  License: Apache-2.0
9
- Classifier: Programming Language :: Python :: 3.8
10
9
  Classifier: Programming Language :: Python :: 3.9
11
10
  Classifier: Programming Language :: Python :: 3.10
12
11
  Classifier: Programming Language :: Python :: 3.11
13
12
  Classifier: Programming Language :: Python :: 3.12
14
13
  Classifier: License :: OSI Approved :: Apache Software License
15
14
  Classifier: Operating System :: OS Independent
16
- Requires-Python: >=3.8,<3.13
15
+ Requires-Python: >=3.9,<3.13
17
16
  License-File: LICENSE
@@ -1,3 +1,3 @@
1
- dagster==1.8.13
1
+ dagster==1.9.0
2
2
  kubernetes<32
3
3
  google-auth!=2.23.1
@@ -32,7 +32,6 @@ setup(
32
32
  description="A Dagster integration for k8s",
33
33
  url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-k8s",
34
34
  classifiers=[
35
- "Programming Language :: Python :: 3.8",
36
35
  "Programming Language :: Python :: 3.9",
37
36
  "Programming Language :: Python :: 3.10",
38
37
  "Programming Language :: Python :: 3.11",
@@ -42,9 +41,9 @@ setup(
42
41
  ],
43
42
  packages=find_packages(exclude=["dagster_k8s_tests*"]),
44
43
  include_package_data=True,
45
- python_requires=">=3.8,<3.13",
44
+ python_requires=">=3.9,<3.13",
46
45
  install_requires=[
47
- "dagster==1.8.13",
46
+ "dagster==1.9.0",
48
47
  f"kubernetes<{KUBERNETES_VERSION_UPPER_BOUND}",
49
48
  # exclude a google-auth release that added an overly restrictive urllib3 pin that confuses dependency resolvers
50
49
  "google-auth!=2.23.1",
@@ -1 +0,0 @@
1
- __version__ = "0.24.13"
File without changes
File without changes
File without changes
File without changes