sibi-dst 0.3.32__tar.gz → 0.3.33__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/PKG-INFO +1 -1
  2. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/pyproject.toml +1 -1
  3. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/_parquet_artifact.py +63 -0
  4. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/_parquet_reader.py +36 -0
  5. sibi_dst-0.3.33/sibi_dst/df_helper/backends/django/_db_connection.py +88 -0
  6. sibi_dst-0.3.33/sibi_dst/df_helper/backends/django/_io_dask.py +450 -0
  7. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/django/_load_from_db.py +96 -1
  8. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/django/_sql_model_builder.py +132 -6
  9. sibi_dst-0.3.33/sibi_dst/df_helper/backends/http/_http_config.py +101 -0
  10. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/parquet/_filter_handler.py +28 -0
  11. sibi_dst-0.3.33/sibi_dst/df_helper/backends/parquet/_parquet_options.py +205 -0
  12. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/sqlalchemy/_db_connection.py +17 -0
  13. sibi_dst-0.3.33/sibi_dst/df_helper/backends/sqlalchemy/_load_from_db.py +141 -0
  14. sibi_dst-0.3.33/sibi_dst/df_helper/backends/sqlalchemy/_sql_model_builder.py +192 -0
  15. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/core/_params_config.py +59 -0
  16. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/geopy_helper/geo_location_service.py +14 -0
  17. sibi_dst-0.3.33/sibi_dst/geopy_helper/utils.py +89 -0
  18. sibi_dst-0.3.33/sibi_dst/osmnx_helper/base_osm_map.py +419 -0
  19. sibi_dst-0.3.33/sibi_dst/osmnx_helper/utils.py +489 -0
  20. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/clickhouse_writer.py +27 -0
  21. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/data_utils.py +32 -1
  22. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/data_wrapper.py +94 -6
  23. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/date_utils.py +35 -0
  24. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/log_utils.py +19 -2
  25. sibi_dst-0.3.32/sibi_dst/df_helper/backends/django/_db_connection.py +0 -48
  26. sibi_dst-0.3.32/sibi_dst/df_helper/backends/django/_io_dask.py +0 -242
  27. sibi_dst-0.3.32/sibi_dst/df_helper/backends/http/_http_config.py +0 -50
  28. sibi_dst-0.3.32/sibi_dst/df_helper/backends/parquet/_parquet_options.py +0 -101
  29. sibi_dst-0.3.32/sibi_dst/df_helper/backends/sqlalchemy/_load_from_db.py +0 -63
  30. sibi_dst-0.3.32/sibi_dst/df_helper/backends/sqlalchemy/_sql_model_builder.py +0 -131
  31. sibi_dst-0.3.32/sibi_dst/geopy_helper/utils.py +0 -55
  32. sibi_dst-0.3.32/sibi_dst/osmnx_helper/base_osm_map.py +0 -165
  33. sibi_dst-0.3.32/sibi_dst/osmnx_helper/utils.py +0 -267
  34. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/README.md +0 -0
  35. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/__init__.py +0 -0
  36. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/__init__.py +0 -0
  37. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/_df_helper.py +0 -0
  38. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/__init__.py +0 -0
  39. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/django/__init__.py +0 -0
  40. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/http/__init__.py +0 -0
  41. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/parquet/__init__.py +0 -0
  42. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/sqlalchemy/__init__.py +0 -0
  43. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/sqlalchemy/_filter_handler.py +0 -0
  44. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/backends/sqlalchemy/_io_dask.py +0 -0
  45. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/core/__init__.py +0 -0
  46. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/core/_defaults.py +0 -0
  47. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/core/_filter_handler.py +0 -0
  48. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/core/_query_config.py +0 -0
  49. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/data_cleaner.py +0 -0
  50. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/geopy_helper/__init__.py +0 -0
  51. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/osmnx_helper/__init__.py +0 -0
  52. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/osmnx_helper/basemaps/__init__.py +0 -0
  53. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/osmnx_helper/basemaps/calendar_html.py +0 -0
  54. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/osmnx_helper/basemaps/router_plotter.py +0 -0
  55. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/tests/__init__.py +0 -0
  56. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/tests/test_data_wrapper_class.py +0 -0
  57. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/__init__.py +0 -0
  58. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/airflow_manager.py +0 -0
  59. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/credentials.py +0 -0
  60. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/df_utils.py +0 -0
  61. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/file_utils.py +0 -0
  62. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/filepath_generator.py +0 -0
  63. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/parquet_saver.py +0 -0
  64. {sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/utils/storage_manager.py +0 -0
{sibi_dst-0.3.32 → sibi_dst-0.3.33}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sibi-dst
-Version: 0.3.32
+Version: 0.3.33
 Summary: Data Science Toolkit
 Author: Luis Valverde
 Author-email: lvalverdeb@gmail.com
{sibi_dst-0.3.32 → sibi_dst-0.3.33}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sibi-dst"
-version = "0.3.32"
+version = "0.3.33"
 description = "Data Science Toolkit"
 authors = ["Luis Valverde <lvalverdeb@gmail.com>"]
 readme = "README.md"
{sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/_parquet_artifact.py
@@ -9,11 +9,74 @@ from sibi_dst.utils import DateUtils
 
 
 class ParquetArtifact(DfHelper):
+    """
+    Class designed to manage Parquet data storage and retrieval using a specified
+    DataWrapper class for data processing. It provides functionality for loading,
+    updating, rebuilding, and generating Parquet files within a configurable
+    storage filesystem. The class ensures that all essential configurations and
+    filesystems are properly set up before operations.
+
+    Detailed functionality includes support for dynamically managing and generating
+    Parquet files based on time periods, with customizable options for paths,
+    filenames, date fields, and more. It is an abstraction for efficiently handling
+    storage tasks related to distributed or local file systems.
+
+    :ivar config: Configuration dictionary containing all configurable parameters
+        for managing Parquet data storage, such as paths, filenames,
+        and date ranges.
+    :type config: dict
+    :ivar df: Cached Dask DataFrame used to store and manipulate data loaded
+        from the Parquet file.
+    :type df: Optional[dask.dataframe.DataFrame]
+    :ivar data_wrapper_class: Class responsible for abstracting data processing
+        operations required for Parquet file generation.
+    :type data_wrapper_class: type
+    :ivar date_field: Name of the field used to identify and process data by date.
+    :type date_field: Optional[str]
+    :ivar parquet_storage_path: Filesystem path to store Parquet files.
+    :type parquet_storage_path: Optional[str]
+    :ivar parquet_filename: Name of the Parquet file to be generated and managed.
+    :type parquet_filename: Optional[str]
+    :ivar parquet_start_date: Date string specifying the start date for data range
+        processing.
+    :type parquet_start_date: Optional[str]
+    :ivar parquet_end_date: Date string specifying the end date for data range
+        processing.
+    :type parquet_end_date: Optional[str]
+    :ivar filesystem_type: Type of the filesystem used for managing storage
+        operations (e.g., `file`, `s3`, etc.).
+    :type filesystem_type: str
+    :ivar filesystem_options: Additional options for configuring the filesystem.
+    :type filesystem_options: dict
+    :ivar fs: Filesystem object used for storage operations.
+    :type fs: fsspec.AbstractFileSystem
+    """
     DEFAULT_CONFIG = {
         'backend': 'parquet'
     }
 
     def __init__(self, data_wrapper_class, **kwargs):
+        """
+        Initializes an instance of the class with given configuration and validates
+        required parameters. Sets up the filesystem to handle storage, ensuring
+        necessary directories exist. The configuration supports a variety of options
+        to manage parquet storage requirements, including paths, filenames, and date
+        ranges.
+
+        :param data_wrapper_class: The class responsible for wrapping data to be managed
+            by this instance.
+        :type data_wrapper_class: type
+        :param kwargs: Arbitrary keyword arguments to override default configuration.
+            Includes settings for `date_field`, `parquet_storage_path`,
+            `parquet_filename`, `parquet_start_date`, `parquet_end_date`,
+            `filesystem_type`, `filesystem_options`, and `fs`.
+        :type kwargs: dict
+
+        :raises ValueError: If any of the required configuration options
+            (`date_field`, `parquet_storage_path`,
+            `parquet_filename`, `parquet_start_date`,
+            or `parquet_end_date`) are missing or not set properly.
+        """
         self.config = {
             **self.DEFAULT_CONFIG,
             **kwargs,
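
Usage note: a minimal sketch of how ParquetArtifact might be driven, based only on the docstring above. It is not part of the diff; MyDataWrapper, the parameter values, and the load() call are illustrative assumptions (the required keys and the data_wrapper_class argument are documented, the method name is not).

    # Hypothetical sketch, not from the package source.
    from sibi_dst.df_helper import ParquetArtifact  # import path assumed

    class MyDataWrapper:                 # placeholder for a real DataWrapper class
        pass

    artifact = ParquetArtifact(
        MyDataWrapper,
        date_field="created_at",                       # required, else ValueError
        parquet_storage_path="s3://bucket/warehouse",  # required
        parquet_filename="orders.parquet",             # required
        parquet_start_date="2025-01-01",               # required
        parquet_end_date="2025-01-31",                 # required
        filesystem_type="s3",                          # any fsspec protocol
        filesystem_options={"anon": False},
    )
    df = artifact.load()  # assumed DfHelper-style loader returning a Dask DataFrame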
{sibi_dst-0.3.32 → sibi_dst-0.3.33}/sibi_dst/df_helper/_parquet_reader.py
@@ -7,6 +7,42 @@ from sibi_dst.df_helper import DfHelper
 
 
 class ParquetReader(DfHelper):
+    """
+    This class is a specialized helper for reading and managing Parquet files.
+
+    The `ParquetReader` class is designed to facilitate working with Parquet
+    datasets stored across different filesystems. It initializes the required
+    resources, ensures the existence of the specified Parquet directory,
+    and provides an abstraction to load the data into a Dask DataFrame.
+
+    The class requires configuration for the storage path and dates defining
+    a range of interest. It also supports various filesystem types through
+    `fsspec`.
+
+    :ivar config: Holds the final configuration for this instance, combining
+        `DEFAULT_CONFIG` with user-provided configuration.
+    :type config: dict
+    :ivar df: Stores the loaded Dask DataFrame after the `load()` method is
+        invoked. Initially set to None.
+    :type df: Optional[dd.DataFrame]
+    :ivar parquet_storage_path: The path to the Parquet storage directory.
+    :type parquet_storage_path: str
+    :ivar parquet_start_date: Start date for Parquet data selection. Must
+        be set in the configuration.
+    :type parquet_start_date: str
+    :ivar parquet_end_date: End date for Parquet data selection. Must be
+        set in the configuration.
+    :type parquet_end_date: str
+    :ivar filesystem_type: The type of filesystem the Parquet files are
+        stored on (e.g., "file", "s3").
+    :type filesystem_type: str
+    :ivar filesystem_options: Any additional options required for the
+        specified filesystem type.
+    :type filesystem_options: dict
+    :ivar fs: Instance of `fsspec` filesystem used to interact with the
+        Parquet storage.
+    :type fs: fsspec.AbstractFileSystem
+    """
     DEFAULT_CONFIG = {
         'backend': 'parquet'
     }
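
Usage note: the ParquetReader docstring explicitly names a load() method that populates df, so a sketch is straightforward; the import path and parameter values below are still assumptions.

    # Hypothetical sketch, not from the package source.
    from sibi_dst.df_helper import ParquetReader  # import path assumed

    reader = ParquetReader(
        parquet_storage_path="file:///data/warehouse/orders",
        parquet_start_date="2025-01-01",
        parquet_end_date="2025-01-31",
        filesystem_type="file",            # local filesystem via fsspec
    )
    df = reader.load()                     # documented; returns a Dask DataFrame
    print(df.head())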
sibi_dst-0.3.33/sibi_dst/df_helper/backends/django/_db_connection.py (new file)
@@ -0,0 +1,88 @@
+from typing import Any
+
+from pydantic import BaseModel, model_validator
+
+from ._sql_model_builder import DjangoSqlModelBuilder
+
+
+class DjangoConnectionConfig(BaseModel):
+    """
+    Represents a configuration for establishing a Django database connection.
+
+    This class is used for defining the configurations necessary to establish a Django
+    database connection. It supports dynamic model generation if the model is not
+    provided explicitly. It also validates the connection configuration to ensure it
+    is properly set up before being used.
+
+    :ivar live: Indicates whether the connection is live. Automatically set to False if
+        a table is provided without a pre-built model.
+    :type live: bool
+    :ivar connection_name: The name of the database connection to use. This is a mandatory
+        parameter and must be provided.
+    :type connection_name: str
+    :ivar table: The name of the database table to use. Required for dynamic model
+        generation when no model is provided.
+    :type table: str
+    :ivar model: The Django model that represents the database table. If not provided,
+        this can be generated dynamically by using the table name.
+    :type model: Any
+    """
+    live: bool = False
+    connection_name: str = None
+    table: str = None
+    model: Any = None
+
+    @model_validator(mode="after")
+    def check_model(self):
+        """
+        Validates and modifies the instance based on the provided attributes and conditions.
+        This method ensures that all required parameters are populated and consistent, and it
+        dynamically builds a model if necessary. The method also ensures the connection is
+        validated after the model preparation process.
+
+        :raises ValueError: If `connection_name` is not provided.
+        :raises ValueError: If `table` name is not specified when building the model dynamically.
+        :raises ValueError: If there are errors during the dynamic model-building process.
+        :raises ValueError: If `validate_connection` fails due to invalid configuration.
+        :return: The validated and potentially mutated instance.
+        """
+        # connection_name is mandatory
+        if self.connection_name is None:
+            raise ValueError("Connection name must be specified")
+
+        # If table is provided, enforce live=False
+        if self.table:
+            self.live = False
+
+        # If model is not provided, build it dynamically
+        if not self.model:
+            if not self.table:
+                raise ValueError("Table name must be specified to build the model")
+            try:
+                self.model = DjangoSqlModelBuilder(
+                    connection_name=self.connection_name, table=self.table
+                ).build_model()
+            except Exception as e:
+                raise ValueError(f"Failed to build model: {e}")
+        else:
+            self.live = True
+        # Validate the connection after building the model
+        self.validate_connection()
+        return self
+
+    def validate_connection(self):
+        """
+        Ensures the database connection is valid by performing a simple
+        query. Raises a ValueError if the connection is broken or if any
+        other exception occurs during the query.
+
+        :raises ValueError: If the connection to the database cannot be
+            established or if the query fails.
+        """
+        try:
+            # Perform a simple query to test the connection
+            self.model.objects.using(self.connection_name).exists()
+        except Exception as e:
+            raise ValueError(
+                f"Failed to connect to the database '{self.connection_name}': {e}"
+            )
sibi_dst-0.3.33/sibi_dst/df_helper/backends/django/_io_dask.py (new file)
@@ -0,0 +1,450 @@
+import itertools
+
+import dask.dataframe as dd
+import django
+import pandas as pd
+from django.core.cache import cache
+from django.core.exceptions import FieldDoesNotExist
+from django.db import models
+from django.db.models import Field
+from django.utils.encoding import force_str as force_text
+
+
+class ReadFrameDask:
+    """
+    Handles Django ORM QuerySet to Dask DataFrame conversion with support for field
+    type inference, chunked data retrieval, and verbose updates.
+
+    This class provides methods to efficiently convert a Django QuerySet into a
+    Dask DataFrame while preserving field types and incorporating additional
+    capabilities such as replacing fields with verbose choices or related object
+    information. The class design leverages static and class methods to maintain
+    flexibility and reusability for handling Django model fields and their data
+    types.
+
+    :ivar qs: The Django QuerySet to be converted into a Dask DataFrame.
+    :type qs: django.db.models.query.QuerySet
+    :ivar coerce_float: Whether to attempt to coerce numeric values to floats.
+    :type coerce_float: bool
+    :ivar chunk_size: The number of records to fetch and process per chunk from
+        the QuerySet.
+    :type chunk_size: int
+    :ivar verbose: If True, provides verbose updates during DataFrame creation
+        by replacing fields with readable representations (e.g., verbose names).
+    :type verbose: bool
+    """
+    FieldDoesNotExist = (
+        django.core.exceptions.FieldDoesNotExist
+        if django.VERSION < (1, 8)
+        else django.core.exceptions.FieldDoesNotExist
+    )
+
+    def __init__(
+        self,
+        qs,
+        **kwargs,
+    ):
+        """
+        An initialization method for a class that sets class attributes based on provided
+        arguments or default values using the keyword arguments. The method allows
+        customization of behaviors like coercing data types, handling chunked operations,
+        and verbosity level during execution.
+
+        :param qs: A data source or query set for processing; its type is dependent
+            on the expected data being handled.
+        :param kwargs: Additional keyword arguments that may include:
+            - coerce_float: A boolean indicating whether floats should be coerced
+              during handling. Default is False.
+            - chunk_size: An integer value representing the size of chunks for
+              data processing. Default is 1000.
+            - verbose: A boolean to specify if verbose logging or output
+              should occur during execution. Default is True.
+        """
+        self.qs = qs
+        self.coerce_float = kwargs.setdefault("coerce_float", False)
+        self.chunk_size = kwargs.setdefault("chunk_size", 1000)
+        self.verbose = kwargs.setdefault("verbose", True)
+
+    @staticmethod
+    def replace_from_choices(choices):
+        """
+        Provides a method to replace elements in a list of values based on a mapping of choices.
+
+        This static method generates a closure function that replaces items in a list by
+        looking up their corresponding values in a provided dictionary of choices. If an
+        item cannot be found in the dictionary, it is left unchanged.
+
+        :param choices:
+            Dictionary where keys are original values and values are their replacements.
+        :return:
+            A function that takes a list of values and replaces elements using the
+            provided choices dictionary.
+        """
+        def inner(values):
+            return [choices.get(v, v) for v in values]
+
+        return inner
+
+    @staticmethod
+    def get_model_name(model):
+        """
+        Retrieves the model name from a given Django model instance.
+
+        This method accesses the `_meta.model_name` attribute of the provided
+        model object to extract and return the model's name.
+
+        :param model: A Django model instance from which the model name is
+            derived.
+        :type model: object
+        :return: The name of the model as a string.
+        :rtype: str
+        """
+        return model._meta.model_name
+
+    @staticmethod
+    def get_related_model(field):
+        """
+        Retrieve the related model from the provided field.
+
+        This function determines the related model associated with the given field.
+        It checks various attributes commonly used to indicate relations in models and
+        retrieves the related model if present.
+
+        :param field: The field from which the related model is to be extracted.
+            It must be an object that potentially contains attributes like
+            `related_model` or `rel`.
+        :return: The related model associated with the provided field, or None if
+            no such model is found.
+        """
+        model = None
+        if hasattr(field, "related_model") and field.related_model:
+            model = field.related_model
+        elif hasattr(field, "rel") and field.rel:
+            model = field.rel.to
+        return model
+
+    @classmethod
+    def get_base_cache_key(cls, model):
+        """
+        Generates a base cache key for caching purposes.
+
+        This method constructs a base cache key that can be used in conjunction with
+        Django models to uniquely identify cache entries. The key is formatted to
+        include the app label and model name, ensuring that cache entries are
+        namespaced accordingly.
+
+        :param model: A Django model instance for which the base cache key is generated.
+        :type model: Model
+        :return: The string template for the base cache key, where `%s` can be replaced
+            with specific identifiers to create unique keys.
+        :rtype: str
+        """
+        return (
+            f"dask_{model._meta.app_label}_{cls.get_model_name(model)}_%s_rendering"
+        )
+
+    @classmethod
+    def replace_pk(cls, model):
+        """
+        Generates a function that replaces primary keys in a pandas Series with their
+        corresponding cached values or database-retrieved representations.
+
+        The function uses a cache mechanism to retrieve pre-stored values for primary
+        keys in the series. If some primary keys are not found in the cache, it queries
+        the database for their representations, updates the cache, and replaces the
+        primary keys in the series accordingly.
+
+        :param model: The Django model class associated with the primary keys to be
+            processed.
+        :type model: Type[Model]
+
+        :return: A function that takes a pandas Series of primary keys as input and
+            returns a Series with replaced values based on cache or database retrieval.
+        :rtype: callable
+        """
+        base_cache_key = cls.get_base_cache_key(model)
+
+        def get_cache_key_from_pk(pk):
+            return None if pk is None else base_cache_key % str(pk)
+
+        def inner(pk_series):
+            pk_series = pk_series.astype(object).where(pk_series.notnull(), None)
+            cache_keys = pk_series.apply(get_cache_key_from_pk, convert_dtype=False)
+            unique_cache_keys = list(filter(None, cache_keys.unique()))
+            if not unique_cache_keys:
+                return pk_series
+
+            out_dict = cache.get_many(unique_cache_keys)
+            if len(out_dict) < len(unique_cache_keys):
+                out_dict = dict(
+                    [
+                        (base_cache_key % obj.pk, force_text(obj))
+                        for obj in model.objects.filter(
+                            pk__in=list(filter(None, pk_series.unique()))
+                        )
+                    ]
+                )
+                cache.set_many(out_dict)
+            return list(map(out_dict.get, cache_keys))
+
+        return inner
+
+    @classmethod
+    def build_update_functions(cls, fieldnames, fields):
+        """
+        This method is responsible for building update functions based on the provided
+        fieldnames and fields. It performs validation for the field type, checks for
+        specific conditions such as `choices` or `ForeignKey` field types, and generates
+        a generator of update functions for the given fieldnames and fields.
+
+        :param fieldnames: A list of field names to be processed.
+        :type fieldnames: list[str]
+        :param fields: A list of field objects corresponding to the fieldnames.
+        :type fields: list[Field]
+        :return: A generator yielding tuples where the first element is a fieldname,
+            and the second element is the corresponding update function or None.
+        :rtype: generator[tuple[str, Callable | None]]
+        """
+        for fieldname, field in zip(fieldnames, fields):
+            if not isinstance(field, Field):
+                yield fieldname, None
+            else:
+                if field.choices:
+                    choices = dict([(k, force_text(v)) for k, v in field.flatchoices])
+                    yield fieldname, cls.replace_from_choices(choices)
+                elif field.get_internal_type() == "ForeignKey":
+                    yield fieldname, cls.replace_pk(cls.get_related_model(field))
+
+    @classmethod
+    def update_with_verbose(cls, df, fieldnames, fields):
+        """
+        Updates the provided dataframe by applying transformation functions to specified fields.
+        The method iterates over the provided field names and their corresponding functions, applying
+        each transformation function to its related column in the dataframe.
+
+        :param df: The input dataframe to be updated.
+        :param fieldnames: A list of field names in the dataframe that need to be updated.
+        :param fields: A list of transformation functions or mappings corresponding to the field names.
+        :return: The dataframe with updated fields.
+        """
+        for fieldname, function in cls.build_update_functions(fieldnames, fields):
+            if function is not None:
+                df[fieldname] = df[fieldname].map_partitions(lambda x: function(x))
+
+    @classmethod
+    def to_fields(cls, qs, fieldnames):
+        """
+        Converts field names from a queryset into corresponding field objects, resolving relationships
+        and related objects if necessary. This method is typically used to yield fully-resolved field
+        objects for further interaction.
+
+        :param qs: A QuerySet object from which the fields are resolved. This object provides access
+            to the model and its metadata from which the fields are retrieved.
+        :type qs: QuerySet
+
+        :param fieldnames: A list of field name strings. These can include nested fields separated by
+            double underscores (__) to denote relationships or subfields.
+        :type fieldnames: List[str]
+
+        :return: A generator that yields resolved field objects corresponding to the provided field names.
+        :rtype: Generator[Field, None, None]
+        """
+        for fieldname in fieldnames:
+            model = qs.model
+            for fieldname_part in fieldname.split("__"):
+                try:
+                    field = model._meta.get_field(fieldname_part)
+                except cls.FieldDoesNotExist:
+                    try:
+                        rels = model._meta.get_all_related_objects_with_model()
+                    except AttributeError:
+                        field = fieldname
+                    else:
+                        for relobj, _ in rels:
+                            if relobj.get_accessor_name() == fieldname_part:
+                                field = relobj.field
+                                model = field.model
+                                break
+                else:
+                    model = cls.get_related_model(field)
+            yield field
+
+    @staticmethod
+    def is_values_queryset(qs):
+        """
+        Determines whether the provided queryset is a values queryset.
+
+        This method checks if the `_iterable_class` attribute of the queryset corresponds
+        to `django.db.models.query.ValuesIterable`. If an exception occurs during the check,
+        the method returns `False`.
+
+        :param qs: The queryset to be checked.
+        :type qs: django.db.models.query.QuerySet
+        :return: A boolean indicating whether the queryset is a values queryset.
+        :rtype: bool
+        """
+        try:
+            return qs._iterable_class == django.db.models.query.ValuesIterable
+        except:
+            return False
+
+    @staticmethod
+    def object_to_dict(obj, fields=None):
+        """
+        Converts an object to a dictionary representation.
+
+        This static method transforms an object's attributes into a dictionary.
+        If no specific fields are provided, all attribute key-value pairs are
+        included. The "_state" attribute, if present, is safely removed in this
+        case. When specific fields are supplied, only those fields are included
+        in the resulting dictionary.
+
+        :param obj: The object to be serialized into a dictionary. This object
+            must have the `__dict__` attribute available.
+        :param fields: A list of strings representing the attribute names to
+            include in the dictionary. If None or not provided, all attributes
+            are included except for "_state".
+        :return: A dictionary representation of the object's attributes. If the
+            provided object is None, an empty dictionary is returned.
+        :rtype: dict
+        """
+        if obj is None:
+            return {}  # Return an empty dictionary if obj is None
+        if not fields:
+            obj.__dict__.pop("_state", None)  # Remove _state safely
+            return obj.__dict__
+        return {field: obj.__dict__.get(field) for field in fields if field is not None}
+
+    @staticmethod
+    def infer_dtypes_from_django(qs):
+        """
+        Infer dtypes from a Django QuerySet model and annotated fields.
+
+        This method infers the appropriate data types (dtypes) for a given
+        Django QuerySet (`qs`) based on the fields defined in its model and
+        any annotated fields included in the QuerySet. The function maps
+        Django model field types to corresponding dtypes compatible with
+        Dask or Pandas dataframes.
+
+        - Fields in the model are identified through their metadata.
+        - Reverse relationships and non-concrete fields are ignored.
+        - Annotated fields are processed separately and default to object
+          dtype if their type cannot be determined.
+
+        :param qs: Django QuerySet whose model is used to infer dtypes.
+        :type qs: QuerySet
+        :return: A mapping of field names to inferred dtypes.
+        :rtype: dict
+        """
+        django_to_dask_dtype = {
+            'AutoField': 'Int64',  # Use nullable integer
+            'BigAutoField': 'Int64',
+            'BigIntegerField': 'Int64',
+            'BooleanField': 'bool',
+            'CharField': 'object',
+            'DateField': 'datetime64[ns]',
+            'DateTimeField': 'datetime64[ns]',
+            'DecimalField': 'float64',
+            'FloatField': 'float64',
+            'IntegerField': 'Int64',  # Use nullable integer
+            'PositiveIntegerField': 'Int64',
+            'SmallIntegerField': 'Int64',
+            'TextField': 'object',
+            'TimeField': 'object',
+            'UUIDField': 'object',
+            'ForeignKey': 'Int64',  # Use nullable integer for FK fields
+        }
+
+        dtypes = {}
+        # Handle model fields
+        for field in qs.model._meta.get_fields():
+            # Skip reverse relationships and non-concrete fields
+            if not getattr(field, 'concrete', False):
+                continue
+
+            # Check for AutoField or BigAutoField explicitly
+            if isinstance(field, (models.AutoField, models.BigAutoField)):
+                dtypes[field.name] = 'Int64'  # Nullable integer for autoincremented fields
+            else:
+                # Use field type to infer dtype
+                field_type = field.get_internal_type()
+                dtypes[field.name] = django_to_dask_dtype.get(field_type, 'object')
+
+        # Handle annotated fields
+        for annotation_name, annotation in qs.query.annotation_select.items():
+            if hasattr(annotation, 'output_field'):
+                field_type = annotation.output_field.get_internal_type()
+                dtype = django_to_dask_dtype.get(field_type, 'object')
+            else:
+                dtype = 'object'  # Default to object for untyped annotations
+            dtypes[annotation_name] = dtype
+
+        return dtypes
+
+    def read_frame(self, fillna_value=None):
+        """
+        Reads a Django QuerySet and returns a dask DataFrame by iterating over the QuerySet in chunks. It
+        handles data type inference, missing values, timezone awareness, and creates partitions to form a
+        single dask DataFrame efficiently.
+
+        This method includes functionality for managing missing values, inferring data types from Django fields,
+        and handling timezone-aware datetime objects. It processes data in chunks to optimize memory usage and
+        supports converting chunks into pandas DataFrames before combining them into a unified dask DataFrame.
+
+        :param fillna_value: The value to fill NaN values in the DataFrame. If None, NaNs are not filled.
+        :type fillna_value: Any
+        :return: A dask DataFrame constructed from the QuerySet after processing and combining all
+            its partitions.
+        :rtype: dask.dataframe.DataFrame
+        """
+        qs = self.qs
+        coerce_float = self.coerce_float
+        verbose = self.verbose
+        chunk_size = self.chunk_size
+
+        fields = qs.model._meta.fields
+        fieldnames = [f.name for f in fields]
+        fieldnames += list(qs.query.annotation_select.keys())
+        fieldnames = tuple(fieldnames)
+        # Infer dtypes from Django fields
+        dtypes = self.infer_dtypes_from_django(qs)
+        if fieldnames:
+            dtypes = {field: dtype for field, dtype in dtypes.items() if field in fieldnames}
+
+        # Create partitions for Dask by iterating through chunks
+        partitions = []
+        iterator = iter(qs.iterator(chunk_size=chunk_size))
+
+        while True:
+            chunk = list(itertools.islice(iterator, chunk_size))
+            if not chunk:
+                break
+
+            # Convert chunk to DataFrame with inferred dtypes
+            df = pd.DataFrame.from_records(
+                [self.object_to_dict(obj, fieldnames) for obj in chunk],
+                columns=fieldnames,
+                coerce_float=coerce_float,
+            )
+            # Handle NaN values before casting, if specified
+            if fillna_value is not None:
+                df = df.fillna(fillna_value)
+
+            # Convert timezone-aware columns to timezone-naive if needed
+            for col in df.columns:
+                if isinstance(df[col].dtype, pd.DatetimeTZDtype):
+                    df[col] = df[col].dt.tz_localize(None)
+
+            # Convert to the appropriate data types
+            df = df.astype(dtypes)
+            partitions.append(dd.from_pandas(df, npartitions=1))
+
+        # Concatenate partitions into a single Dask DataFrame
+        # Ensure all partitions have the same columns
+
+        dask_df = dd.concat(partitions, axis=0, ignore_index=True)
+
+        if verbose:
+            self.update_with_verbose(dask_df, fieldnames, fields)
+
+        return dask_df
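
Usage note: read end to end, read_frame() drains the QuerySet in chunks and emits one Dask partition per chunk. A sketch under stated assumptions: Order and the myapp import are placeholders that require a configured Django project, and the import path for ReadFrameDask is inferred from the file layout in this release.

    # Hypothetical sketch, not from the package source.
    from myapp.models import Order  # placeholder model
    from sibi_dst.df_helper.backends.django import ReadFrameDask  # path assumed

    qs = Order.objects.filter(status="shipped")
    ddf = ReadFrameDask(qs, chunk_size=5000, verbose=False).read_frame(fillna_value=0)
    print(ddf.npartitions)  # one partition per 5000-row chunk
    df = ddf.compute()      # materialize as pandas when needed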