gov-uk-dashboards 21.2.2-py3-none-any.whl → 26.26.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. gov_uk_dashboards/__init__.py +1 -1
  2. gov_uk_dashboards/assets/__init__.py +1 -0
  3. gov_uk_dashboards/assets/dashboard.css +177 -0
  4. gov_uk_dashboards/assets/download-map.js +39 -0
  5. gov_uk_dashboards/assets/get_assets_folder.py +1 -0
  6. gov_uk_dashboards/assets/images/CHASE_icon.svg +17 -0
  7. gov_uk_dashboards/assets/images/explore_data_logo.svg +87 -0
  8. gov_uk_dashboards/assets/index.html +3 -0
  9. gov_uk_dashboards/assets/register_maps +15 -0
  10. gov_uk_dashboards/assets/scripts.js +4 -0
  11. gov_uk_dashboards/colours.py +23 -0
  12. gov_uk_dashboards/components/__init__.py +1 -0
  13. gov_uk_dashboards/components/dash/__init__.py +1 -3
  14. gov_uk_dashboards/components/dash/apply_and_reset_filters_buttons.py +1 -0
  15. gov_uk_dashboards/components/dash/banners.py +21 -0
  16. gov_uk_dashboards/components/dash/card.py +1 -0
  17. gov_uk_dashboards/components/dash/card_full_width.py +1 -0
  18. gov_uk_dashboards/components/dash/collapsible_panel.py +1 -0
  19. gov_uk_dashboards/components/dash/comparison_la_filter_button.py +1 -0
  20. gov_uk_dashboards/components/dash/context_banner.py +2 -1
  21. gov_uk_dashboards/components/dash/context_card.py +978 -0
  22. gov_uk_dashboards/components/dash/data_quality_banner.py +91 -0
  23. gov_uk_dashboards/components/dash/details.py +1 -0
  24. gov_uk_dashboards/components/dash/download_button.py +22 -36
  25. gov_uk_dashboards/components/dash/filter_panel.py +1 -0
  26. gov_uk_dashboards/components/dash/footer.py +81 -27
  27. gov_uk_dashboards/components/dash/graph.py +1 -0
  28. gov_uk_dashboards/components/dash/green_button.py +25 -0
  29. gov_uk_dashboards/components/dash/header.py +62 -9
  30. gov_uk_dashboards/components/dash/heading.py +8 -5
  31. gov_uk_dashboards/components/dash/home_page_link_button.py +9 -8
  32. gov_uk_dashboards/components/dash/html_list.py +1 -0
  33. gov_uk_dashboards/components/dash/key_value_pair.py +1 -0
  34. gov_uk_dashboards/components/dash/main_content.py +25 -2
  35. gov_uk_dashboards/components/dash/notification_banner.py +9 -5
  36. gov_uk_dashboards/components/dash/paragraph.py +1 -0
  37. gov_uk_dashboards/components/dash/phase_banner.py +7 -4
  38. gov_uk_dashboards/components/dash/row_component.py +1 -0
  39. gov_uk_dashboards/components/dash/table.py +62 -124
  40. gov_uk_dashboards/components/dash/tooltip.py +2 -1
  41. gov_uk_dashboards/components/dash/tooltip_title.py +2 -1
  42. gov_uk_dashboards/components/dash/visualisation_commentary.py +1 -0
  43. gov_uk_dashboards/components/dash/visualisation_title.py +1 -0
  44. gov_uk_dashboards/components/dash/warning_text.py +1 -0
  45. gov_uk_dashboards/components/helpers/display_chart_or_table_with_header.py +61 -12
  46. gov_uk_dashboards/components/helpers/get_chart_for_download.py +18 -15
  47. gov_uk_dashboards/components/helpers/plotting_helper_functions.py +0 -1
  48. gov_uk_dashboards/components/leaflet/leaflet_choropleth_map.py +108 -31
  49. gov_uk_dashboards/components/plotly/captioned_figure.py +6 -3
  50. gov_uk_dashboards/components/plotly/enums.py +2 -0
  51. gov_uk_dashboards/components/plotly/stacked_barchart.py +166 -73
  52. gov_uk_dashboards/components/plotly/time_series_chart.py +159 -20
  53. gov_uk_dashboards/constants.py +35 -1
  54. gov_uk_dashboards/figures/__init__.py +4 -2
  55. gov_uk_dashboards/figures/enums/__init__.py +1 -0
  56. gov_uk_dashboards/figures/enums/dash_patterns.py +1 -0
  57. gov_uk_dashboards/figures/line_chart.py +71 -71
  58. gov_uk_dashboards/figures/styles/__init__.py +1 -0
  59. gov_uk_dashboards/figures/styles/line_style.py +1 -0
  60. gov_uk_dashboards/formatting/human_readable.py +1 -0
  61. gov_uk_dashboards/formatting/number_formatting.py +14 -0
  62. gov_uk_dashboards/formatting/round_and_add_prefix_and_suffix.py +1 -0
  63. gov_uk_dashboards/formatting/text_functions.py +11 -0
  64. gov_uk_dashboards/lib/dap/dap_deployment.py +1 -0
  65. gov_uk_dashboards/lib/dap/get_dataframe_from_cds.py +96 -95
  66. gov_uk_dashboards/lib/datetime_functions/datetime_functions.py +118 -0
  67. gov_uk_dashboards/lib/download_functions/download_csv_with_headers.py +106 -83
  68. gov_uk_dashboards/lib/http_headers.py +10 -2
  69. gov_uk_dashboards/lib/logging.py +1 -0
  70. gov_uk_dashboards/lib/testing_functions/__init__.py +0 -0
  71. gov_uk_dashboards/lib/testing_functions/barchart_data_test_assertions.py +48 -0
  72. gov_uk_dashboards/lib/testing_functions/data_test_assertions.py +124 -0
  73. gov_uk_dashboards/lib/testing_functions/data_test_helper_functions.py +257 -0
  74. gov_uk_dashboards/lib/testing_functions/timeseries_data_test_assertions.py +29 -0
  75. gov_uk_dashboards/lib/warning_text_sensitive.py +44 -0
  76. gov_uk_dashboards/log_kpi.py +37 -0
  77. gov_uk_dashboards/symbols.py +1 -0
  78. gov_uk_dashboards/template.html +37 -0
  79. gov_uk_dashboards/template.py +14 -3
  80. {gov_uk_dashboards-21.2.2.dist-info → gov_uk_dashboards-26.26.0.dist-info}/METADATA +6 -7
  81. gov_uk_dashboards-26.26.0.dist-info/RECORD +128 -0
  82. {gov_uk_dashboards-21.2.2.dist-info → gov_uk_dashboards-26.26.0.dist-info}/WHEEL +1 -1
  83. gov_uk_dashboards/axes.py +0 -21
  84. gov_uk_dashboards/figures/chart_data.py +0 -24
  85. gov_uk_dashboards-21.2.2.dist-info/RECORD +0 -113
  86. {gov_uk_dashboards-21.2.2.dist-info → gov_uk_dashboards-26.26.0.dist-info}/licenses/LICENSE +0 -0
  87. {gov_uk_dashboards-21.2.2.dist-info → gov_uk_dashboards-26.26.0.dist-info}/top_level.txt +0 -0
gov_uk_dashboards/lib/dap/get_dataframe_from_cds.py
@@ -1,109 +1,110 @@
- """ Returns a dataframe after connecting to CDS, otherwise uses a csv already saved in the file"""
- import os
- import json
- import pandas as pd
- import pyodbc
- import boto3
+ """Returns a dataframe after connecting to CDS, otherwise uses a csv already saved in the file"""
  
+ # import os
+ # import json
+ # import pyodbc
+ # import boto3
  
- def get_data_from_cds_or_fallback_to_csv(
-     cds_sql_query: str, csv_path: str, secret_name: str, cds_server_name: str
- ) -> pd.DataFrame:
-     """Tries to return dataframe from CDS first via Pydash credentials,
-     otherwise via Amazon WorkSpaces,
-     otherwise via a file from folder.
-     Inputs:
-         cds_sql_query(str): SQL query string
-         csv_path(str): Filepath for location of csv to fallback to
-         secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
-         cds_server_name(str): CDS Server name used in connection string
-     Returns:
-         pd.DataFrame
-     """
-     if (
-         "DATA_FOLDER_LOCATION" in os.environ
-         and os.environ["DATA_FOLDER_LOCATION"] == "tests/"
-     ) or ("STAGE" in os.environ and os.environ["STAGE"] == "testing"):
-         return pd.read_csv(csv_path)
  
-     try:
-         conn = pyodbc.connect(
-             _get_pydash_connection_string(secret_name, cds_server_name)
-         )
-         print("Dataframe has been loaded from CDS using Pydash credentials")
+ # def get_data_from_cds_or_fallback_to_csv(
+ # cds_sql_query: str, csv_path: str, secret_name: str, cds_server_name: str
+ # ) -> pd.DataFrame:
+ # """Tries to return dataframe from CDS first via Pydash credentials,
+ # otherwise via Amazon WorkSpaces,
+ # otherwise via a file from folder.
+ # Inputs:
+ # cds_sql_query(str): SQL query string
+ # csv_path(str): Filepath for location of csv to fallback to
+ # secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
+ # cds_server_name(str): CDS Server name used in connection string
+ # Returns:
+ # pd.DataFrame
+ # """
+ # if (
+ # "DATA_FOLDER_LOCATION" in os.environ
+ # and os.environ["DATA_FOLDER_LOCATION"] == "tests/"
+ # ) or ("STAGE" in os.environ and os.environ["STAGE"] == "testing"):
+ # return pd.read_csv(csv_path)
  
-         return pd.read_sql_query(
-             cds_sql_query,
-             conn,
-         )
+ # try:
+ # conn = pyodbc.connect(
+ # _get_pydash_connection_string(secret_name, cds_server_name)
+ # )
+ # print("Dataframe has been loaded from CDS using Pydash credentials")
  
-     except Exception as credential_error:  # pylint: disable=broad-except
-         try:
-             print(
-                 "Failed to load dataframe using Pydash credentials: ", credential_error
-             )
-             conn = pyodbc.connect(
-                 "Driver={SQL Server};"
-                 f"Server={cds_server_name};"
-                 "Database=Dashboards;"
-                 "Trusted_Connection=yes;"
-             )
-             print(
-                 "Dataframe has been loaded from CDS using Windows login authentication"
-             )
+ # return pd.read_sql_query(
+ # cds_sql_query,
+ # conn,
+ # )
  
-             return pd.read_sql_query(
-                 cds_sql_query,
-                 conn,
-             )
+ # except Exception as credential_error:  # pylint: disable=broad-except
+ # try:
+ # print(
+ # "Failed to load dataframe using Pydash credentials: ", credential_error
+ # )
+ # conn = pyodbc.connect(
+ # "Driver={SQL Server};"
+ # f"Server={cds_server_name};"
+ # "Database=Dashboards;"
+ # "Trusted_Connection=yes;"
+ # )
+ # print(
+ # "Dataframe has been loaded from CDS using Windows login authentication"
+ # )
  
-         except pyodbc.Error as conn_error_except:
-             print(
-                 "Failed to load dataframe using Windows login authentication: ",
-                 conn_error_except,
-             )
-             print("Dataframe has been loaded from CSV")
-             return pd.read_csv(csv_path)
+ # return pd.read_sql_query(
+ # cds_sql_query,
+ # conn,
+ # )
  
+ # except pyodbc.Error as conn_error_except:
+ # print(
+ # "Failed to load dataframe using Windows login authentication: ",
+ # conn_error_except,
+ # )
+ # print("Dataframe has been loaded from CSV")
+ # return pd.read_csv(csv_path)
  
- def _get_pydash_connection_string(secret_name: str, cds_server_name: str):
-     """
-     Pydash aka DAP Hosting requires username and password
-     Inputs:
-         secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
-         cds_server_name(str): CDS Server name used in connection string
-     """
-     credentials = _pydash_sql_credentials(secret_name)
-     conn_string_dap = (
-         "Driver={/usr/lib/libmsodbcsql-18.so};"
-         f"Server={cds_server_name};"
-         "TrustServerCertificate=yes;"
-         "Database=Dashboards;"
-     )
-     return (
-         f"{conn_string_dap}UID={credentials['username']};PWD={credentials['password']};"
-     )
  
+ # def _get_pydash_connection_string(secret_name: str, cds_server_name: str):
+ # """
+ # Pydash aka DAP Hosting requires username and password
+ # Inputs:
+ # secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
+ # cds_server_name(str): CDS Server name used in connection string
+ # """
+ # credentials = _pydash_sql_credentials(secret_name)
+ # conn_string_dap = (
+ # "Driver={/usr/lib/libmsodbcsql-18.so};"
+ # f"Server={cds_server_name};"
+ # "TrustServerCertificate=yes;"
+ # "Database=Dashboards;"
+ # )
+ # return (
+ # f"{conn_string_dap}UID={credentials['username']};PWD={credentials['password']};"
+ # )
  
- def _pydash_sql_credentials(secret_name: str):
-     """
-     Logging into CDS from Pydash requires user name and password.
-     This method will return a dictionary containing the keys "username" and "password".
-     Raises `botocore.exceptions.ClientError` if no credentials could be obtained
-     Inputs:
-         secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
-     Returns:
-         dict: a dictionary containing the keys "username" and "password"
-     """
-     region_name = "eu-west-1"
-     # Create a Secrets Manager client
-     session = boto3.session.Session()
-     client = session.client(service_name="secretsmanager", region_name=region_name)
-     # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/secretsmanager.html#SecretsManager.Client.get_secret_value
  
-     get_secret_value_response = client.get_secret_value(SecretId=secret_name)
+ # def _pydash_sql_credentials(secret_name: str):
+ # """
+ # Logging into CDS from Pydash requires user name and password.
+ # This method will return a dictionary containing the keys "username" and "password".
+ # Raises `botocore.exceptions.ClientError` if no credentials could be obtained
+ # Inputs:
+ # secret_name(str): AWS Secrets Manager, secret name containing CDS credentials.
+ # Returns:
+ # dict: a dictionary containing the keys "username" and "password"
+ # """
+ # region_name = "eu-west-1"
+ # # Create a Secrets Manager client
+ # session = boto3.session.Session()
+ # client = session.client(service_name="secretsmanager", region_name=region_name)
+ # # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/secretsmanager
+ # # .html#SecretsManager.Client.get_secret_value
  
-     secret = get_secret_value_response["SecretString"]
+ # get_secret_value_response = client.get_secret_value(SecretId=secret_name)
  
-     credentials = json.loads(secret)
-     return credentials
+ # secret = get_secret_value_response["SecretString"]
+ 
+ # credentials = json.loads(secret)
+ # return credentials
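
Note: in 26.26.0 the whole CDS helper module is commented out rather than deleted. Under 21.2.2, the function above short-circuited to the local CSV whenever the environment flagged a test run. A minimal sketch of that old fallback path; the environment variables and the signature come from the diff, while the query, paths, and names below are hypothetical:

    import os
    import pandas as pd  # the old 21.2.2 module depended on pandas

    os.environ["STAGE"] = "testing"  # forces the CSV branch in the old code above
    df = get_data_from_cds_or_fallback_to_csv(
        cds_sql_query="SELECT * FROM dashboard_table",  # hypothetical query
        csv_path="data/fallback.csv",                   # hypothetical fallback file
        secret_name="cds-credentials",                  # hypothetical secret name
        cds_server_name="cds-server",                   # hypothetical server
    )
    # Equivalent to pd.read_csv("data/fallback.csv"); CDS is never contacted.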
gov_uk_dashboards/lib/datetime_functions/datetime_functions.py
@@ -7,6 +7,91 @@ import re
  from typing import Optional
  
  
+ def convert_date(
+     date_input,
+     input_format=None,
+     output_format=None,
+     convert_to_datetime=False,
+     abbreviate_jun_jul=False,
+ ):
+     """
+     Convert a date input (string, date, or datetime) into either a datetime object or a formatted
+     string.
+ 
+     Behaviour:
+     - If `date_input` is a string, `input_format` must be provided and is used with
+       `datetime.strptime`.
+     - If `convert_to_datetime` is True, returns a `datetime.datetime` (at midnight if the input was
+       a `date`), and `output_format` is ignored.
+     - If `convert_to_datetime` is False, `output_format` must be provided and is used with
+       `strftime`.
+ 
+     Month abbreviation tweak:
+     - If `abbreviate_jun_jul` is False (default), and your `output_format` produces abbreviated
+       months (e.g., via `%b`), any standalone "Jun" or "Jul" tokens in the formatted output are
+       expanded to "June" / "July".
+     - If `abbreviate_jun_jul` is True, the output is left exactly as produced by `strftime`.
+ 
+     Args:
+         date_input (str | datetime.datetime | datetime.date):
+             The date to convert.
+         input_format (str | None):
+             Format string for parsing `date_input` when it is a string. Required if `date_input` is
+             a string.
+         output_format (str | None):
+             Format string used when returning a string. Required if `convert_to_datetime` is False.
+         convert_to_datetime (bool):
+             If True, return a `datetime.datetime`. If False, return a formatted string.
+         abbreviate_jun_jul (bool):
+             If False, expand "Jun"/"Jul" to "June"/"July" in the final formatted string.
+ 
+     Returns:
+         datetime.datetime | str:
+             A datetime object if `convert_to_datetime` is True, otherwise a formatted string.
+ 
+     Raises:
+         ValueError:
+             If `date_input` is a string and `input_format` is None, or if parsing fails.
+             If `convert_to_datetime` is False and `output_format` is None.
+         TypeError:
+             If `date_input` is not a string, date, or datetime.
+     """
+     # Parse / normalise to datetime
+     if isinstance(date_input, str):
+         if input_format is None:
+             raise ValueError(
+                 "input_format must be provided when date_input is a string"
+             )
+         try:
+             dt = datetime.strptime(date_input, input_format)
+         except ValueError as e:
+             raise ValueError(
+                 f"Could not parse date_input={date_input!r} with input_format={input_format!r}"
+             ) from e
+     elif isinstance(date_input, datetime):
+         dt = date_input
+     elif isinstance(date_input, date):
+         dt = datetime.combine(date_input, datetime.min.time())
+     else:
+         raise TypeError("date_input must be a str, datetime.datetime, or datetime.date")
+ 
+     if convert_to_datetime:
+         return dt
+ 
+     if output_format is None:
+         raise ValueError(
+             "output_format must be provided when convert_to_datetime is False"
+         )
+ 
+     output_str = dt.strftime(output_format)
+ 
+     if not abbreviate_jun_jul:
+         output_str = re.sub(r"\bJun\b", "June", output_str)
+         output_str = re.sub(r"\bJul\b", "July", output_str)
+ 
+     return output_str
+ 
+ 
  def convert_date_string_to_text_string(
      date_str: str,
      date_format: Optional[str] = "%Y-%m-%d",
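
A short usage sketch for the new `convert_date`, derived from the code above; the import path matches the module shown in the files-changed list:

    from gov_uk_dashboards.lib.datetime_functions.datetime_functions import convert_date

    # Formatted-string mode; "Jun" is widened because abbreviate_jun_jul defaults to False
    convert_date("2024-06-01", input_format="%Y-%m-%d", output_format="%d %b %Y")
    # -> "01 June 2024"

    # Datetime mode; output_format is ignored
    convert_date("2024-06-01", input_format="%Y-%m-%d", convert_to_datetime=True)
    # -> datetime.datetime(2024, 6, 1, 0, 0)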
@@ -163,6 +248,11 @@ def get_todays_date_for_downloaded_csv() -> str:
      return str(datetime.today().strftime("%d/%m/%Y"))
  
  
+ def convert_datetime_to_dd_mm_yyy_string(input_datetime: datetime):
+     """convert datetime to DD/MM/YYYY string"""
+     return str(input_datetime.strftime("%d/%m/%Y"))
+ 
+ 
  def date_string_is_full_month_and_full_year_format(date_string: str):
      """checks if a date string is in the format $B $Y"""
      try:
@@ -343,3 +433,31 @@ def convert_financial_quarter_to_financial_quarter_text(quarter: int):
          4: "Q4 (Jan-Mar)",
      }
      return quarter_map[quarter]
+ 
+ 
+ def financial_year_january(date_str: str) -> str:
+     """
+     Given a date string, return the 1st of January (formatted as "YYYY-MM-DD")
+     for the financial year that the date falls into.
+ 
+     UK financial years run from April 1st to March 31st:
+     - If the date is between January 1st and March 31st (inclusive),
+       the financial year ends in that same year.
+     - Otherwise, the financial year ends in the following year.
+ 
+     Args:
+         date_str (str): Date string in the format "YYYY-MM-DD".
+ 
+     Returns:
+         str: A string in the format "YYYY-MM-DD" representing January 1st
+         of the relevant financial year.
+ 
+     Examples:
+         >>> financial_year_january("2024-02-15")
+         '2024-01-01'
+         >>> financial_year_january("2024-05-10")
+         '2025-01-01'
+     """
+     dt = datetime.strptime(date_str, "%Y-%m-%d").date()
+     year = dt.year if dt.month < 4 else dt.year + 1
+     return f"{year}-01-01"
gov_uk_dashboards/lib/download_functions/download_csv_with_headers.py
@@ -1,4 +1,5 @@
  """download_csv_with_headers"""
+ 
  import io
  import polars as pl
  from dash import dcc
@@ -8,108 +9,130 @@ from gov_uk_dashboards.lib.datetime_functions.datetime_functions import (
  
  
  def download_csv_with_headers(
-     list_of_df_title_subtitle_dicts: list[dict[str, str]],
+     list_of_df_title_subtitle_dicts: list[dict],
      name: str,
      sensitivity_label: str,
+     last_updated_date: str = None,
      additional_text: list[str] = None,
- ):  # pylint: disable=too-many-locals
-     """Adds a header above multiple dataframes,
-     separates them with blank rows, and downloads as CSV.
- 
-     Args:
-         list_of_df_title_subtitle_dicts (list[dict[]]): List of dictionaries containing keys: "df",
-             "title" and "subtitle"
-         name (str): Filename for CSV.
-         sensitivity_label (str): Sensitivity label. Str or None.
-         additional_text (list[str]): Additional text to inlcude in headers after data downloaded.
-             Str or None.
+ ):
      """
- 
+     Prepares and returns a CSV download with one or more DataFrames, each optionally preceded by
+     titles, subtitles, footnotes, and metadata headers (e.g. sensitivity label, download date).
+ 
+     Parameters:
+         list_of_df_title_subtitle_dicts (list[dict]):
+             A list of dictionaries, each containing a Polars DataFrame ('df'), a title,
+             and optionally a subtitle and footnote.
+         name (str):
+             The filename (without extension) for the downloaded CSV.
+         sensitivity_label (str):
+             A label (e.g. OFFICIAL-SENSITIVE) to prepend at the top of the file.
+         last_updated_date (str, optional):
+             A string to indicate when the data was last updated.
+         additional_text (list[str], optional):
+             Extra lines to include before the data sections (e.g. disclaimers).
+ 
+     Returns:
+         flask.Response: A CSV file response using Dash's `dcc.send_string`.
+     """
+     # pylint: disable=too-many-locals
      csv_buffer = io.StringIO()
+     max_columns = _get_number_of_max_columns_from_all_dfs(
+         list_of_df_title_subtitle_dicts
+     )
+     # Get first df and first col to use to add header data
+     first_df = list_of_df_title_subtitle_dicts[0]["df"]
+     first_col = first_df.columns[0]
  
-     column_list = list(list_of_df_title_subtitle_dicts[0]["df"].columns)
-     column_dict = {column_name: column_name for column_name in column_list}
-     blank_dict = {
-         f"{i}": None
-         for i in range(
-             _get_number_of_max_columns_from_all_dfs(list_of_df_title_subtitle_dicts)
-             - len(column_list)
-         )
-     }  # range is missing columns in first df compared to max columns across all dfs
- 
-     subtitle = list_of_df_title_subtitle_dicts[0]["subtitle"]
-     footnote = list_of_df_title_subtitle_dicts[0].get("footnote")
-     header_data = [
-         {column_list[0]: "Date downloaded: " + get_todays_date_for_downloaded_csv()},
-         *(
-             [{column_list[0]: text} for text in additional_text]
-             + [{column_list[0]: None}]
-             if additional_text is not None
-             else []
-         ),
-         {column_list[0]: list_of_df_title_subtitle_dicts[0]["title"]},
-         *(
-             [{column_list[0]: subtitle}] if subtitle is not None else []
-         ),  # Uses unpacking (*) to add the subtitle row if subtitle is not None. If subtitle is
-         # None, it unpacks an empty list, effectively skipping the row.
-         {column_list[0]: None},  # Blank row
-         *([{column_list[0]: footnote}] if footnote is not None else []),
-         {**column_dict, **blank_dict},
-     ]
+     header_data = []
  
      if sensitivity_label:
-         header_data = [{column_list[0]: sensitivity_label}] + header_data
- 
-     pl.DataFrame(header_data).write_csv(csv_buffer, include_header=False)
+         header_data.append({first_col: sensitivity_label})
+ 
+     header_data.extend(
+         [
+             {first_col: f"Date downloaded: {get_todays_date_for_downloaded_csv()}"},
+             *(
+                 [{first_col: f"Last updated: {last_updated_date}"}]
+                 if last_updated_date
+                 else []
+             ),
+             {first_col: None},
+             *(
+                 [{first_col: text} for text in additional_text] + [{first_col: None}]
+                 if additional_text
+                 else []
+             ),
+             {first_col: list_of_df_title_subtitle_dicts[0]["title"]},
+             *(
+                 [{first_col: list_of_df_title_subtitle_dicts[0]["subtitle"]}]
+                 if list_of_df_title_subtitle_dicts[0]["subtitle"]
+                 else []
+             ),
+             {first_col: None},
+             *(
+                 [{first_col: list_of_df_title_subtitle_dicts[0].get("footnote")}]
+                 if list_of_df_title_subtitle_dicts[0].get("footnote")
+                 else []
+             ),
+         ]
+     )
+     _write_padded_rows_to_buffer(header_data, max_columns, csv_buffer)
      for i, data in enumerate(list_of_df_title_subtitle_dicts):
-         df = data["df"]
-         title = data["title"]
-         subtitle = data["subtitle"]
-         footnote = data.get("footnote")
-         if i > 0 and title is not None:
-             column_dict = {column_name: column_name for column_name in list(df.columns)}
-             header_data = [
-                 {column_list[0]: title},
-                 *(
-                     [{column_list[0]: subtitle}] if subtitle is not None else []
-                 ),  # Uses unpacking (*) to add the subtitle row if subtitle is not None. If
-                 # subtitle is None, it unpacks an empty list, effectively skipping the row.
-                 {column_list[0]: None},  # Blank row
-                 *([{column_list[0]: footnote}] if footnote is not None else []),
+         df, title, subtitle, footnote = (
+             data["df"],
+             data["title"],
+             data["subtitle"],
+             data.get("footnote"),
+         )
+ 
+         if i > 0 and title:
+             meta_rows = [
+                 {first_col: title},
+                 *([{first_col: subtitle}] if subtitle else []),
+                 {first_col: None},
+                 *([{first_col: footnote}] if footnote else []),
              ]
-             pl.DataFrame(header_data).write_csv(csv_buffer, include_header=False)
-         df.write_csv(csv_buffer, include_header=i > 0)
+             _write_padded_rows_to_buffer(meta_rows, max_columns, csv_buffer)
+ 
+         # Pad DF if needed
+         if df.shape[1] < max_columns:
+             column_names = list(df.columns)
+             header_row = {col: col for col in column_names}
+             data_rows = df.to_dicts()
+             data_rows.insert(0, header_row)
+             padded_rows = [pad_row(row, max_columns) for row in data_rows]
+             output_df = pl.DataFrame(padded_rows)
+             output_df.columns = [str(i) for i in range(max_columns)]
+             output_df.write_csv(csv_buffer, include_header=False)
+         else:
+ 
+             df.write_csv(csv_buffer)
  
          if i < len(list_of_df_title_subtitle_dicts) - 1:
-             blank_row = pl.DataFrame({df.columns[0]: [None]})
+             blank_row = pl.DataFrame([pad_row({}, max_columns)])
              blank_row.write_csv(csv_buffer, include_header=False)
  
+     # Return CSV for download
      csv_buffer.seek(0)
-     csv_data = (
-         "\ufeff" + csv_buffer.getvalue()
-     )  # Adding \ufeff ensures the correct character encoding is detected for £
- 
+     csv_data = "\ufeff" + csv_buffer.getvalue()
      return dcc.send_string(csv_data, f"{name}.csv")
  
  
- def _get_number_of_max_columns_from_all_dfs(list_of_df_title_subtitle_dicts):
-     max_columns = 0
-     index_of_max_cols = -1
- 
-     for idx, dic in enumerate(list_of_df_title_subtitle_dicts):
-         # Get the DataFrame
-         df = dic["df"]
+ def pad_row(row: dict, max_columns: int) -> dict:
+     """Pad a row with None values to match max column width."""
+     padded = list(row.values()) + [None] * (max_columns - len(row))
+     return {str(i): val for i, val in enumerate(padded)}
  
-         # Get the number of columns
-         num_columns = df.shape[1]
  
-         # Update if this DataFrame has more columns
-         if num_columns > max_columns:
-             max_columns = num_columns
-             index_of_max_cols = idx
+ def _write_padded_rows_to_buffer(
+     rows: list[dict], max_columns: int, buffer: io.StringIO
+ ):
+     """Pad and write a list of rows to the CSV buffer."""
+     padded_rows = [pad_row(row, max_columns) for row in rows]
+     pl.DataFrame(padded_rows).write_csv(buffer, include_header=False)
  
-     max_columns = len(
-         list(list_of_df_title_subtitle_dicts[index_of_max_cols]["df"].columns)
-     )
  
-     return max_columns
+ def _get_number_of_max_columns_from_all_dfs(list_of_dicts: list[dict]) -> int:
+     """Get max column count across all DataFrames in list."""
+     return max(len(data["df"].columns) for data in list_of_dicts)
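
A hedged sketch of calling the reworked function, with hypothetical titles and a small Polars frame. Note that, per the code above, each dictionary must carry "df", "title" and "subtitle" keys (subtitle may be falsy), while "footnote" is read with .get and can be omitted:

    import polars as pl

    df = pl.DataFrame({"Area": ["E1", "E2"], "Value": [10, 20]})
    download_csv_with_headers(
        list_of_df_title_subtitle_dicts=[
            {"df": df, "title": "Example table", "subtitle": None}  # hypothetical content
        ],
        name="example_download",
        sensitivity_label="OFFICIAL-SENSITIVE",
        last_updated_date="01/06/2025",  # hypothetical date string
    )
    # The return value is a dcc.send_string payload, suitable for a dcc.Download callback.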
gov_uk_dashboards/lib/http_headers.py
@@ -1,6 +1,5 @@
  """http_headers"""
  
- 
  import os
  import dash
  
@@ -12,7 +11,16 @@ def setup_application_http_response_headers(dash_app: dash.Dash):
      @server.after_request
      def add_headers(response):
          content_security_policy = (
-             "default-src 'self' 'unsafe-eval' 'unsafe-inline' data:"
+             "default-src 'self' 'unsafe-eval' 'unsafe-inline' data:; "
+             "script-src 'self' 'unsafe-inline' https://*.googletagmanager.com "
+             "https://cdn.jsdelivr.net/npm/web-vitals@4.2.4/dist/web-vitals.min.js "
+             "https://cdn.jsdelivr.net/npm/html2canvas@1.4.1/dist/html2canvas.min.js "
+             "https://*.google-analytics.com https://js.monitor.azure.com/scripts/b/ai.3.gbl.min.js;"
+             "connect-src 'self' https://*.googletagmanager.com https://*.google-analytics.com "
+             "https://*.analytics.google.com https://js.monitor.azure.com "
+             "https://*.applicationinsights.azure.com/v2/track; "
+             "img-src 'self' https://*.googletagmanager.com https://*.google-analytics.com "
+             "https://*.analytics.google.com data: https://*.tile.openstreetmap.org; "
          )
          frame_ancestors = os.environ.get("ALLOWED_FRAME_ANCESTORS")
          if frame_ancestors:
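
For context, a minimal sketch of wiring this hook into an app; only the function name and signature come from the hunk header above, the rest is illustrative:

    import dash
    from gov_uk_dashboards.lib.http_headers import setup_application_http_response_headers

    app = dash.Dash(__name__)
    setup_application_http_response_headers(app)
    # add_headers now runs via @server.after_request, so every Flask response
    # carries the Content-Security-Policy built above.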
gov_uk_dashboards/lib/logging.py
@@ -1,4 +1,5 @@
  """Module for logging configuration"""
+ 
  import os
  
  import logging
gov_uk_dashboards/lib/testing_functions/barchart_data_test_assertions.py
@@ -0,0 +1,48 @@
+ """test_barchart_data"""
+ 
+ import polars as pl
+ from gov_uk_dashboards.components.plotly.stacked_barchart import StackedBarChart
+ 
+ 
+ def validate_barchart_instance(barchart_instance: StackedBarChart, **context):
+     """
+     Helper function to validate a barchart_instance dataframe.
+ 
+     Args:
+         barchart_instance: The barchart object with dataframe and metadata
+         **context: Arbitrary key-value pairs (e.g., la="LA1", expenditure_area="Health").
+             These will be included in the assertion message if provided.
+     """
+     barchart_df = barchart_instance.df
+     trace_name_list = barchart_instance.trace_name_list
+     trace_name_column = barchart_instance.trace_name_column
+     x_axis_column = barchart_instance.x_axis_column
+     unique_x_axis_values = set(barchart_df[x_axis_column].unique())
+     context_str = ", ".join(f"{k}={v}" for k, v in context.items())
+ 
+     for trace_name in trace_name_list:
+         if not trace_name_column:
+             df = barchart_df
+         else:
+             trace_values = barchart_df[trace_name_column].unique().to_list()
+             if trace_name not in trace_values:
+                 raise AssertionError(
+                     f"Trace '{trace_name}' expected in column '{trace_name_column}' "
+                     f"but not found for {context_str}"
+                 )
+             df = barchart_df.filter(pl.col(trace_name_column) == trace_name)
+ 
+         # Check trace has at least one row
+         assert df.height > 0, f"Trace '{trace_name}' has no data for {context_str}"
+ 
+         # Check trace has all required x-axis values
+         x_values_for_trace = set(df[x_axis_column].unique())
+         assert x_values_for_trace == unique_x_axis_values, (
+             f"{unique_x_axis_values - x_values_for_trace} missing for trace '{trace_name}' for "
+             f"{context_str}"
+         )
+ 
+         # Check x-axis uniqueness
+         assert (
+             df[x_axis_column].is_unique().to_numpy()[0]
+         ), f"x_axis-column: {x_axis_column} contains duplicate values. For {context_str}"