From 14f7a65806b71f769f1474a382c8344184ca776f Mon Sep 17 00:00:00 2001
From: m26dvd <31007572+m26dvd@users.noreply.github.com>
Date: Sun, 10 Nov 2024 22:30:46 +0000
Subject: [PATCH] fix: Bradford MDC

fix: #984
---
 .../uk_bin_collection/councils/BradfordMDC.py | 42 ++++++++++++++++---
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/uk_bin_collection/uk_bin_collection/councils/BradfordMDC.py b/uk_bin_collection/uk_bin_collection/councils/BradfordMDC.py
index 47ff058130..0617ab2959 100644
--- a/uk_bin_collection/uk_bin_collection/councils/BradfordMDC.py
+++ b/uk_bin_collection/uk_bin_collection/councils/BradfordMDC.py
@@ -1,3 +1,5 @@
+import re
+
 import requests
 from bs4 import BeautifulSoup
 
@@ -89,12 +91,40 @@ def parse_data(self, page: str, **kwargs) -> dict:
                 )
             ).strftime(date_format)
 
-            # Build data dict for each entry
-            dict_data = {
-                "type": bin_type,
-                "collectionDate": bin_date,
-            }
-            data["bins"].append(dict_data)
+            # Build data dict for each entry
+            dict_data = {
+                "type": bin_type,
+                "collectionDate": bin_date,
+            }
+            data["bins"].append(dict_data)
+
+        for bin in soup.find_all(attrs={"id": re.compile(r"CTID-D0TUYGxO-\d+-A")}):
+            dict_data = {
+                "type": "General Waste",
+                "collectionDate": datetime.strptime(
+                    bin.text.strip(),
+                    "%a %b %d %Y",
+                ).strftime(date_format),
+            }
+            data["bins"].append(dict_data)
+        for bin in soup.find_all(attrs={"id": re.compile(r"CTID-d3gapLk-\d+-A")}):
+            dict_data = {
+                "type": "Recycling Waste",
+                "collectionDate": datetime.strptime(
+                    bin.text.strip(),
+                    "%a %b %d %Y",
+                ).strftime(date_format),
+            }
+            data["bins"].append(dict_data)
+        for bin in soup.find_all(attrs={"id": re.compile(r"CTID-L8OidMPA-\d+-A")}):
+            dict_data = {
+                "type": "Garden Waste (Subscription Only)",
+                "collectionDate": datetime.strptime(
+                    bin.text.strip(),
+                    "%a %b %d %Y",
+                ).strftime(date_format),
+            }
+            data["bins"].append(dict_data)
 
         data["bins"].sort(
             key=lambda x: datetime.strptime(x.get("collectionDate"), date_format)