Skip to content

Commit

Permalink
Merge pull request #917 from m26dvd/master
Browse files Browse the repository at this point in the history
  • Loading branch information
robbrad authored Oct 21, 2024
2 parents 4f93c5a + d328286 commit ccc3503
Show file tree
Hide file tree
Showing 6 changed files with 316 additions and 2 deletions.
30 changes: 29 additions & 1 deletion uk_bin_collection/tests/input.json
Original file line number Diff line number Diff line change
Expand Up @@ -299,6 +299,12 @@
"wiki_name": "Cornwall Council",
"wiki_note": "Use https://uprn.uk/ to find your UPRN."
},
"CoventryCityCouncil": {
"url": "https://www.coventry.gov.uk/directory-record/56384/abberton-way-",
"wiki_command_url_override": "https://www.coventry.gov.uk/directory_record/XXXXXX/XXXXXX",
"wiki_name": "Coventry City Council",
"wiki_note": "Follow the instructions [here](https://www.coventry.gov.uk/bin-collection-calendar) until you get the page that shows the weekly collections for your address then copy the URL and replace the URL in the command."
},
"CrawleyBoroughCouncil": {
"house_number": "9701076",
"skip_get_url": true,
Expand Down Expand Up @@ -537,6 +543,13 @@
"wiki_name": "Halton Borough Council",
"wiki_note": "Pass the House number and post code"
},
"HarboroughDistrictCouncil": {
"url": "https://www.harborough.gov.uk",
"wiki_command_url_override": "https://www.harborough.gov.uk",
"uprn": "100030489072",
"wiki_name": "Harborough District Council",
"wiki_note": "You will need to use [FindMyAddress](https://www.findmyaddress.co.uk/search) to find the UPRN."
},
"HaringeyCouncil": {
"skip_get_url": true,
"uprn": "100021203052",
Expand Down Expand Up @@ -741,7 +754,7 @@
"wiki_note": "Pass the house name/number plus the name of the street with the postcode parameter, wrapped in double quotes. Check the address in the web site first. This version will only pick the first SHOW button returned by the search or if it is fully unique. The search is not very predictable (e.g. house number 4 returns 14,24,4,44 etc.)."
},
"MidlothianCouncil": {
"url": "https://www.midlothian.gov.uk/directory_record/92551426/glenesk_bonnyrigg_eh19_3je",
"url": "https://www.midlothian.gov.uk/directory_record/92594377/glenesk_bonnyrigg_eh19_3je",
"wiki_command_url_override": "https://www.midlothian.gov.uk/directory_record/XXXXXX/XXXXXX",
"wiki_name": "Midlothian Council",
"wiki_note": "Follow the instructions [here](https://www.midlothian.gov.uk/info/1054/bins_and_recycling/343/bin_collection_days) until you get the page that shows the weekly collections for your address then copy the URL and replace the URL in the command."
Expand Down Expand Up @@ -1229,6 +1242,14 @@
"url": "https://tdcws01.tandridge.gov.uk/TDCWebAppsPublic/tfaBranded/408?utm_source=pressrelease&utm_medium=smposts&utm_campaign=check_my_bin_day",
"wiki_name": "Tandridge District Council"
},
"TeignbridgeCouncil": {
"url": "https://www.google.co.uk",
"wiki_command_url_override": "https://www.google.co.uk",
"uprn": "100040338776",
"web_driver": "http://selenium:4444",
"wiki_name": "Teignbridge Council",
"wiki_note": "Use Google as the URL, because the real council URL breaks the integration. You will need to use [FindMyAddress](https://www.findmyaddress.co.uk/search) to find the UPRN."
},
"TelfordAndWrekinCouncil": {
"skip_get_url": true,
"uprn": "000452015013",
Expand Down Expand Up @@ -1338,6 +1359,13 @@
"wiki_name": "Warwick District Council",
"wiki_note": "Replace XXXXXXXX with UPRN."
},
"WatfordBoroughCouncil": {
"url": "https://www.watford.gov.uk",
"wiki_command_url_override": "https://www.watford.gov.uk",
"uprn": "100080942183",
"wiki_name": "Watford Borough Council",
"wiki_note": "You will need to use [FindMyAddress](https://www.findmyaddress.co.uk/search) to find the UPRN."
},
"WaverleyBoroughCouncil": {
"house_number": "23",
"postcode": "GU9 9QG",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import requests
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Scrape Coventry City Council bin collection dates.

        ``page`` is the fetched directory-record response (a
        requests.Response despite the ``str`` annotation inherited from
        the base class — the code reads ``page.content``).

        Returns:
            dict: ``{"bins": [{"type": ..., "collectionDate": "dd/mm/YYYY"}, ...]}``
        """
        bindata = {"bins": []}
        curr_date = datetime.today()

        soup = BeautifulSoup(page.content, features="html.parser")
        # The directory-record page only links to the actual schedule page.
        # NOTE: ``string=`` is the modern spelling of bs4's deprecated ``text=``.
        button = soup.find("a", string="Find out which bin will be collected when.")

        # Guard against the link being missing entirely: the original code
        # raised TypeError on ``button["href"]`` when find() returned None.
        if button is not None and button.get("href"):
            URI = button["href"]
            # Fetch the page that lists the weekly collections.
            response = requests.get(URI)
            soup = BeautifulSoup(response.content, features="html.parser")
            divs = soup.find_all("div", {"class": "editor"})
            for div in divs:
                for li in div.find_all("li"):
                    # Each entry looks like "Monday 21 October: Grey and Green".
                    collection = li.text.split(": ")
                    # The site omits the year, so assume the current one...
                    collection_date = datetime.strptime(
                        collection[0],
                        "%A %d %B",
                    ).replace(year=curr_date.year)
                    # ...and roll January dates seen in December into next year.
                    if curr_date.month == 12 and collection_date.month == 1:
                        collection_date = collection_date + relativedelta(years=1)
                    for bin_type in collection[1].split(" and "):
                        bindata["bins"].append(
                            {
                                "type": bin_type,
                                "collectionDate": collection_date.strftime("%d/%m/%Y"),
                            }
                        )
        else:
            print("Failed to find bin schedule")

        return bindata
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
import requests
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Fetch and parse Harborough District Council bin collections.

        Posts the UPRN to the fccenvironment address endpoint and reads
        the "next scheduled bin collection days" list from the response.

        Kwargs:
            uprn: property UPRN (validated by ``check_uprn``).

        Returns:
            dict: ``{"bins": [...]}`` sorted by collection date.
        """
        user_uprn = kwargs.get("uprn")
        check_uprn(user_uprn)
        bindata = {"bins": []}

        URI = "https://harborough.fccenvironment.co.uk/detail-address"

        headers = {
            "Content-Type": "application/json",
            "User-Agent": "Mozilla/5.0",
            "Referer": "https://harborough.fccenvironment.co.uk/",
        }
        params = {"Uprn": user_uprn}
        response = requests.post(URI, headers=headers, json=params)

        soup = BeautifulSoup(response.content, features="html.parser")
        bin_collection = soup.find(
            "div", {"class": "blocks block-your-next-scheduled-bin-collection-days"}
        )
        for li in bin_collection.find_all("li"):
            # Entries look like "Refuse 21 October 2024": split the trailing
            # date off the bin-type text.
            split = re.match(r"(.+)\s(\d{1,2} \w+ \d{4})$", li.text)
            if split is None:
                # Not a collection entry (e.g. an informational item) — skip.
                # The original bare ``except:`` silently masked this case.
                continue
            bin_type = split.group(1).strip()
            date = split.group(2)
            try:
                dict_data = {
                    "type": bin_type,
                    "collectionDate": datetime.strptime(
                        date,
                        "%d %B %Y",
                    ).strftime("%d/%m/%Y"),
                }
            except ValueError:
                # Date text did not match the expected format — skip the item.
                continue
            bindata["bins"].append(dict_data)

        bindata["bins"].sort(
            key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
        )

        return bindata
59 changes: 59 additions & 0 deletions uk_bin_collection/uk_bin_collection/councils/TeignbridgeCouncil.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Scrape Teignbridge District Council bin collections via Selenium.

        Kwargs:
            uprn: property UPRN (validated by ``check_uprn``).
            web_driver: remote selenium endpoint, or None for a local driver.
            headless: whether to run the browser headless.

        Returns:
            dict: ``{"bins": [...]}`` sorted by collection date.
        """
        user_uprn = kwargs.get("uprn")
        web_driver = kwargs.get("web_driver")
        headless = kwargs.get("headless")
        check_uprn(user_uprn)
        bindata = {"bins": []}

        URI = f"https://www.teignbridge.gov.uk/repositories/hidden-pages/bin-finder?uprn={user_uprn}"

        driver = create_webdriver(web_driver, headless, None, __name__)
        try:
            driver.get(URI)
            soup = BeautifulSoup(driver.page_source, features="html.parser")
        finally:
            # Always release the browser session (the original leaked it).
            driver.quit()

        # Each <h3> holds a collection date; the matching "binInfoContainer"
        # div holds the bin types for that date. (The original comments had
        # these two the wrong way round.)
        collection_dates = soup.find_all("h3")
        bin_type_containers = soup.find_all("div", {"class": "binInfoContainer"})

        # zip() pairs dates with their containers and stops at the shorter
        # list, so a stray <h3> elsewhere on the page can no longer raise
        # IndexError as the original indexed access could.
        for date, container in zip(collection_dates, bin_type_containers):
            collection_date = date.get_text(strip=True)

            for bin_type in container.find_all("div"):
                dict_data = {
                    "type": bin_type.text.strip(),
                    # Dates render like "21 October 2024Monday", hence the
                    # unusual "%d %B %Y%A" format string.
                    "collectionDate": datetime.strptime(
                        collection_date,
                        "%d %B %Y%A",
                    ).strftime("%d/%m/%Y"),
                }
                bindata["bins"].append(dict_data)

        bindata["bins"].sort(
            key=lambda x: datetime.strptime(x.get("collectionDate"), "%d/%m/%Y")
        )

        return bindata
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import time

import requests
from bs4 import BeautifulSoup

from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import AbstractGetBinDataClass


# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Retrieve Watford Borough Council collections for a UPRN.

        Authenticates against the achieveservice portal to obtain a
        session id, calls the form-broker runLookup API with the UPRN,
        and parses the HTML fragment it returns into the standard
        ``{"bins": [...]}`` structure.
        """
        uprn = kwargs.get("uprn")
        check_uprn(uprn)
        bindata = {"bins": []}

        SESSION_URL = "https://watfordbc-self.achieveservice.com/authapi/isauthenticated?uri=https%253A%252F%252Fwatfordbc-self.achieveservice.com%252Fen%252Fservice%252FBin_collections%253Faccept%253Dyes%2526consentMessageIds%255B%255D%253D4&hostname=watfordbc-self.achieveservice.com&withCredentials=true"

        API_URL = "https://watfordbc-self.achieveservice.com/apibroker/runLookup"

        payload = {
            "formValues": {
                "Address": {
                    "echoUprn": {"value": uprn},
                },
            },
        }

        request_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "User-Agent": "Mozilla/5.0",
            "X-Requested-With": "XMLHttpRequest",
            "Referer": "https://watfordbc-self.achieveservice.com/fillform/?iframe_id=fillform-frame-1&db_id=",
        }

        session = requests.session()
        auth_response = session.get(SESSION_URL)
        auth_response.raise_for_status()
        # The runLookup endpoint requires the session id from the auth call.
        sid = auth_response.json()["auth-session"]

        query_params = {
            "id": "5e79edf15b2ec",
            "repeat_against": "",
            "noRetry": "true",
            "getOnlyTokens": "undefined",
            "log_id": "",
            "app_name": "AF-Renderer::Self",
            # current unix time in milliseconds
            "_": str(int(time.time() * 1000)),
            "sid": sid,
        }
        lookup_response = session.post(
            API_URL, json=payload, headers=request_headers, params=query_params
        )
        lookup_response.raise_for_status()
        result = lookup_response.json()
        dispHTML = result["integration"]["transformed"]["rows_data"]["0"]["dispHTML"]
        soup = BeautifulSoup(dispHTML, features="html.parser")

        # Each <li> describes one service: <h3> holds the bin type and
        # <strong> holds its next collection date.
        for item in soup.find_all("li"):
            bindata["bins"].append(
                {
                    "type": item.find("h3").text,
                    "collectionDate": item.find("strong").text.strip(),
                }
            )

        return bindata
Loading

0 comments on commit ccc3503

Please sign in to comment.