Automatically exported from code.google.com/p/planningalerts
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

86 rivejä
3.0 KiB

  1. import urllib2
  2. import urllib
  3. import urlparse
  4. import datetime, time
  5. import cgi
  6. import re
  7. from BeautifulSoup import BeautifulSoup
  8. from PlanningUtils import PlanningApplication, \
  9. PlanningAuthorityResults, \
  10. getPostcodeFromText
# strftime pattern for the dd/mm/yyyy dates the council's search URL expects.
date_format = "%d/%m/%Y"
  12. class HerefordshireParser:
  13. comments_email_address = "Developmentcontrol@barnsley.gov.uk"
  14. def __init__(self, *args):
  15. self.authority_name = "Herefordshire Council"
  16. self.authority_short_name = "Herefordshire"
  17. self.base_url = "http://www.herefordshire.gov.uk/gis/planListResults.aspx?pc=&address=&querytype=current&startdate=%(date)s&enddate=%(date)s&startrecord=0"
  18. #As we are going to the info page, we may as well pick up the comment url from there.
  19. # self.comment_url = "http://www.herefordshire.gov.uk/gis/planDetailCommentAddress.aspx?ApplicationId=%s" # This need the reference inserting
  20. self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
  21. def getResultsByDayMonthYear(self, day, month, year):
  22. search_day = datetime.date(year, month, day)
  23. post_data = urllib.urlencode(
  24. (("show", "0"),
  25. ("Go", "GO"),
  26. )
  27. )
  28. # Now get the search page
  29. response = urllib2.urlopen(self.base_url %{"date": search_day.strftime(date_format)})
  30. soup = BeautifulSoup(response.read())
  31. if not soup.find(text=re.compile("Sorry, no matches found")):
  32. # There were apps for this date
  33. trs = soup.find("table", {"class": "gis_table"}).findAll("tr")[2:]
  34. for tr in trs:
  35. application = PlanningApplication()
  36. application.date_received = search_day
  37. application.info_url = urlparse.urljoin(self.base_url, tr.a['href'])
  38. application.council_reference = tr.a.string
  39. # application.comment_url = self.comment_url %(application.council_reference)
  40. tds = tr.findAll("td")
  41. application.address = tds[1].string
  42. application.postcode = getPostcodeFromText(application.address)
  43. # This just gets us an initial segment of the description.
  44. # We are going to have to download the info page...
  45. #application.description = tds[2].string.strip()
  46. info_response = urllib.urlopen(application.info_url)
  47. info_soup = BeautifulSoup(info_response.read())
  48. application.description = info_soup.find(text="Proposal:").findNext("td").string.strip()
  49. application.comment_url = urlparse.urljoin(self.base_url, info_soup.find("a", title="Link to Planning Application Comment page")['href'])
  50. self._results.addApplication(application)
  51. return self._results
  52. def getResults(self, day, month, year):
  53. return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
if __name__ == '__main__':
    # Smoke test: scrape a known date and print the XML results.
    parser = HerefordshireParser()
    print parser.getResults(31,8,2008)