Automatically exported from code.google.com/p/planningalerts
You can't select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

Lichfield.py 2.7 KiB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374
  1. """
  2. Lichfield District council has no nice search page, but it does have a page
  3. which has the applications received in the last 7 days, so we'll use this,
  4. ignoring the date passed in.
  5. """
  6. import urllib2
  7. import urlparse
  8. import re
  9. import datetime
  10. import BeautifulSoup
  11. from PlanningUtils import PlanningApplication, \
  12. PlanningAuthorityResults, \
  13. getPostcodeFromText
  14. date_format = "%d/%m/%Y"
  15. date_received_re = re.compile("(\d\d?)[a-z]{2} ([a-zA-Z]*) (\d{4})")
class LichfieldParser:
    """Scraper for planning applications published by Lichfield District Council.

    The council site has no searchable archive, only a page listing the
    applications received in the last 7 days, so the date arguments accepted
    by the methods below are ignored.
    """

    def __init__(self, *args):
        # Extra positional args are accepted (and ignored) so this parser can
        # be constructed the same way as the other council parsers.
        self.authority_name = "Lichfield District Council"
        self.authority_short_name = "Lichfield"
        # Page listing applications received in the last 7 days.
        self.base_url = "http://www.lichfielddc.gov.uk/site/scripts/planning_list.php"
        # Accumulator for scraped applications; also used for XML output.
        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)

    def getResultsByDayMonthYear(self, day, month, year):
        # NOTE: day/month/year are ignored -- the council page always shows
        # the last 7 days' applications regardless of date.
        response = urllib2.urlopen(self.base_url)
        soup = BeautifulSoup.BeautifulSoup(response.read())

        # One row per application in the results table.
        trs = soup.find("table", {"class": "planningtable"}).tbody.findAll("tr")

        for tr in trs:
            application = PlanningApplication()
            tds = tr.findAll("td")

            # First cell holds a link to the detail page; its text is the
            # council reference. The href may be relative, hence urljoin.
            application.council_reference = tds[0].a.string.strip()
            application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])
            # Second cell: address text. The first whitespace-separated token
            # is dropped -- presumably a label or prefix on the live page;
            # TODO(review): confirm against current page markup.
            application.address = ' '.join(tds[1].contents[1].strip().split()[1:])
            application.postcode = getPostcodeFromText(application.address)

            # We're going to need to download the info page in order to get
            # the comment link, the date received, and the description.
            info_response = urllib2.urlopen(application.info_url)
            info_soup = BeautifulSoup.BeautifulSoup(info_response.read())

            # Description is the text node following the "Proposal" label.
            application.description = info_soup.find(text="Proposal").findNext(text=True).strip()

            # The valid date looks like "Monday, 17th November 2008"; keep
            # the part after the comma (raises IndexError if no comma).
            date_received_str = info_soup.find(text="Date Application Valid").findNext(text=True).split(",")[1].strip()
            # This is a nasty botch, but the easiest way I can see to get a
            # date out of this is to make another string and use strptime.
            better_date_str = "%s %s %s" %date_received_re.match(date_received_str).groups()
            application.date_received = datetime.datetime.strptime(better_date_str, "%d %B %Y").date()

            # Comment link is matched by its exact title attribute.
            application.comment_url = info_soup.find("a", title="Comment on this planning application.")['href']

            self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        # Scrape (date args are ignored, see above) and serialize to XML.
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
  47. if __name__ == '__main__':
  48. parser = LichfieldParser()
  49. print parser.getResults(20,11,2008)