Automatically exported from code.google.com/p/planningalerts

Crawley.py

import urllib2
import cgi
import urlparse
import datetime, time

import BeautifulSoup

from PlanningUtils import PlanningApplication, PlanningAuthorityResults

date_format = "%d/%m/%Y"

class CrawleyParser:
    comment_url_template = "http://www.crawley.gov.uk/stellent/idcplg?IdcService=SS_GET_PAGE&nodeId=561&pageCSS=&pAppNo=%(pAppNo)s&pAppDocName=%(pAppDocName)s"

    def __init__(self, *args):
        self.authority_name = "Crawley Borough Council"
        self.authority_short_name = "Crawley"
        self.base_url = "http://www.crawley.gov.uk/stellent/idcplg?IdcService=SS_GET_PAGE&nodeId=560&is_NextRow=1&accept=yes&strCSS=null&pApplicationNo=&pProposal=&pLocation=&pPostcode=&pWard=&pDateType=received&pDayFrom=%(dayFrom)s&pMonthFrom=%(monthFrom)s&pYearFrom=%(yearFrom)s&pDayTo=%(dayTo)s&pMonthTo=%(monthTo)s&pYearTo=%(yearTo)s&submit=Search"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)

    def getResultsByDayMonthYear(self, day, month, year):
        # Validates the date: datetime.date raises ValueError on an impossible date.
        search_day = datetime.date(year, month, day)

        # Crawley only allows from-to searches, so use the same date for both ends.
        next = self.base_url % {
            "dayFrom": day,
            "monthFrom": month,
            "yearFrom": year,
            "dayTo": day,
            "monthTo": month,
            "yearTo": year,
        }
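        # For example, the 12 June 2008 query used in __main__ below produces a
        # URL ending in:
        # ...&pDayFrom=12&pMonthFrom=6&pYearFrom=2008&pDayTo=12&pMonthTo=6&pYearTo=2008&submit=Search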
        # Now get the search page
        response = urllib2.urlopen(next)
        soup = BeautifulSoup.BeautifulSoup(response.read())

        if soup.table:  # An empty result set has no table.
            trs = soup.table.findAll("tr")[1:]  # The first row is just headers.

            for tr in trs:
                tds = tr.findAll("td")
                application = PlanningApplication()
                # The reference contains an HTML-encoded slash; decode it.
                # (The exact entity was garbled in this export; "&#47;" is assumed.)
                application.council_reference = tds[0].a.contents[0].strip().replace("&#47;", "/")
                application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])
                # urlsplit()[3] is the query-string component of the info URL.
                info_qs = cgi.parse_qs(urlparse.urlsplit(application.info_url)[3])
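                # cgi.parse_qs maps each parameter name to a *list* of values,
                # e.g. {"ssDocName": ["someDocName"]} (hypothetical value),
                # hence the [0] below.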
                comment_qs = {
                    "pAppNo": application.council_reference,
                    "pAppDocName": info_qs["ssDocName"][0],
                }
                application.comment_url = self.comment_url_template % comment_qs
                application.address = tds[1].string.strip()
                if tds[2].string:  # If a postcode is present, append it to the address too.
                    # Normalise non-breaking spaces ("&nbsp;" is assumed; the
                    # original entity was garbled in this export) before stripping.
                    application.postcode = tds[2].string.replace("&nbsp;", " ").strip()
                    application.address += ", " + application.postcode
                application.description = tds[3].string.strip()
                application.date_received = datetime.datetime(*(time.strptime(tds[4].string.strip(), date_format)[0:6]))
                self._results.addApplication(application)

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()

if __name__ == '__main__':
    parser = CrawleyParser()
    print parser.getResults(12, 6, 2008)
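
Crawley.py imports from a PlanningUtils module that is not shown on this page. Below is a minimal sketch of the interface the scraper appears to assume, inferred only from the calls above: the attribute names come from the code, but the constructor signatures and the displayXML output format are assumptions, not the project's actual implementation.

# PlanningUtils_sketch.py -- hypothetical stand-in, inferred from usage in Crawley.py
import xml.sax.saxutils

class PlanningApplication:
    """Plain record for one application; attributes are set by the parser."""
    def __init__(self):
        self.council_reference = None
        self.address = None
        self.postcode = None
        self.description = None
        self.info_url = None
        self.comment_url = None
        self.date_received = None

class PlanningAuthorityResults:
    """Collects applications for one authority and renders them as XML."""
    def __init__(self, authority_name, authority_short_name):
        self.authority_name = authority_name
        self.authority_short_name = authority_short_name
        self.applications = []

    def addApplication(self, application):
        self.applications.append(application)

    def displayXML(self):
        # The real element names are unknown; these are placeholders.
        out = ["<applications authority=%s>" % xml.sax.saxutils.quoteattr(self.authority_name)]
        for app in self.applications:
            out.append("  <application reference=%s/>" % xml.sax.saxutils.quoteattr(app.council_reference or ""))
        out.append("</applications>")
        return "\n".join(out)

With a stand-in like this on the import path, the __main__ block above prints one XML document listing the applications received on the given date.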