Automatically exported from code.google.com/p/planningalerts

import urllib2
import urllib
import urlparse
import datetime

from BeautifulSoup import BeautifulSoup

from PlanningUtils import PlanningApplication, \
    PlanningAuthorityResults, \
    getPostcodeFromText


class FifeParser:
    def __init__(self, *args):
        self.authority_name = "Fife Council"
        self.authority_short_name = "Fife"
        self.base_url = "http://www.fifedirect.org.uk/topics/index.cfm"
        self.comment_url = "http://www.ukplanning.com/ukp/showCaseFile.do?councilName=Fife+Council&appNumber=%s"

        self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)

    def getResultsByDayMonthYear(self, day, month, year):
        search_date = datetime.date(year, month, day)

        # Ask for applications received on a single day by using the same
        # date at both ends of the "received" range.
        search_data = urllib.urlencode(
            [("fuseaction", "planapps.list"),
             ("SUBJECTID", "104CC166-3ED1-4D22-B9F1E2FB8438478A"),
             ("src_fromdayRec", day),
             ("src_frommonthRec", month),
             ("src_fromyearRec", year),
             ("src_todayRec", day),
             ("src_tomonthRec", month),
             ("src_toyearRec", year),
             ("findroadworks", "GO"),
             ])

        search_url = self.base_url + "?" + search_data

        response = urllib2.urlopen(search_url)
        soup = BeautifulSoup(response.read())

        results_table = soup.find("table", id="results")

        # Apart from the first tr, which contains headers, the trs come in
        # pairs for each application: the first of each pair carries the
        # reference, info link, and address; the second carries the
        # description.
        trs = results_table.findAll("tr")[1:]

        tr_count = 0
        while tr_count < len(trs):
            tr = trs[tr_count]

            if tr_count % 2 == 0:
                application = PlanningApplication()
                application.date_received = search_date

                tds = tr.findAll("td")

                application.council_reference = tds[0].a.string.strip()
                application.comment_url = self.comment_url % (application.council_reference)
                application.info_url = urlparse.urljoin(self.base_url, tds[0].a['href'])

                application.address = ', '.join([x.strip() for x in tds[1].findAll(text=True)])
                application.postcode = getPostcodeFromText(application.address)
            else:
                # Get rid of the "Details: " at the beginning.
                application.description = tr.td.string.strip()[9:]

                self._results.addApplication(application)

            tr_count += 1

        return self._results

    def getResults(self, day, month, year):
        return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()

if __name__ == '__main__':
    parser = FifeParser()
    print parser.getResults(21, 5, 2008)

# TODO
# Paginates at 50. Unlikely on a single day, so we'll worry about it later.
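
The PlanningUtils module imported at the top is not part of this file. As a rough guide, here is a minimal sketch of the interface the scraper relies on, inferred entirely from how FifeParser uses it; the real classes, the postcode matching, and the XML emitted by displayXML will differ.

import cgi
import re

# Loose UK postcode pattern; the real helper may be stricter.
POSTCODE_RE = re.compile(r"[A-Z]{1,2}[0-9][A-Z0-9]? ?[0-9][A-Z]{2}")

def getPostcodeFromText(text):
    # Return the first postcode-shaped substring of text, or None.
    match = POSTCODE_RE.search(text or "")
    return match.group() if match else None

class PlanningApplication:
    def __init__(self):
        self.council_reference = None
        self.address = None
        self.postcode = None
        self.description = None
        self.info_url = None
        self.comment_url = None
        self.date_received = None

class PlanningAuthorityResults:
    def __init__(self, authority_name, authority_short_name):
        self.authority_name = authority_name
        self.authority_short_name = authority_short_name
        self.applications = []

    def addApplication(self, application):
        self.applications.append(application)

    def displayXML(self):
        # Placeholder serialisation: the real schema is unknown.
        lines = ["<applications>"]
        for app in self.applications:
            lines.append('  <application reference="%s" address="%s"/>'
                         % (cgi.escape(app.council_reference or "", True),
                            cgi.escape(app.address or "", True)))
        lines.append("</applications>")
        return "\n".join(lines)

A stub like this is enough to exercise the scraper end to end; for real use, take PlanningUtils from the planningalerts repository itself.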