Automatically exported from code.google.com/p/planningalerts
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 
 
 

366 lines
15 KiB

  1. #!/usr/local/bin/python
  2. import urllib, urllib2
  3. import HTMLParser
  4. import urlparse
  5. import datetime, time
  6. import cookielib
  7. cookie_jar = cookielib.CookieJar()
  8. from PlanningUtils import fixNewlines, getPostcodeFromText, PlanningAuthorityResults, PlanningApplication
  9. search_form_url_end = "DcApplication/application_searchform.aspx"
  10. search_results_url_end = "DcApplication/application_searchresults.aspx"
  11. comments_url_end = "DcApplication/application_comments_entryform.aspx"
  12. class PublicAccessParser(HTMLParser.HTMLParser):
  13. """This is the class which parses the PublicAccess search results page.
  14. """
  15. def __init__(self,
  16. authority_name,
  17. authority_short_name,
  18. base_url,
  19. debug=False):
  20. HTMLParser.HTMLParser.__init__(self)
  21. self.authority_name = authority_name
  22. self.authority_short_name = authority_short_name
  23. self.base_url = base_url
  24. self.debug = debug
  25. # this will change to True when we enter the table of results
  26. self._in_results_table = False
  27. # this will be set to True when we have passed the header row
  28. # in the results table
  29. self._past_header_row = False
  30. # this will be true when we are in a <td> in the results table
  31. self._in_td = False
  32. # For each row, this will say how many tds we have seen so far
  33. self._td_count = 0
  34. # The object which stores our set of planning application results
  35. self._results = PlanningAuthorityResults(self.authority_name, self.authority_short_name)
  36. # This will store the planning application we are currently working on.
  37. self._current_application = None
  38. def handle_starttag(self, tag, attrs):
  39. if tag == "table":
  40. self.handle_start_table(attrs)
  41. # we are only interested in tr tags if we are in the results table
  42. elif self._in_results_table and tag == "tr":
  43. self.handle_start_tr(attrs)
  44. # we are only interested in td tags if we are in the results table
  45. elif self._in_results_table and tag == "td":
  46. self.handle_start_td(attrs)
  47. # we are only interested in <a> tags if we are in the 6th td in
  48. # the results table.
  49. # UPDATE: It seems that, in the case of Chiltern, we are interested in
  50. # td 5.
  51. elif self._in_td and (self._td_count == 5 or self._td_count == 6) and tag == "a":
  52. self.handle_start_a(attrs)
  53. # If the tag is not one of these then we aren't interested
  54. def handle_endtag(self, tag):
  55. # we only need to consider end tags if we are in the results table
  56. if self._in_results_table:
  57. if tag == "table":
  58. self.handle_end_table()
  59. if tag == "tr":
  60. self.handle_end_tr()
  61. if tag == "td":
  62. self.handle_end_td()
  63. def handle_start_table(self, attrs):
  64. for attr,value in attrs:
  65. if attr == "class":
  66. if value == "cResultsForm":
  67. self._in_results_table = True
  68. break
  69. def handle_end_table(self):
  70. # If we see an end table tag, then note that we have left the
  71. # results table. This method is only called if we are in that table.
  72. self._in_results_table = False
  73. def handle_start_tr(self, attrs):
  74. # The first tr we meet in the results table is just headers
  75. # We will set a flag at the end of that tr to avoid creating
  76. # a blank PlanningApplication
  77. if self._past_header_row:
  78. # Create a candidate result object
  79. self._current_application = PlanningApplication()
  80. self._td_count = 0
  81. def handle_end_tr(self):
  82. # If we are in the results table, and not finishing the header row
  83. # append the current result to the results list.
  84. if self._past_header_row:
  85. self._results.addApplication(self._current_application)
  86. else:
  87. # The first row of the results table is headers
  88. # We want to do nothing until after it
  89. self._past_header_row = True
  90. def handle_start_td(self, attrs):
  91. # increase the td count by one
  92. self._td_count += 1
  93. # note that we are now in a td
  94. self._in_td = True
  95. def handle_end_td(self):
  96. # note that we are now not in a td
  97. self._in_td = False
  98. def handle_start_a(self, attrs):
  99. # this method is only getting called if we are in the
  100. # 6th td of a non-header row of the results table.
  101. # go through the attributes of the <a> looking for one
  102. # named 'href'
  103. for attr,value in attrs:
  104. if attr == "href":
  105. # the value of this tag is a relative url.
  106. # parse it so we can get the query string from it
  107. parsed_info_url = urlparse.urlparse(value)
  108. # the 4th part of the tuple is the query string
  109. query_string = parsed_info_url[4]
  110. # join this query string to the search URL, and store this as
  111. # the info URL of the current planning application
  112. self._current_application.info_url = urlparse.urljoin(self.base_url, value)
  113. # Join this query string to the comments URL, and store this as
  114. # the comments URL of the current planning application
  115. comments_url = urlparse.urljoin(self.base_url, comments_url_end)
  116. self._current_application.comment_url = "?".join([comments_url, query_string])
  117. # while we're here, let's follow some links to find the postcode...
  118. # the postcode is in an input tag in the property page. This page
  119. # can be found by following the info url.
  120. # The newlines in the info page need fixing.
  121. info_file_contents = fixNewlines(urllib2.urlopen(self._current_application.info_url).read())
  122. info_file_parser = PublicAccessInfoPageParser()
  123. info_file_parser.feed(info_file_contents)
  124. property_page_url = urlparse.urljoin(self._current_application.info_url, info_file_parser.property_page_url)
  125. # the newlines in this page need fixing
  126. property_file_contents = fixNewlines(urllib2.urlopen(property_page_url).read())
  127. property_file_parser = PublicAccessPropertyPageParser()
  128. property_file_parser.feed(property_file_contents)
  129. # Set the postcode on the current planning application from the
  130. # one found on the property page
  131. if property_file_parser.postcode is not None:
  132. self._current_application.postcode = property_file_parser.postcode
  133. else:
  134. # If there is no postcode in here, then we'll have to make do with regexing one out of the address.
  135. self._current_application.postcode = getPostcodeFromText(self._current_application.address)
  136. # There is no need for us to look at any more attributes.
  137. break
  138. def handle_data(self, data):
  139. if self._in_td:
  140. # The first td contains the reference
  141. if self._td_count == 1:
  142. self._current_application.council_reference = data
  143. # The second td contains the date the application was received
  144. elif self._td_count == 2:
  145. year, month, day = time.strptime(data, "%d/%m/%Y")[:3]
  146. received_date = datetime.date(year, month, day)
  147. self._current_application.date_received = received_date
  148. # The third td contains the address
  149. elif self._td_count == 3:
  150. #data = data.replace("^M","\n")
  151. self._current_application.address = data
  152. # The fourth td contains the description
  153. elif self._td_count == 4:
  154. self._current_application.description = data
  155. # 5 is status - we don't need it.
  156. # 6 is a button - this is where we will get our postcode,
  157. # comment_url, and info_url from (when handling the <a> tag).
  158. def getResultsByDayMonthYear(self, day, month, year):
  159. # First download the search form (in order to get a session cookie
  160. search_form_request = urllib2.Request(urlparse.urljoin(self.base_url, search_form_url_end))
  161. search_form_response = urllib2.urlopen(search_form_request)
  162. cookie_jar.extract_cookies(search_form_response, search_form_request)
  163. # We are only doing this first search in order to get a cookie
  164. # The paging on the site doesn't work with cookies turned off.
  165. search_data1 = urllib.urlencode({"searchType":"ADV",
  166. "caseNo":"",
  167. "PPReference":"",
  168. "AltReference":"",
  169. "srchtype":"",
  170. "srchstatus":"",
  171. "srchdecision":"",
  172. "srchapstatus":"",
  173. "srchappealdecision":"",
  174. "srchwardcode":"",
  175. "srchparishcode":"",
  176. "srchagentdetails":"",
  177. "srchDateReceivedStart":"%(day)02d/%(month)02d/%(year)d" %{"day":day ,"month": month ,"year": year},
  178. "srchDateReceivedEnd":"%(day)02d/%(month)02d/%(year)d" %{"day":day, "month":month, "year":year} })
  179. if self.debug:
  180. print search_data1
  181. search_url = urlparse.urljoin(self.base_url, search_results_url_end)
  182. request1 = urllib2.Request(search_url, search_data1)
  183. cookie_jar.add_cookie_header(request1)
  184. response1 = urllib2.urlopen(request1)
  185. # This search is the one we will actually use.
  186. # a maximum of 100 results are returned on this site,
  187. # hence setting "pagesize" to 100. I doubt there will ever
  188. # be more than 100 in one day in PublicAccess...
  189. # "currentpage" = 1 gets us to the first page of results
  190. # (there will only be one anyway, as we are asking for 100 results...)
  191. #http://planning.york.gov.uk/PublicAccess/tdc/DcApplication/application_searchresults.aspx?szSearchDescription=Applications%20received%20between%2022/02/2007%20and%2022/02/2007&searchType=ADV&bccaseno=&currentpage=2&pagesize=10&module=P3
  192. search_data2 = urllib.urlencode((("szSearchDescription","Applications received between %(day)02d/%(month)02d/%(year)d and %(day)02d/%(month)02d/%(year)d"%{"day":day ,"month": month ,"year": year}), ("searchType","ADV"), ("bccaseno",""), ("currentpage","1"), ("pagesize","100"), ("module","P3")))
  193. if self.debug:
  194. print search_data2
  195. # This time we want to do a get request, so add the search data into the url
  196. request2_url = urlparse.urljoin(self.base_url, search_results_url_end + "?" + search_data2)
  197. request2 = urllib2.Request(request2_url)
  198. # add the cookie we stored from our first search
  199. cookie_jar.add_cookie_header(request2)
  200. response2 = urllib2.urlopen(request2)
  201. contents = fixNewlines(response2.read())
  202. if self.debug:
  203. print contents
  204. self.feed(contents)
  205. return self._results
  206. def getResults(self, day, month, year):
  207. return self.getResultsByDayMonthYear(int(day), int(month), int(year)).displayXML()
  208. class PublicAccessInfoPageParser(HTMLParser.HTMLParser):
  209. """A parser to get the URL for the property details page out of the
  210. info page (this url is needed in order to get the postcode of the
  211. application.
  212. """
  213. def __init__(self):
  214. HTMLParser.HTMLParser.__init__(self)
  215. self.property_page_url = None
  216. def handle_starttag(self, tag, attrs):
  217. """The URL of the property details page is contained in an <a> tag in
  218. an attribute with key 'A_btnPropertyDetails'. There is some garbage on
  219. either side of it which we will have to clear up before storing it...
  220. We go through the <a> tags looking for one with an attribute with
  221. key 'id' and value 'A_btnPropertyDetails'. When we find it we go through
  222. its attributes looking for one with key 'href' - the value of this attribute
  223. contains the URL we want, after a bit of tidying up.
  224. Once we have got the URL, there is no need for us to look at any more <a> tags.
  225. """
  226. if tag == "a" and self.property_page_url is None:
  227. #print attrs
  228. if attrs.count(("id","A_btnPropertyDetails")) > 0:
  229. for attr,value in attrs:
  230. if attr == "href":
  231. the_link = value
  232. # this may have some garbage on either side of it...
  233. # let's strip that off
  234. # If the stripping fails, take the whole link
  235. # the garbage on the left is separated by whitespace.
  236. # the garbage on the right is separated by a "'".
  237. try:
  238. self.property_page_url = the_link.split()[1].split("'")[0]
  239. except IndexError:
  240. self.property_page_url = the_link
  241. class PublicAccessPropertyPageParser(HTMLParser.HTMLParser):
  242. """A parser to get the postcode out of the property details page."""
  243. def __init__(self):
  244. HTMLParser.HTMLParser.__init__(self)
  245. self.postcode = None
  246. def handle_starttag(self, tag, attrs):
  247. """The postcode is contained in an <input> tag.
  248. This tag has an attribute 'name' with value postcode.
  249. It also has an attribute 'value' with value the postcode of this application.
  250. We go through the input tags looking for one with an attribute with
  251. key 'name' and value 'postcode'. When we find one,
  252. we look through its attributes for one with key 'value' - we store the value of this
  253. attribute as self.postcode.
  254. Once we have the postcode, there is no need to look at any more input tags.
  255. """
  256. if tag == "input" and self.postcode is None:
  257. if attrs.count(("name","postcode")) > 0:
  258. for attr,value in attrs:
  259. if attr == "value":
  260. self.postcode = value
  261. if __name__ == '__main__':
  262. day = 1
  263. month = 8
  264. year = 2008
  265. #parser = PublicAccessParser("East Northants", "East Northants", "http://publicaccesssrv.east-northamptonshire.gov.uk/PublicAccess/tdc/", True)
  266. #parser = PublicAccessParser("Cherwell District Council", "Cherwell", "http://cherweb.cherwell-dc.gov.uk/publicaccess/tdc/", False)
  267. #parser = PublicAccessParser("Hambleton District Council", "Hambleton", "http://planning.hambleton.gov.uk/publicaccess/tdc/", True)
  268. #parser = PublicAccessParser("Durham City Council", "Durham", "http://publicaccess.durhamcity.gov.uk/publicaccess/tdc/", True)
  269. #parser = PublicAccessParser("Moray Council", "Moray", "http://public.moray.gov.uk/publicaccess/tdc/", True)
  270. # parser = PublicAccessParser("Sheffield City Council", "Sheffield", "http://planning.sheffield.gov.uk/publicaccess/tdc/")
  271. parser = PublicAccessParser("London Borough of Barking and Dagenham", "Barking and Dagenham", "http://paweb.barking-dagenham.gov.uk/PublicAccess/tdc/")
  272. print parser.getResults(day, month, year)