Planning applications tracker for InLinkUK from BT kiosks. https://kiosks.adrianshort.org/
require 'scraperwiki'
require 'petrify'
require 'csv'
require 'json'
require 'rss'
require 'date' # DateTime.parse is used below

if ENV['KIOSKS_SITEURL']
  SITEURL = ENV['KIOSKS_SITEURL']
else
  puts "KIOSKS_SITEURL environment variable must be set to the base URL of the site without a trailing slash, e.g.:"
  puts "export KIOSKS_SITEURL=https://kiosks.adrianshort.org"
  exit 1
end
class Site
  def self.generate
    Petrify.setup

    # Home page
    summary = ScraperWiki.select("
      authority_name, status, decision, appeal_status, appeal_decision,
        count(*) as applications
      from applications
      group by authority_name, status, decision, appeal_status, appeal_decision
    ")

    q = ScraperWiki.select("
      scraped_at
      from applications
      order by scraped_at desc
      limit 1")
    last_updated = DateTime.parse(q[0]['scraped_at'])

    path = '.'
    Petrify.page(path, 'index',
      { summary: summary, last_updated: last_updated })
    Petrify.csv(path, 'kiosks-summary', summary)

    # Generate a JSON file with all the data
    apps = ScraperWiki.select("* from applications")
    Petrify.file(path, 'data.json', apps.to_json)
    # New applications page
    apps = ScraperWiki.select("* from `applications`
      order by date_received desc limit 60")
    Petrify.page('new-applications', 'new-applications', { apps: apps, title: "New applications" })

    # Latest decisions page
    apps = ScraperWiki.select("* from `applications`
      order by date_decision desc limit 60")
    path = 'decisions'
    Petrify.page(path, 'decisions', { apps: apps, title: "Latest decisions" })
    Petrify.csv(path, 'kiosks-decisions', apps)

    # Appeals page
    summary = ScraperWiki.select("
      authority_name, status, decision, appeal_status, appeal_decision,
        count(*) as applications
      from applications
      where (appeal_status is not null
        and appeal_status != 'Unknown')
        or status like '%appeal%'
      group by authority_name, appeal_status, appeal_decision
      collate nocase
    ")

    apps = ScraperWiki.select("
      * from applications
      where (appeal_status is not null
        and appeal_status != 'Unknown')
        or status like '%appeal%'
      collate nocase
    ")

    path = 'appeals'
    Petrify.page(path, 'appeals', { summary: summary, apps: apps, title: "Appeals" })
    Petrify.csv(path, 'kiosks-appeals', apps)
    # Media page
    stories = CSV.read('media.csv', headers: true)
    Petrify.page('media', 'media', { stories: stories, title: "Media" })

    # RSS feed of all media coverage
    feed = RSS::Maker.make("2.0") do |maker|
      maker.channel.title = "InLinkUK kiosks media coverage"
      maker.channel.description = "News and views about Google's UK street kiosk network."
      maker.channel.link = "#{SITEURL}/media/"
      maker.channel.updated = Time.now.to_s

      stories.each do |story|
        maker.items.new_item do |item|
          item.link = story['url']
          item.title = "%s: %s" % [ story['publication'], story['title'] ]
          item.updated = story['publish_date']

          # Link each story to the authorities it mentions
          if story['authorities']
            links = []
            story['authorities'].split('|').each do |auth|
              auth.strip!
              links << '<a href="%s">%s</a>' % [ SITEURL + authority_url(auth), auth ]
            end
            item.description = links.join(', ')
          end
        end
      end
    end
    Petrify.file('media', 'index.xml', feed)
    # Authority pages
    auths = ScraperWiki.select("distinct(authority_name) as authority_name
      from applications")

    auths.each do |auth|
      summary = ScraperWiki.select("
        status, decision, appeal_status, appeal_decision, count(*) as qty
        from applications
        where authority_name = ?
        group by status, decision, appeal_status, appeal_decision
      ", auth['authority_name'])

      apps = ScraperWiki.select("* from applications where authority_name = ?
        order by date_received desc", auth['authority_name'])

      # Media stories that mention this authority
      this_stories = stories.select do |story|
        story['authorities'] && story['authorities'].match(auth['authority_name'])
      end

      path = ['authorities', slug(auth['authority_name'])]
      Petrify.page(path, 'authority',
        { apps: apps, auth: auth, summary: summary, stories: this_stories, title: auth['authority_name'] })
      Petrify.csv(path, slug(auth['authority_name']), apps)

      # RSS feed for this authority's media stories
      feed = RSS::Maker.make("2.0") do |maker|
        maker.channel.title = "#{auth['authority_name']} InLinkUK kiosks media coverage"
        maker.channel.description = "News and views about Google's UK street kiosk network in #{auth['authority_name']}."
        maker.channel.link = "#{SITEURL}#{authority_url(auth['authority_name'])}"
        maker.channel.updated = Time.now.to_s

        this_stories.each do |story|
          maker.items.new_item do |item|
            item.link = story['url']
            item.title = "%s: %s" % [ story['publication'], story['title'] ]
            item.updated = story['publish_date']

            if story['authorities']
              links = []
              # Distinct block variable name so the outer `auth` hash isn't shadowed
              story['authorities'].split('|').each do |auth_name|
                auth_name.strip!
                links << '<a href="%s">%s</a>' % [ SITEURL + authority_url(auth_name), auth_name ]
              end
              item.description = links.join(', ')
            end
          end
        end
      end
      Petrify.file(path, 'media.xml', feed)
    end
  end
end
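
The file defines Site.generate but never calls it, so something else has to trigger the build. A minimal sketch of a runner, assuming this file is saved as makesite.rb and that the scraper has already populated the ScraperWiki SQLite datastore (both the filename and the prior scrape are assumptions, not shown in the file above):

# run.rb -- hypothetical entry point; set the site URL first, e.g.
#   export KIOSKS_SITEURL=https://kiosks.adrianshort.org
require_relative 'makesite' # assumed filename for the file listed above

Site.generate # builds the pages, CSVs, data.json and RSS feeds via Petrify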