Planning applications tracker for InLinkUK from BT kiosks. https://kiosks.adrianshort.org/

require 'scraperwiki'
require 'petrify'
require 'csv'
require 'json'
require 'rss'
require 'date' # for DateTime.parse

if ENV['KIOSKS_SITEURL']
  SITEURL = ENV['KIOSKS_SITEURL']
else
  puts "The KIOSKS_SITEURL environment variable must be set to the base URL of the site, without a trailing slash,"
  puts "e.g. https://kiosks.adrianshort.org"
  exit 1
end
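
# Example invocation (the script filename is an assumption; run from the
# project root, alongside the scraper's SQLite database and media.csv):
#   KIOSKS_SITEURL=https://kiosks.adrianshort.org ruby site.rb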

class Site
  def self.generate
    # Home page: summary counts grouped by authority and status
    summary = ScraperWiki.select("
      authority_name, status, decision, appeal_status, appeal_decision,
      count(*) as applications
      from applications
      group by authority_name, status, decision, appeal_status, appeal_decision
    ")

    # Most recent scrape time, for the 'last updated' stamp
    q = ScraperWiki.select("
      scraped_at
      from applications
      order by scraped_at desc
      limit 1")
    last_updated = DateTime.parse(q[0]['scraped_at'])

    path = '.'
    Petrify.page(path, 'index',
      { summary: summary, last_updated: last_updated })
    Petrify.csv(path, 'inlink-summary', summary)

    # Generate a JSON file with all the data
    apps = ScraperWiki.select("* from applications")
    Petrify.file(path, 'data.json', apps.to_json)

    # New applications page
    apps = ScraperWiki.select("* from `applications`
      order by date_received desc limit 60")
    Petrify.page('new-applications', 'new-applications',
      { apps: apps, title: "New applications" })

    # Latest decisions page
    apps = ScraperWiki.select("* from `applications`
      order by date_decision desc limit 60")
    path = 'decisions'
    Petrify.page(path, 'decisions', { apps: apps, title: "Latest decisions" })
    Petrify.csv(path, 'inlink-decisions', apps)

    # Appeals page
    summary = ScraperWiki.select("
      authority_name, status, decision, appeal_status, appeal_decision,
      count(*) as applications
      from applications
      where (appeal_status is not null
        and appeal_status != 'Unknown')
        or status like '%appeal%'
      group by authority_name, appeal_status, appeal_decision
      collate nocase
    ")

    apps = ScraperWiki.select("
      * from applications
      where (appeal_status is not null
        and appeal_status != 'Unknown')
        or status like '%appeal%'
      collate nocase
    ")

    path = 'appeals'
    Petrify.page(path, 'appeals', { summary: summary, apps: apps, title: "Appeals" })
    Petrify.csv(path, 'inlink-appeals', apps)

    # Media page
    stories = CSV.read('media.csv', headers: true)
    Petrify.page('media', 'media', { stories: stories, title: "Media" })

    # Sitewide RSS feed of media coverage
    feed = RSS::Maker.make("2.0") do |maker|
      maker.channel.title = "InLinkUK kiosks media coverage"
      maker.channel.description = "News and views about Google's UK street kiosk network."
      maker.channel.link = "#{SITEURL}/media/"
      maker.channel.updated = Time.now.to_s

      stories.each do |story|
        maker.items.new_item do |item|
          item.link = story['url']
          item.title = "%s: %s" % [ story['publication'], story['title'] ]
          item.updated = story['publish_date']

          # Link each story to the authorities it mentions (pipe-separated)
          if story['authorities']
            links = []
            story['authorities'].split('|').each do |auth_name|
              auth_name.strip!
              links << '<a href="%s">%s</a>' % [ SITEURL + authority_url(auth_name), auth_name ]
            end
            item.description = links.join(', ')
          end
        end
      end
    end
    Petrify.file('media', 'index.xml', feed)

    # Authority pages
    auths = ScraperWiki.select("distinct(authority_name) as authority_name
      from applications")

    auths.each do |auth|
      summary = ScraperWiki.select("
        status, decision, appeal_status, appeal_decision, count(*) as qty
        from applications
        where authority_name = ?
        group by status, decision, appeal_status, appeal_decision
      ", auth['authority_name'])

      apps = ScraperWiki.select("* from applications where authority_name = ?
        order by date_received desc", auth['authority_name'])

      # Media stories that mention this authority
      this_stories = stories.select do |story|
        if story['authorities']
          story['authorities'].match(auth['authority_name'])
        end
      end

      path = ['authorities', slug(auth['authority_name'])]
      Petrify.page(path, 'authority',
        { apps: apps, auth: auth, summary: summary, stories: this_stories, title: auth['authority_name'] })
      Petrify.csv(path, slug(auth['authority_name']), apps)

      # RSS feed for this authority's media stories
      feed = RSS::Maker.make("2.0") do |maker|
        maker.channel.title = "#{auth['authority_name']} InLinkUK kiosks media coverage"
        maker.channel.description = "News and views about Google's UK street kiosk network in #{auth['authority_name']}."
        maker.channel.link = "#{SITEURL}#{authority_url(auth['authority_name'])}"
        maker.channel.updated = Time.now.to_s

        this_stories.each do |story|
          maker.items.new_item do |item|
            item.link = story['url']
            item.title = "%s: %s" % [ story['publication'], story['title'] ]
            item.updated = story['publish_date']

            if story['authorities']
              links = []
              # auth_name avoids shadowing the outer `auth` hash
              story['authorities'].split('|').each do |auth_name|
                auth_name.strip!
                links << '<a href="%s">%s</a>' % [ SITEURL + authority_url(auth_name), auth_name ]
              end
              item.description = links.join(', ')
            end
          end
        end
      end
      Petrify.file(path, 'media.xml', feed)
    end
  end
end
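
Note: Petrify.page, Petrify.csv and Petrify.file come from the petrify gem required at the top, while slug and authority_url are called above but not defined in this file, so they presumably live elsewhere in the repo. A minimal sketch of what those two helpers might look like, inferred only from how they are used here (both names are real, but the bodies are assumptions, not the project's actual code):

    # Hypothetical implementations, inferred from usage above.
    def slug(s)
      # "Tower Hamlets" -> "tower-hamlets"
      s.downcase.gsub(/[^a-z0-9]+/, '-').gsub(/\A-+|-+\z/, '')
    end

    def authority_url(authority_name)
      # Site-relative URL for an authority page, matching the
      # ['authorities', slug(...)] output path used by Petrify.page
      "/authorities/#{slug(authority_name)}/"
    end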