Planning applications tracker for InLinkUK from BT kiosks. https://kiosks.adrianshort.org/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

6 年之前
6 年之前
6 年之前
6 年之前
6 年之前
6 年之前
6 年之前
6 年之前
6 年之前
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116
  1. require 'scraperwiki'
  2. require 'petrify'
  3. require 'csv'
  4. require 'json'
  5. require 'rss'
  6. class Site
  7. def self.generate
  8. # Home page
  9. summary = ScraperWiki.select("
  10. authority_name, status, decision, appeal_status, appeal_decision,
  11. count(*) as applications
  12. from applications
  13. group by authority_name, status, decision, appeal_status, appeal_decision
  14. ")
  15. q = ScraperWiki.select("
  16. scraped_at
  17. from applications
  18. order by scraped_at desc
  19. limit 1")
  20. last_updated = DateTime.parse(q[0]['scraped_at'])
  21. path = '.'
  22. Petrify.page(path, 'index', \
  23. { summary: summary, last_updated: last_updated })
  24. Petrify.csv(path, 'inlink-summary', summary)
  25. # Generate a JSON file with all the data
  26. apps = ScraperWiki.select("* from applications")
  27. Petrify.file(path, 'data.json', apps.to_json)
  28. # New applications page
  29. apps = ScraperWiki.select("* from `applications`
  30. order by date_received desc limit 60")
  31. Petrify.page('new-applications', 'new-applications', { apps: apps, title: "New applications" })
  32. # Latest decisions page
  33. apps = ScraperWiki.select("* from `applications`
  34. order by date_decision desc limit 60")
  35. path = 'decisions'
  36. Petrify.page(path, 'decisions', { apps: apps, title: "Latest decisions" })
  37. Petrify.csv(path, 'inlink-decisions', apps)
  38. # Appeals page
  39. summary = ScraperWiki.select("
  40. authority_name, status, decision, appeal_status, appeal_decision,
  41. count(*) as applications
  42. from applications
  43. where (appeal_status is not null
  44. and appeal_status != 'Unknown')
  45. or status like '%appeal%'
  46. group by authority_name, appeal_status, appeal_decision
  47. collate nocase
  48. ")
  49. apps = ScraperWiki.select("
  50. * from applications
  51. where (appeal_status is not null
  52. and appeal_status != 'Unknown')
  53. or status like '%appeal%'
  54. collate nocase
  55. ")
  56. path = 'appeals'
  57. Petrify.page(path, 'appeals', { summary: summary, apps: apps, title: "Appeals" })
  58. Petrify.csv(path, 'inlink-appeals', apps)
  59. # Media page
  60. stories = CSV.read('media.csv', :headers => true )
  61. Petrify.page('media', 'media', { stories: stories, title: "Media" })
  62. feed = RSS::Maker.make("2.0") do |maker|
  63. maker.channel.title = "InLinkUK kiosks media coverage"
  64. maker.channel.description = "News and views about Google's UK street kiosk network."
  65. maker.channel.link = "https://kiosks.adrianshort.org/media/"
  66. maker.channel.updated = Time.now.to_s
  67. stories.each do |story|
  68. maker.items.new_item do |item|
  69. item.link = story['url']
  70. item.title = "%s: %s" % [ story['publication'], story['title'] ]
  71. item.updated = story['publish_date']
  72. end
  73. end
  74. end
  75. Petrify.file('media', 'index.xml', feed)
  76. # Authority pages
  77. auths = ScraperWiki.select("distinct(authority_name) as authority_name
  78. from applications")
  79. auths.each do |auth|
  80. summary = ScraperWiki.select("
  81. status, decision, appeal_status, appeal_decision, count(*) as qty
  82. from applications
  83. where authority_name = ?
  84. group by status, decision, appeal_status, appeal_decision
  85. ", auth['authority_name'])
  86. apps = ScraperWiki.select("* from applications where authority_name = ?
  87. order by date_received desc", auth['authority_name'])
  88. this_stories = stories.select do |story|
  89. if story['authorities']
  90. story['authorities'].match(auth['authority_name'])
  91. end
  92. end
  93. path = ['authorities', slug(auth['authority_name'])]
  94. Petrify.page(path, 'authority', \
  95. { apps: apps, auth: auth, summary: summary, stories: this_stories, title: auth['authority_name'] })
  96. Petrify.csv(path, slug(auth['authority_name']), apps)
  97. end
  98. end
  99. end