Planning applications tracker for InLinkUK from BT kiosks. https://kiosks.adrianshort.org/

#!/usr/bin/env ruby
require 'scraperwiki'
require 'haml'
require 'pp'
require 'logger'
require 'csv'
require 'fileutils' # FileUtils is used below for mkdir_p and copy_entry
require_relative '../lib/helpers'
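
# `slug` is defined in ../lib/helpers (not shown on this page). A minimal
# sketch of what it is assumed to do, turning an authority name into a
# URL-safe path segment:
#
#   def slug(s)
#     s.downcase.gsub(/[^a-z0-9]+/, '-').gsub(/^-+|-+$/, '')
#   end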

OUTPUT_DIR = '_site'
VIEWS_DIR = File.join('views')
LAYOUT_FN = File.join(VIEWS_DIR, 'layout.haml')

# Render a Haml template inside the shared layout and write it to
# <OUTPUT_DIR>/<path_items>/index.html.
def write_page(path_items, template, locals = {})
  dir = File.join(OUTPUT_DIR, path_items)
  FileUtils.mkdir_p(dir)
  @log.debug dir
  fn = File.join(dir, 'index.html')

  # https://stackoverflow.com/questions/6125265/using-layouts-in-haml-files-independently-of-rails
  html = Haml::Engine.new(File.read(LAYOUT_FN)).render do
    Haml::Engine.new(File.read(File.join(VIEWS_DIR, "#{template}.haml"))).render(Object.new, locals)
  end

  File.write(fn, html)
  @log.info fn
  @pages += 1
  # TODO: add page to sitemap.xml or sitemap.txt
  # https://support.google.com/webmasters/answer/183668?hl=en&ref_topic=4581190
end
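
# The layout above is rendered with a block, so views/layout.haml must call
# `yield` where the page body belongs. A minimal layout that would work here
# (hypothetical; the real one lives in the repo's views/ directory):
#
#   !!!
#   %html
#     %head
#       %title InLinkUK kiosk planning applications
#     %body
#       = yield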

# Write a CSV string to <OUTPUT_DIR>/<path_items>/<filename>.csv.
def write_csv(path_items, filename, data)
  dir = File.join(OUTPUT_DIR, path_items)
  FileUtils.mkdir_p(dir)
  @log.debug dir
  fn = File.join(dir, filename + '.csv')
  File.write(fn, data)
  @log.info fn
end

def create_output_dir
  # Recursively deleting the working directory would ensure no redundant
  # files are left behind from previous builds, but it is currently disabled:
  # FileUtils.rm_rf(@working_dir)
  Dir.mkdir(@working_dir) unless File.directory?(@working_dir)
  # Dir.chdir(@working_dir)

  # Copy the `public` dir into the output dir
  FileUtils.copy_entry('public', @working_dir)
end

# Homepage: the 20 most recently decided applications.
def gen_homepage
  decisions = ScraperWiki.select("* from `applications` order by date_decision desc limit 20")
  write_page('.', 'index', { decisions: decisions })
end

# One index page for all authorities, then a page and a CSV download for each.
def gen_authorities
  auths = ScraperWiki.select("distinct(authority_name) as authority_name from applications order by authority_name")
  write_page('authorities', 'authorities', { auths: auths })

  auths.each do |auth|
    summary = ScraperWiki.select("
      status, decision, appeal_status, appeal_decision, count(*) as qty
      from applications
      where authority_name = '#{auth['authority_name']}'
      group by status, decision, appeal_status, appeal_decision
    ")

    apps = ScraperWiki.select("* from applications where authority_name='#{auth['authority_name']}' order by date_received")
    write_page(['authorities', slug(auth['authority_name'])], 'authority', { apps: apps, auth: auth, summary: summary })

    # Write the same applications as a CSV alongside the authority's page.
    csv_string = CSV.generate do |csv|
      csv << apps.first.keys # header row
      apps.each { |app| csv << app.values }
    end
    write_csv(['authorities', slug(auth['authority_name'])], slug(auth['authority_name']), csv_string)
  end
end
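
# One assumption worth noting: an authority name containing a single quote
# would break the interpolated SQL above. A minimal escaping helper, if that
# ever happens (hypothetical; not part of this script):
#
#   def sql_escape(s)
#     s.gsub("'", "''") # double single quotes, per SQL string literal rules
#   end
#
#   where_clause = "where authority_name = '#{sql_escape(auth['authority_name'])}'"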

@working_dir = File.join(Dir.pwd, OUTPUT_DIR)
puts @working_dir
# exit
@log = Logger.new($stdout)
@pages = 0

create_output_dir
gen_homepage
gen_authorities
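
Running the script (assuming it is saved as generate.rb at the repo root, next to the views/ and public/ directories, with the scraper's SQLite database in place) builds the whole site into _site/:

    $ ruby generate.rb

From the write_page and write_csv calls above, the expected output layout is:

    _site/
      index.html                 # 20 most recent decisions
      authorities/
        index.html               # list of authorities
        <authority-slug>/
          index.html             # one authority's applications
          <authority-slug>.csv   # the same applications as CSV

plus whatever static files are copied over from public/.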