#!/usr/bin/env ruby
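# Build a static HTML site in OUTPUT_DIR from the `applications` table in the
# local ScraperWiki SQLite database, rendering the Haml templates in views/.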

require 'scraperwiki'
require 'haml'
require 'fileutils'
require 'logger'
require 'csv'
require_relative '../lib/helpers'

OUTPUT_DIR = '_site'
VIEWS_DIR = 'views'
LAYOUT_FN = File.join(VIEWS_DIR, 'layout.haml')

def write_page(path_items, template, locals = {})
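  # Render `template` inside the shared layout and write the result to
  # OUTPUT_DIR/<path_items>/index.html. `path_items` can be a single path
  # segment or an array of segments; `locals` is passed to the template.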
  dir = File.join(OUTPUT_DIR, path_items)
  FileUtils.mkdir_p(dir)
  @log.debug dir
  fn = File.join(dir, 'index.html')
  
  # https://stackoverflow.com/questions/6125265/using-layouts-in-haml-files-independently-of-rails
  html = Haml::Engine.new(File.read(LAYOUT_FN)).render do
    Haml::Engine.new(File.read(File.join(VIEWS_DIR, "#{template}.haml"))).render(Object.new, locals)
  end

  File.write(fn, html)
  @log.info fn
  @pages += 1
  # TODO - add page to sitemap.xml or sitemap.txt
  # https://support.google.com/webmasters/answer/183668?hl=en&ref_topic=4581190
end

def write_csv(path_items, filename, data)
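  # Write an array of row hashes to OUTPUT_DIR/<path_items>/<filename>.csv,
  # using the keys of the first row as the header.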
  dir = File.join(OUTPUT_DIR, path_items)
  FileUtils.mkdir_p(dir)
  @log.debug dir
  fn = File.join(dir, filename + '.csv')

  # Nothing to write if there are no rows (data.first would be nil below)
  if data.empty?
    @log.warn "No data for #{fn}"
    return
  end

  csv_string = CSV.generate do |csv|
    csv << data.first.keys # header row
    data.each { |row| csv << row.values }
  end
  
  File.write(fn, csv_string)
  @log.info fn
end

# Create the output directory (if it doesn't already exist) and copy the
# static assets from `public` into it.
def create_output_dir
  # Recursively deleting the directory first (FileUtils.rm_rf(@working_dir)) would
  # ensure no stale files are left behind from previous builds; currently disabled.
  Dir.mkdir(@working_dir) unless File.directory?(@working_dir)

  # Copy `public` dir to output dir
  FileUtils.copy_entry('public', @working_dir)
end

def gen_homepage
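  # Home page: a per-authority summary grouped by status, decision and appeal,
  # plus summary and full-data CSV downloads.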
  summary = ScraperWiki.select("
    authority_name, status, decision, appeal_status, appeal_decision, count(*) as applications
    from applications
    group by authority_name, status, decision, appeal_status, appeal_decision
  ")
  write_page('.', 'index', { summary: summary })

  # Summary CSV file
  write_csv('.', 'inlink-summary', summary)

  # Full CSV file
  apps = ScraperWiki.select("* from applications")
  write_csv('.', 'inlink-full', apps)
end

def gen_new
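  # The 30 most recently received applications.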
  apps = ScraperWiki.select("* from `applications` order by date_received desc limit 30")
  write_page('new', 'new', { apps: apps })
end

def gen_decisions
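  # The 30 most recently decided applications.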
  apps = ScraperWiki.select("* from `applications` order by date_decision desc limit 30")
  write_page('decisions', 'decisions', { apps: apps })
end

def gen_authorities
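  # Index of all authorities, plus a detail page and CSV download per authority.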
  auths = ScraperWiki.select("distinct(authority_name) as authority_name from applications order by authority_name")
  write_page('authorities', 'authorities', { auths: auths })

  auths.each do |auth|
    name = auth['authority_name']

    # Bind the authority name as a query parameter rather than interpolating it
    # into the SQL, so names containing quotes don't break the query.
    summary = ScraperWiki.select("
      status, decision, appeal_status, appeal_decision, count(*) as qty
      from applications
      where authority_name = ?
      group by status, decision, appeal_status, appeal_decision
      ", [name])

    apps = ScraperWiki.select("* from applications where authority_name = ? order by date_received", [name])

    write_page(['authorities', slug(name)], 'authority', { apps: apps, auth: auth, summary: summary })
    write_csv(['authorities', slug(name)], slug(name), apps)
  end
end

# Build the site.
@working_dir = File.join(Dir.pwd, OUTPUT_DIR)
@log = Logger.new($stdout)
@log.level = Logger::INFO
@log.info @working_dir
@pages = 0
create_output_dir
gen_homepage
gen_new
gen_decisions
gen_authorities
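
# Report how many pages were generated.
@log.info "Generated #{@pages} pages in #{OUTPUT_DIR}"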
