csv2municipalcandidates.awk
#! /bin/gawk -f
# Program : csv2municipalcandidates.awk
# Purpose : Generate copy'n'paste-able code for Poliblog's candidate lists for municipal elections
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 2 July 2022
# Based on : csv2municipalcandidates.awk (for provincial and federal elections with Offices)
# Copyright (c) 2020 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Usage : gawk -f csv2municipalcandidates.awk candidates.csv
# Records : 1st line: Fields are headers <h1> to <h6>
# : 2nd line: Field header names
# : 3rd line to end: Data
# Fields : Fieldnames with * are required, cannot be empty. Other fields appear only if not empty
# Fieldnames with ! are used to group entries and are used for layout.
# Fieldnames with ! must exist, fields may be empty. Itemized in
# variable skipAttributes, used to skip in attribute lists
# Fields are referenced by header name, the second line MUST be a field header line
#
# Static Fields (must exist, if blank the entry is ignored):
# 1 Name
# 2 Office
# 3 Municipality
# Address (single line monolithic, at the moment. May use sub-fields later)
# Website
# E mail
# Phone
# "Twitter","Facebook","Instagram","YouTube","LinkedIn", &c.
# (social media URLs) field numbers may vary (will eventually be parsed for @Twitter and @instagram)
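# Example input (hypothetical data, not a real candidates.csv), showing the layout described
# above -- title line, field header line, then data rows:
#   "2022 Municipal Election","Candidates"
#   Name,Office,Municipality,Address,Website,E mail,Phone
#   "Doe, Jane",Mayor,Kitchener,"200 King St W, Kitchener ON",https://example.org,jane@example.org,+1-519-555-0100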
@include "library.awk"
function makeid(candidatenumber)
{
return (valnameid(Candidates[candidatenumber]["Name"] "-" Candidates[candidatenumber]["Municipality"] "-" Candidates[candidatenumber]["Office"]))
}
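# Example (hypothetical candidate): for Name "Doe, Jane", Municipality "Kitchener" and Office "Mayor",
# makeid() passes "Doe, Jane-Kitchener-Mayor" through valnameid(), which maps the comma and spaces
# to underscores, giving the ID "Doe__Jane-Kitchener-Mayor".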
BEGIN {
# IFS = ","
# OFS = " "
# IGNORECASE = 1 # for sorting
# staticfields = 7
# Municipalities[0][0]="" # Initialize 2D array
## Special Purpose fields are ignored in the candidates' list of resources
### Some are used for images, some for class names (CSS styling), some for page layout
skipAttributes["Name"]=1 # Used for header, ID
skipAttributes["Office"]=1 # Used for grouping, ID
skipAttributes["Elect"]=1 # Additional info for Office
skipAttributes["Municipality"]=1 # Used for grouping, ID
skipAttributes["MunicipalityURL"]=1 # URL to municipal website
skipAttributes["MunicipalityLogo"]=1 # URL to image
skipAttributes["ImageURL"]=1 # URL to image
skipAttributes["Address"]=1 # Used for OpenStreetMap lookup
skipAttributes["Incumbent"]=1 # Class name (styling), indicator
classarray["Incumbent"]=1 # for building the class= attribute
# skipAttributes["Withdrawn"]=1 ### Changed to include reason for withdrawal, eg. "Deceased"
classarray["Withdrawn"]=1
skipAttributes["Unregistered"]=1 # Class name (styling), indicator
classarray["Unregistered"]=1
# skipAttributes["Elected"]=1 # Class name (styling), indicator ### Modified 2022-10-24 to show "Elected" or "Acclaimed"
classarray["Elected"]=1
skipAttributes["candidateclass"]=1 # Used internally to transfer styling from table to candidate list
} # BEGIN
# Read the title and subtitle(s)
(NR == 1) {
parsecsv($0, h)
} # (NR == 1)
# Read field headers
(NR == 2) {
numfields = getheaders($0, headernums, headernames)
} # (NR == 2)
# Read rest of the file
(NR > 2) {
numfields = parsecsv($0, inputarray)
## Move input line to candidates record
for (field = 1; field <= numfields; field++) {
Candidates[NR][headernames[field]] = inputarray[field]
}
## Populate Municipalities arrays
if ("" != Candidates[NR]["Name"]) {
Municipalities[Candidates[NR]["Municipality"]]++
MunicipalitiesURL[Candidates[NR]["Municipality"]] = Candidates[NR]["MunicipalityURL"]
MunicipalitiesLogo[Candidates[NR]["Municipality"]] = Candidates[NR]["MunicipalityLogo"]
## Populate Offices arrays
Offices[Candidates[NR]["Office"]]++
Elect[Candidates[NR]["Office"],Candidates[NR]["Municipality"]] = Candidates[NR]["Elect"] # Build an array of Offices, use "Elect" for later use
CandidatesInOffice[Candidates[NR]["Office"],Candidates[NR]["Municipality"]]++
} # if ("" != Candidates[NR]["Name"]
}
# (NR > 2)
END {
#
#
#
printhtmlhead(h[1] " - " h[2], "noclose")
print (" <link rel=\"stylesheet\" href=\"../candidates.css\" type=\"text/css\" />")
print ("</head>")
print ("<!--")
print ("This information is provided as-is. Data is public information and not subject to copyright.")
print ("Images and logos are copyright and trademarked by their respective owners.")
print ("Other material on this page is Copyright (c) " strftime("%Y") " by Bob Jonkman")
print ("and released under a Creative Commons Attribution-only Share-Alike license: https://creativecommons.org/licenses/by-sa/4.0/")
print ("-->")
print ("<body>")
print ("<div id=\"top_of_page\" class=\"header\">")
print (" <!--#include virtual=\"header.shtml\" -->")
print ("</div> <!-- class=header -->")
for (i = 1; i <= 6; i++) {
if ("" != h[i]) {
print ("<h" i ">" makehtml(h[i]) "</h" i ">")
}
}
# Get Municipalities and Offices in alphabetical order
numMunicipalities = asorti(Municipalities)
numOffices = asorti(Offices)
# Create table of Municipalities and Offices
print (" <table")
print (" class=\"candidates\"")
print (">")
print (" <thead>")
print (" <tr>")
print (" <th> </th>")
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print (" <th><a class=\"internal\" href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Jump to " Municipalities[Municipality] "\">" Municipalities[Municipality] "</a></th>")
} # if ("" == Municipalities[])
} # for Municipality in Municipalities[]
print (" </tr>")
print (" </thead>")
print (" <tbody>")
for (Office = 1; Office <= numOffices; Office++) {
if ("" != Offices[Office]) {
print (" <tr>")
print (" <th>" Offices[Office] "</th>")
# list candidates for Municipality here
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print " <td>"
for (Candidate in Candidates) {
if ((Candidates[Candidate]["Municipality"] == Municipalities[Municipality]) && (Candidates[Candidate]["Office"] == Offices[Office])) {
candidateclass = " " # clear any previous value; the leading space separates it from the existing class string
# Construct a class for this candidate (used for styling)
for (classflag in classarray) {
if ("" != Candidates[Candidate][classflag]) {
candidateclass = candidateclass " " tolower(classflag)
}
}
Candidates[Candidate]["candidateclass"] = candidateclass # save for use in list of candidates
print "<a href=\"#" makeid(Candidate) "\" title=\"" Candidates[Candidate]["Name"] "\" class=\"internal " candidateclass " \" >" Candidates[Candidate]["Name"]
if (Candidates[Candidate]["Incumbent"]) {
print " (I)"
}
if (Candidates[Candidate]["Unregistered"]) {
print " (Unregistered)"
}
if (Candidates[Candidate]["Withdrawn"]) {
print " (" Candidates[Candidate]["Withdrawn"] ")"
}
if (Candidates[Candidate]["Elected"]) {
print " (" Candidates[Candidate]["Elected"] ")"
}
print "</a><br />"
} # if (candidate matches)
} # for (Candidate in Candidates)
print (" </td>")
} # if ("" != Municipalities[])
} # for (Municipality in Municipalities)
print (" </tr>")
} # if ("" != Offices[Office]["Name"])
} # for (Office in Offices)
print (" </tbody>")
print (" </table>")
# Create the list of candidates
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print (" <h3 id=\"" valnameid(Municipalities[Municipality]) "\"><a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> " Municipalities[Municipality])
if ("" != MunicipalitiesURL[Municipalities[Municipality]]) {
print (" <a href=\"" MunicipalitiesURL[Municipalities[Municipality]] "\" title=\"Information about " Municipalities[Municipality] "\">")
if ("" != MunicipalitiesLogo[Municipalities[Municipality]]) {
print (" <img class=\"icon\" src=\"" MunicipalitiesLogo[Municipalities[Municipality]] "\" alt=\"Municipality logo\" /> Info")
} else {
print ("Information about " Municipalities[Municipality] "")
} # if (Municipalities logo exists)
print (" </a>")
} # if (Municipalities URL exists)
print (" </h3>")
### Construct an array of Candidates in Office so we avoid printing Offices with no candidates (or Offices that don't apply to a Municipality)
# for ( Candidate in Candidate ) {
# if ( Candidates[Candidate]
for (Office = 1; Office <= numOffices; Office++) {
if ( CandidatesInOffice[Offices[Office],Municipalities[Municipality]] ) { # If there is at least one candidate running for this office
print "<h4 id=\"" valnameid(Offices[Office] "-" Municipalities[Municipality]) "\">"
print "<a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> "
print " <a href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Up to the start of " Municipalities[Municipality] "\" class=\"internal navaid\"> ⇑ </a> "
print Offices[Office] " - " Municipalities[Municipality]
if ( Elect[Offices[Office],Municipalities[Municipality]] > 0 ) {
print " : Elect " Elect[Offices[Office],Municipalities[Municipality]]
} # if ( Elect > 0 )
print "</h4>"
print "<dl>"q
for (Candidate in Candidates) {
if (("" != Candidates[Candidate]["Name"]) && (Candidates[Candidate]["Municipality"] == Municipalities[Municipality]) && (Candidates[Candidate]["Office"] == Offices[Office])) {
print " <dt id=\"" makeid(Candidate) "\" class=\"h-card\">"
print "<a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> "
print "<a href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Up to the start of " Municipalities[Municipality] "\" class=\"internal navaid\"> ⇑ </a> "
print "<a href=\"#" valnameid(Offices[Office] "-" Municipalities[Municipality]) "\" title=\"Up to " Offices[Office] " - " Municipalities[Municipality] "\" class=\"internal navaid\" > ↑ </a> "
print "<span class=\"" Candidates[Candidate]["candidateclass"] "\">" Candidates[Candidate]["Name"]
if (Candidates[Candidate]["Unregistered"]) {
print " <strong>(Unregistered)</strong>"
}
if (Candidates[Candidate]["Incumbent"]) {
print " <strong>(Incumbent)</strong>"
}
if (Candidates[Candidate]["Elected"]) {
print " <strong>(" Candidates[Candidate]["Elected"] ")</strong>"
}
if (Candidates[Candidate]["Withdrawn"]) {
print " <strong>(" Candidates[Candidate]["Withdrawn"] ")</strong>"
}
print "</span>"
print (" <a href=\"#" makeid(Candidate) "\" title=\"Anchor link for " Candidates[Candidate]["Office"] " candidate in " Municipalities[Municipality] " \" class=\"internal\">⚓</a>")
print "<a href=\"searchassistant.html#" makeid(Candidate) "\" title=\"Find this candidate on the Search Assistant\" class=\"right\">🔎</a>"
print " </dt>"
print (" <dd class=\"h-card\">")
print (" <div class=\"aside\">")
if ("" != Candidates[Candidate]["ImageURL"]) {
if ("" != Candidates[Candidate]["Website"]) {
### Ensure we only get the first URL when there are multiples
split(Candidates[Candidate]["Website"], urls, " ")
print ("<a href=\"" urls[1] "\" title=\"" gettitle(urls[1]) "\">")
} # if ("" != Candidates[Website])
print (" <img src=\"" Candidates[Candidate]["ImageURL"] "\" alt=\"" Candidates[Candidate]["Name"] "\" />")
if ("" != Candidates[Candidate]["Website"]) {
print ("</a>")
} # if ( "" != Candidates[Website])
} else { # if ("" != Candidates[ImageURL])
print (" <p>No Image</p>")
} # if ("" != Candidates[ImageURL])
print (" </div> <!-- class=aside -->")
print (" <ul>")
### Print candidate attributes
for (Attribute in Candidates[Candidate]) {
if (Attribute in skipAttributes) { # if Attribute is in skipAttributes
continue # don't list static attributes
} # if (Attribute ~ skipAttributes)
if ("" != Candidates[Candidate][Attribute]) {
print " <li><b>" Attribute "</b>: " makehtml(Candidates[Candidate][Attribute]) "</li>"
} # if ("" != Candidates[Attribute])
} # (for Attribute in Candidates)
if ("" != Candidates[Candidate]["Address"]) {
### Automatically fetch OSM link
print (" <li class=\"h-adr\"><b>Postal Address</b>:")
### If we parse out address info
### print (" <p style=\"margin-left:2em;\">")
### print("<!-- <span class=\"p-street-address\">XXXXXSTREETADDRESS</span>,<br /><span class=\"p-locality\">XXXXXLOCALITY</span>, <span class=\"p-region\">Ontario</span>,<br /><span class=\"p-country\">Canada</span> <span class=\"p-postal-code\">XXXXXPOSTALCODE</span> -->") ;
### print (" </p>")
### If we DON'T parse out address info (monolithic addresses)
print (" <span class=\"p-address\">" Candidates[Candidate]["Address"] "</span> <a class=\"map\" href=\"https://www.openstreetmap.org/search?query=" txt2uri(Candidates[Candidate]["Address"]) "\" title=\"OpenStreetMap: " Candidates[Candidate]["Address"] "\">Map</a>")
print (" </li>")
}
print " </ul>"
print (" <br />") # Ensure images don't extend beyond <dl> box
print (" </dd>")
print ("")
} # if ("" != Candidates[Name])
} # for candidate in candidates
print (" </dl>")
print ("")
} # if ("" != Offices[])
} # if (CandidatesInOffice > 0 )
} # if ("" != Municipalities[[])
} # for (Municipality in Municipalities)
print ("<div class=\"footer\">")
print (" <!--#include virtual=\"footer.shtml\" -->")
print (" <p class=\"bottom\">This page was last updated <span class=\"datetime\" title=\"" strftime("%FT%T%z") "\">" strftime("%A, %e %B %Y at %l:%M%P %Z") "</span></p>")
print ("</div> <!-- class=footer -->")
print ("</body>")
print ("</html>")
print ("")
} # END
# EOF: csv2municipalcandidates.awk
csv2candidates-search.awk
#! /bin/gawk -f
# Program : csv2municipalcandidates-search.awk
# Purpose : Generate a table with links to simplify searching for missing data
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 15 August 2022
# Based on : csv2municipalcandidates.awk
# Copyright (c) 2020-2022 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Usage : gawk -f csv2municipalcandidates-search.awk candidates.csv
# Records : 1st line: Fields are headers <h1> to <h6>
# : 2nd line: Field header names
# : 3rd line to end: Data
# Fields : Fieldnames with * are required, cannot be empty.
# Other fields appear only if not empty
# Fieldnames with ! are used to group entries and are used for layout.
# Fieldnames with ! must exist, fields may be empty. Itemized in
# variable skipAttributes, used to skip in attribute lists
# Fields are referenced by header name,
# the second line MUST be a field header line
#
# Static Fields (must exist, if blank the entry is ignored):
# 1 Name
# Optional Static Fields (field must exist, but may be blank)
# Address (single line monolithic, at the moment. May use sub-fields later)
# Website
# E mail
# Phone
# Dynamic Fields (field may or may not exist, and may be blank)
# "Twitter","Facebook","Instagram","YouTube","LinkedIn", &c.
# (social media URLs) field numbers may vary (will eventually be parsed for @Twitter and @instagram)
@include "library.awk"
function makeid(candidatenumber)
{
return (valnameid(inputarray[headernums["Name"]] "-" inputarray[headernums["Municipality"]] "-" inputarray[headernums["Office"]]))
}
function printURL(URL,LinkText,Class)
{
print "<a href=\"" URL "\" title=\"Search: " LinkText "\" class=\"" Class "\" target=\"_blank\">" LinkText "</a>"
}
function printsearch(URL, LinkText)
{ if ( "" == inputarray[field] )
Class="red"
else
Class="green"
print "<td>"
printURL(URL,LinkText,Class)
print "</td>"
}
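# Example (hypothetical values): while processing a record whose current field is empty,
# printsearch("https://twitter.com/search?q=Jane%20Doe", "Twitter") prints roughly
#   <td> <a href="https://twitter.com/search?q=Jane%20Doe" title="Search: Twitter" class="red" target="_blank">Twitter</a> </td>
# i.e. the search link is styled "red" while the column is still blank and "green" once it holds data.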
BEGIN {
# IFS = ","
# OFS = " "
# IGNORECASE = 1 # for sorting
# staticfields = 7
# Municipalities[0][0]="" # Initialize 2D array
## Special Purpose fields are ignored in the candidates' list of resources
### Some are used for images, some for class names (CSS styling), some for page layout
# skipAttributes["Name"]=1 # Used for header, ID
skipAttributes["Office"]=1 # Used for grouping, ID
skipAttributes["Elect"]=1 # Additional info for Office
skipAttributes["Municipality"]=1 # Used for grouping, ID
skipAttributes["MunicipalityURL"]=1 # URL to municipal website
skipAttributes["MunicipalityLogo"]=1 # URL to image
skipAttributes["Riding"]=1 # Riding name
skipAttributes["RidingURL"]=1 # URL to Elections Canada/Ontario website
skipAttributes["RidingLogo"]=1 # URL to Elections Canada/Ontario logo
skipAttributes["Party"]=1 # Party Name
skipAttributes["PartyURL"]=1 # URL to Party URL
skipAttributes["PartyLogo"]=1 # URL to Party logo
skipAttributes["ImageURL"]=1 # URL to candidate image
skipAttributes["Incumbent"]=1 # Class name (styling), indicator
skipAttributes["Unregistered"]=1 # Class name (styling), indicator
skipAttributes["Elected"]=1 # Class name (styling), indicator
skipAttributes["Withdrawn"]=1
skipAttributes["Address"]=1 # Used for OpenStreetMap lookup
### Things to not search for
skipAttributes["Website"]=1 # (maybe a general websearch, later)
skipAttributes["Notes"]=1
skipAttributes["Calendar"]=1
skipAttributes["Votes"]=1
skipAttributes["HiddenNotes"]=1
# Write the headers and table start up front
Title = "Search for Missing Candidate Data"
LINKREL[1]="<link rel=\"stylesheet\" href=\"/candidates.css\" type=\"text/css\" />"
LINKREL[2]="<link rel=\"shortcut icon\" href=\"https://poliblog.jonkman.ca/blogs/pollywog-32x32b.jpg\" type=\"image/x-icon\" />"
printhtmlhead(Title)
print "<body>"q
print " <h1>" Title "</h1>"
print " <p>Filename: " txt2html(ARGV[1]) "<br />"
print " Date: "
system("stat -c %y " ARGV[1] )
print "<br />Generated: " strftime("%FT%T") "</p>"
print " <table borders=\"1\" caption=\"" Title "\">"
}
# Read the title and subtitle(s)
(NR == 1) { # ignore the first line for searching
}
# (NR == 1)
# Read field headers
(NR == 2) {
numfields = getheaders($0, headernums, headernames)
print " <thead>"
print " <tr>"
for (field=1 ; field <= numfields ; field++) {
if ( skipAttributes[headernames[field]] )
continue; # skip unwanted headers
print " <th>" headernames[field] "</th>"
}
print " </tr>"
print " </thead>"
print " <tbody>" # gotta put this somewhere...
} # (NR == 2)
# Read rest of the file
(NR > 2) {
numfields = parsecsv($0, inputarray)
# If name is blank then advance to next record
if ( ! inputarray[headernums["Name"]] ) {
next
}
print " <tr>"
for (field=1 ; field <= numfields ; field++) {
if ( ! skipAttributes[headernames[field]] )
switch (headernames[field]) {
case "Name" :
print " <td id=\"" makeid(Candidate) "\">"
print "<a href=\"index.html#" makeid(Candidate) "\" title=\"Main entry for " inputarray[field] " - " inputarray[headernums["Office"]] " - " inputarray[headernums["Municipality"]] " \" class=\"right\">⊕</a>"
print "<a href=\"https://www.google.com/search?q=%22" inputarray[field] "%22 " inputarray[headernums["Office"]] " " inputarray[headernums["Municipality"]] "\" title=\"Google search\" target=\"_blank\"><strong>" inputarray[field] "</strong><br />" inputarray[headernums["Office"]] "<br />" inputarray[headernums["Municipality"]] "</a> "
print "<a href=\"#" makeid(Candidate) "\" title=\"Search Assistant Anchor for " inputarray[field] " - " inputarray[headernums["Office"]] " - " inputarray[headernums["Municipality"]] "\" class=\"right internal\">⚓</a>"
if (inputarray[headernums["Website"]]) {
numWebs = split(inputarray[headernums["Website"]],Webs," ")
for (num in Webs)
print "<a href=\"" Webs[num] "\" title=\"Go to: " Webs[num] "\" class=\"right\" target=\"_blank\">𝕎</a> "
}
print "</td>"
break
case "Phone" : {
print "<td class=\"phone\">"
numPhones = split(inputarray[field],Phones," ")
for (num in Phones) {
Phone = substr(Phones[num],index(Phones[num],"-")+1)
printURL("https://www.google.com/search?q=" Phone, Phone)
print "<br />"
}
print "</td>"
}
break
case "Email" : {
print "<td class=\"email\">"
numEmails = split(inputarray[field],Emails," ")
for (num in Emails) {
print "<a href=\"https://www.google.com/search?q=" txt2uri("\"" Emails[num] "\"") "\" title=\"Search: " Emails[num] "\" target=\"_blank\">𝕄</a> "
}
print "</td>"
}
break
case "Twitter" :
printsearch("https://twitter.com/search?src=typed_query&f=user&q=" txt2uri(inputarray[headernums["Name"]]), "Twitter")
break
case "Facebook" :
printsearch("https://www.facebook.com/search/top/?q=" txt2uri(inputarray[headernums["Name"]]), "Facebook")
break
case "LinkedIn" :
printsearch("https://linkedin.com/search/results/people/?keywords=" txt2uri(inputarray[headernums["Name"]]), "LinkedIn")
break
case "YouTube" :
printsearch("https://www.youtube.com/results?sp=EgIQAg%253D%253D&search_query=" txt2uri(inputarray[headernums["Name"]]), "YouTube")
break
case "Instagram" :
printsearch("https://www.instagram.com/explore/search/keyword/?q=" txt2uri(inputarray[headernums["Name"]]), "Instagram")
break
case "Pinterest" :
printsearch("https://www.pinterest.ca/search/pins/?q=" txt2uri(inputarray[headernums["Name"]]), "Pinterest")
break
case "Flickr" :
printsearch("https://www.flickr.com/search/people/?username=" txt2uri(inputarray[headernums["Name"]]), "Flickr")
break
case "Tumblr" :
printsearch("https://www.tumblr.com/search/" txt2uri(inputarray[headernums["Name"]]), "Tumblr")
break
case "MySpace" :
printsearch("https://myspace.com/search/people?q=" txt2uri(inputarray[headernums["Name"]]), "MySpace")
break
case "TikTok" :
printsearch("https://www.tiktok.com/search/user?q=" txt2uri(inputarray[headernums["Name"]]), "TikTok")
break
case "Twitch" :
printsearch("https://www.twitch.tv/search?term=" txt2uri(inputarray[headernums["Name"]]), "Twitch")
break
case "Reddit" :
printsearch("https://www.reddit.com/search/?type=user&q=" txt2uri(inputarray[headernums["Name"]]), "Reddit")
break
default :
print "<!-- default blank -->"
} # switch (headernames[field])
} # for fields
print " </tr>"
} # (NR > 2)
END { print " </tbody>"
print " </table>"
print " <div class=\"footer\">"
print (" <!--#include virtual=\"footer.shtml\" -->")
print (" <p class=\"bottom\">This page was last updated <span class=\"datetime\" /title=\"" strftime("%FT%T%z") "\">" strftime("%A, %e %B %Y at %l:%M%P %Z") "</span></p>")
print (" </div> <!-- class=footer -->")
print ("</body>")
print ("</html>")
print ("")
}
# EOF: csv2candidates-search.awk
/home/bjonkman/bin/awk/library.awk
# Program : LIBRARY.AWK
# Purpose : Contains functions common to many AWK scripts
# Author : Bob Jonkman <bjonkman@sobac.com>
# Copyright 2008 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Date : 19 July 2005
# Contents :
# rstring() - Returns the right-most n characters of string
# max() - Maximum of items in list
# min() - Minimum of items in list
# trim() - Trim whitespace from both ends of a string
# ltrim() - Trim whitespace from beginning (left side) of a string
# rtrim() - Trim whitespace from end (right side) of a string
# gwid2smtp() - escape reserved SMTP characters in GW Object
# see http://www.novell.com/documentation/gw55/index.html?page=/documentation/gw55/gw55ia/data/a30135u.html#a30135u
# smtp2gwid() - un-escape
# reverse() - reverse order of delimited string, eg. a.b.c -> c_b_a
# txt2html() - escape reserved HTML characters
# txt2xml() - escape reserved XML characters
# html2txt() - restore reserved HTML characters
# txt2uri() - escape reserved URI characters
# uri2txt() - restore reserved URI characters
# makehtml() - Create href links (http:// file:// mailto:) from text (mailto: was microformatted with class="vcard" on 2006-08-04)
# parsecsv() - Split Comma-Quote fields in string into an array
# printcsv() - Determine whether to print a field with delimiters
# valnameid() - Validate Name and ID token names (see http://www.w3.org/TR/html4/types.html#type-id )
# printhtmlhead() - print headers for an HTML file
# parsecgi() - Parse QUERY_STRING or POST data into an array
# txt2gwapi() - escape reserved characters in GW API files
# (see pg 42,43 "Keyword Ordering Requirements and Delimiters" in GroupWise API Gateway documentation)
# getheaders() - Create reverse lookup array for input string
# printarray() - Print all elements of an array (for debugging, mostly) Added 2022-05-01
# gettitle() - Get a title attribute from web page (stub added 2022-05-01)
function rstring(string,n) { # Returns the right-most n characters of string
return(substr(string,length(string)-n+1))
}
function max(a,b) { return(( a > b ) ? a : b)
}
function min(a,b) { return(( a < b ) ? a : b)
}
function trim(string) {
string = rtrim(string) ;
string = ltrim(string) ;
return(string) ;
}
##### End trim #####
# trim left-side whitespace
function ltrim(string) { gsub(/^[ \t]+/, "", string)
return(string)
}
##### End ltrim #####
# trim right-side whitespace
function rtrim(string) { gsub(/[ \t]+$/, "", string)
return(string)
}
##### End rtrim #####
function gwid2smtp(text, smtptext) # Perform GroupWise character translation
# http://www.novell.com/documentation/gw55/index.html?page=/documentation/gw55/gw55ia/data/a30135u.html#a30135u
{
smtptext = text ;
gsub(/#/ ,"#h#",smtptext) ;
gsub(/_/ ,"#u#",smtptext) ;
gsub(/ / ,"_" ,smtptext) ;
gsub(/\(/ ,"#l#",smtptext) ;
gsub(/)/ ,"#r#",smtptext) ;
gsub(/,/ ,"#m#",smtptext) ;
gsub(/:/ ,"#c#",smtptext) ;
gsub(/\\/ ,"#b#",smtptext) ;
gsub(/=/ ,"#e#",smtptext) ;
gsub(/\// ,"#s#",smtptext) ;
return(smtptext) ;
}
##### End gwid2smtp #####
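# Example (hypothetical GroupWise object name):
#   gwid2smtp("John Smith (Sales)") returns "John_Smith_#l#Sales#r#"
#   -- spaces become "_" and parentheses become "#l#"/"#r#"; smtp2gwid() below reverses the mapping.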
function smtp2gwid(text, gwidtext)
{
gwidtext = text ;
gsub(/#s#/,"/" ,gwidtext);
gsub(/#e#/,"=" ,gwidtext);
gsub(/#b#/,"\\",gwidtext);
gsub(/#c#/,":" ,gwidtext);
gsub(/#m#/,"," ,gwidtext);
gsub(/#r#/,")" ,gwidtext);
gsub(/#l#/,"(" ,gwidtext);
gsub(/_/ ," " ,gwidtext);
gsub(/#u#/,"_" ,gwidtext);
gsub(/#h#/,"#" ,gwidtext);
return(gwidtext) ;
}
##### End smtp2gwid #####
function reverse(instring,inseparator,outseparator, numelements,array,i,outstring)
{
numelements = split(instring, array, inseparator);
outstring = array[numelements];
for (i=numelements-1; i>0; i--)
outstring = outstring outseparator array[i];
return outstring;
}
##### End reverse #####
function txt2html(text, htmltext)
{
htmltext = text ;
gsub(/&/ , "\\&amp;" ,htmltext)
gsub(/>/ , "\\&gt;" ,htmltext)
gsub(/</ , "\\&lt;" ,htmltext)
gsub(/"/ , "\\&quot;" ,htmltext)
gsub(/'/ , "\\&#39;" ,htmltext) # a raw tick appears to be invalid in XML UTF-8 documents, substitute the numeric entity
gsub(/\x91/, "\\&lsquo;",htmltext) # \x91 left-single-quote or backtick becomes &lsquo;
gsub(/\x92/, "\\&rsquo;",htmltext) # \x92 right-single-quote becomes &rsquo;
gsub(/\x93/, "\\&ldquo;",htmltext) # \x93 left-double-quote becomes &ldquo;
gsub(/\x94/, "\\&rdquo;",htmltext) # \x94 right-double-quote becomes &rdquo;
gsub(/\x95/, "\\&ndash;" ,htmltext) # \x95 N-dash becomes &ndash;
gsub(/\x96/, "\\&mdash;" ,htmltext) # \x96 EM Dash becomes &mdash;
# print("\n##### DEBUG ##### txt2html: text=" text "; htmltext=" htmltext "\n")
##### DEBUG ##### gsub(/[^a-zA-Z0-9 \!\#\$\%\&\(\)\*\+\,\-\.\/\:\;\=\?\@\[\\\]\^\_\`\{\|\}\~\n\t]/,"",htmltext) # remove all other non-ascii characters
return(htmltext) ;
}
##### End txt2html #####
function txt2xml(text, xmltext) # converts plain text or HTML to XML
{
xmltext = text ;
gsub(/&/ , "\\&amp;" ,xmltext)
gsub(/>/ , "\\&gt;" ,xmltext)
gsub(/</ , "\\&lt;" ,xmltext)
gsub(/"/ , "\\&quot;" ,xmltext)
gsub(/'/ , "\\&#39;" ,xmltext) # ' or tick appears to be invalid in XML UTF-8 documents, substitute numeric entity
gsub(/\x91/ , "\\&#39;" ,xmltext) # \x91 left-single-quote or backtick becomes tick
gsub(/‘/, "\\&#39;" ,xmltext) # ‘ left-single-quote becomes tick
gsub(/\x92/ , "\\&#39;" ,xmltext) # \x92 right-single-quote becomes tick
gsub(/’/, "\\&#39;" ,xmltext) # ’ right-single-quote becomes tick
gsub(/\x93/ , "\\&quot;" ,xmltext) # \x93 left-double-quote becomes double-quote
gsub(/“/, "\\&quot;" ,xmltext) # “ left-double-quote becomes double-quote
gsub(/\x94/ , "\\&quot;" ,xmltext) # \x94 right-double-quote becomes double-quote
gsub(/”/, "\\&quot;" ,xmltext) # ” right-double-quote becomes double-quote
gsub(/\x95/ , "-" ,xmltext) # \x95 N-dash becomes hyphen (for XML)
gsub(/–/, "-" ,xmltext) # – N-dash becomes hyphen (for XML)
gsub(/--/ , "\\+\\+" ,xmltext) # two hyphens are invalid inside comments <!-- -->
gsub(/\x96/ , "\\+\\+" ,xmltext) # \x96 EM Dash becomes two hyphens
gsub(/—/, "\\+\\+" ,xmltext) # — EM Dash becomes two hyphens
##### DEBUG ##### gsub(/[^a-zA-Z0-9 \!\#\$\%\&\(\)\*\+\,\-\.\/\:\;\=\?\@\[\\\]\^\_\`\{\|\}\~\n\t]/,"",xmltext) # remove all other non-ascii characters
return(xmltext) ;
}
function html2txt(htmltext, text)
{
text = htmltext ;
gsub(/–/ , "-" , text) # N-dash value
gsub(/&ndash;/ , "-" , text) # N-dash entity
gsub(/—/ , "--" , text) # M-dash value
gsub(/&mdash;/ , "--" , text) # M-dash entity
gsub(/‘/ , "'" , text) # left-single-quote value
gsub(/&lsquo;/ , "'" , text) # left-single-quote entity
gsub(/’/ , "'" , text) # right-single-quote value
gsub(/&rsquo;/ , "'" , text) # right-single-quote entity
gsub(/“/ , "\"" , text) # left-double-quote value
gsub(/&ldquo;/ , "\"" , text) # left-double-quote entity
gsub(/”/ , "\"" , text) # right-double-quote value
gsub(/&rdquo;/ , "\"" , text) # right-double-quote entity
gsub(/&#39;/ , "'" , text) # apostrophe or tick, numeric entity
gsub(/&apos;/ , "'" , text) # apostrophe entity
gsub(/&quot;/ , "\"" , text)
gsub(/&nbsp;/ , " " , text)
gsub(/&lt;/ , "<" , text)
gsub(/&gt;/ , ">" , text)
gsub(/&amp;/ , "\\&" , text)
return(text) ;
}
##### End html2txt #####
function txt2uri(text, uritext) # see RFC3986 (STD0066) section 2.2
{
uritext = text ;
gsub(/%/ ,"%25",uritext)
gsub(/ / ,"%20",uritext)
gsub(/:/ ,"%3A",uritext)
gsub(/\// ,"%2F",uritext)
gsub(/\?/ ,"%3F",uritext)
gsub(/#/ ,"%23",uritext)
gsub(/\[/ ,"%5B",uritext)
gsub(/]/ ,"%5D",uritext)
gsub(/@/ ,"%40",uritext)
gsub(/!/ ,"%21",uritext)
gsub(/\$/ ,"%24",uritext)
gsub(/&/ ,"%26",uritext)
gsub(/'/ ,"%27",uritext)
gsub(/\(/ ,"%28",uritext)
gsub(/)/ ,"%29",uritext)
gsub(/\*/ ,"%2A",uritext)
gsub(/\+/ ,"%2B",uritext)
gsub(/,/ ,"%2C",uritext)
gsub(/;/ ,"%3B",uritext)
gsub(/=/ ,"%3D",uritext)
gsub(/\|/ ,"%7C",uritext) # Not part of RFC3986 (I think)
gsub(/~/ ,"%7E",uritext) # Not part of RFC3986 (I think)
gsub(/\^/ ,"%5E",uritext) # Not part of RFC3986 (I think)
gsub(/`/ ,"%60",uritext) # Not part of RFC3986 (I think)
gsub(/\{/ ,"%7B",uritext) # Not part of RFC3986 (I think)
gsub(/\}/ ,"%7D",uritext) # Not part of RFC3986 (I think)
gsub(/"/ ,"%22",uritext) # Not part of RFC3986 (I think)
gsub(/</ ,"%3C",uritext) # Not part of RFC3986 (I think)
gsub(/>/ ,"%3E",uritext) # Not part of RFC3986 (I think)
gsub(/\\/ ,"%5C",uritext) # Not part of RFC3986 (I think)
return(uritext) ;
}
##### End txt2uri #####
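# Example (hypothetical address string):
#   txt2uri("10 King St. W, Kitchener") returns "10%20King%20St.%20W%2C%20Kitchener"
#   -- "%" is escaped first so the "%" characters introduced by later substitutions are not re-escaped.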
function uri2txt(uri, text)
{
text = uri
gsub(/%20/," " ,text)
gsub(/%3A/,":" ,text)
gsub(/%2F/,"/" ,text)
gsub(/%3F/,"?" ,text)
gsub(/%23/,"#" ,text)
gsub(/%5B/,"[" ,text)
gsub(/%5D/,"]" ,text)
gsub(/%40/,"@" ,text)
gsub(/%21/,"!" ,text)
gsub(/%24/,"$" ,text)
gsub(/%26/,"\\&" ,text) # unescaped '&' would indicate "replacement text"
gsub(/%27/,"'" ,text)
gsub(/%28/,"(" ,text)
gsub(/%29/,")" ,text)
gsub(/%2A/,"*" ,text)
gsub(/%2B/,"+" ,text)
gsub(/%2C/,"," ,text)
gsub(/%3B/,";" ,text)
gsub(/%3D/,"=" ,text)
gsub(/%7C/,"|" ,text)
gsub(/%7E/,"~" ,text)
gsub(/%5E/,"^" ,text)
gsub(/%60/,"`" ,text)
gsub(/%7B/,"{" ,text)
gsub(/%7D/,"}" ,text)
gsub(/%22/,"\"" ,text)
gsub(/%3C/,"<" ,text)
gsub(/%3E/,">" ,text)
gsub(/%0D%0A/,"\\n" ,text)
gsub(/%5C/,"\\" ,text) # not part of RFC3986 ?
gsub(/%25/,"%" ,text)
return(text)
}
function makehtml(string, numurls,i,htmlstring,path,gwiduri,outstring)
{
numurls = split(string, urls, " ") #split string into urls on space. If a URL is meant to include a space then the input string must substitute '+'
for (i=1; i<=numurls; i++)
{
if(substr(urls[i],1,2) == "\\\\")
{
path = urls[i] ;
gsub(/\\\\/,"/",path) ;
htmlstring = "<a href=\"file:///" txt2uri(path) "\">" txt2html(urls[i]) "</a>" ;
}
else if(tolower(substr(urls[i],1,7)) == "http://") # If the string starts with http:// then assume it is already txt2uri converted
htmlstring = "<a href=\"http://" substr(urls[i],8) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(tolower(substr(urls[i],1,8)) == "https://") # If the string starts with https:// then assume it is already txt2uri converted
htmlstring = "<a href=\"https://" substr(urls[i],9) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(tolower(substr(urls[i],1,2)) == "//") # If the string starts with // then assume it is already txt2uri converted
htmlstring = "<a href=\"//" substr(urls[i],3) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(atpos=index(urls[i], "@"))
{
htmlstring = "<span class=\"vcard\"><a class=\"email\" href=\"mailto:" txt2uri(urls[i]) "\" title=\"E-mail to " txt2html(urls[i]) "\">" txt2html(urls[i]) "</a>" ; # E-mail address is not cleaned with txt2uri() so @ stays as symbol. But txt2uri() may be necessary!
# if ((substr(urls[i],atpos+1) == "sobac.com") && !NOEGUIDE)
# { gwiduri = txt2uri(smtp2gwid(substr(urls[i],1,atpos-1))) ;
# htmlstring = htmlstring " <a class=\"url\" href=\"http://eguide.sobac.com/eGuide/servlet/eGuide?Action=Detail.get&User.dn=cn%3d" gwiduri "%2cou%3dStaff%2co%3dsobac&Directory.uid=Staff\" title=\"Look up " gwiduri " in eGuide Staff Container\" target=\"ldap\">S</a>"
# htmlstring = htmlstring " <a class=\"url\" href=\"http://eguide.sobac.com/eGuide/servlet/eGuide?Action=Detail.get&User.dn=cn%3d" gwiduri "%2cou%3dNon%2dStaff%2co%3dsobac&Directory.uid=Non_Staff\" title=\"Look up " gwiduri " in eGuide Non-Staff Container\" target=\"ldap\">N</a>"
# }
htmlstring = htmlstring "</span>"
}
else if( (tolower(substr(urls[i],1,4)) == "tel:" ) || (substr(urls[i],1,1) == "+") )
{
if (tolower(substr(urls[i],1,4)) == "tel:" )
{
# print ("<!-- #####DEBUG##### i=" i " ; urls[i]=" urls[i] " ; tolower(substr(urls[i],1,4))=" tolower(substr(urls[i],1,4)) " -->")
urls[i] = substr(urls[i],5) # strip out "tel:"
}
htmlstring = "<span class=\"vcard h-card\"><a class=\"p-tel\" href=\"tel:" urls[i] "\" title=\"Phone this number\">" urls[i] "</a></span>" ;
}
else htmlstring = txt2html(urls[i]) ;
outstring = outstring " " htmlstring # Concatenate to outstring separated by spaces
} # for i<=numurls
return(trim(outstring)) ; # Concatenation above prepends an extra space, so trim it
}
##### End makehtml #####
function parsecsv(rawfield,fieldarray, fieldnum,qflag,i,char)
{
#
# Note that parsecsv() does not currently handle embedded CRLF
# eg. "data","data<CRLF>data","data"
#
delete fieldarray ;
qflag = 0 ; # TRUE if inside field delimiters
fieldnum = 1 ;
if(!FIELD_SEPARATOR)
FIELD_SEPARATOR = "," ;
if(!FIELD_DELIMITER)
FIELD_DELIMITER = "\"" ;
for(i=1; i <= length(rawfield); i++)
{ char = substr(rawfield,i,1)
if (char == FIELD_DELIMITER)
{
if (substr(rawfield,i+1,1) == FIELD_DELIMITER) # check for two adjacent field delimiters; treat as one character
{
i++ # skip over the next character, and add the delimiter to the output string
fieldarray[fieldnum] = fieldarray[fieldnum] FIELD_DELIMITER
}
else
qflag = !qflag;
}
else if (char == FIELD_SEPARATOR)
{ if (qflag)
fieldarray[fieldnum] = fieldarray[fieldnum] FIELD_SEPARATOR ;
else
{
fieldnum++ ;
}
}
else
{ fieldarray[fieldnum] = fieldarray[fieldnum] char ;
}
}
return(fieldnum);
}
##### End of parsecsv() #####
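# Example (hypothetical CSV record):
#   n = parsecsv("\"Doe, Jane\",Mayor,\"She said \"\"hi\"\"\"", a)
#   sets n = 3, a[1] = "Doe, Jane", a[2] = "Mayor", a[3] = "She said \"hi\""
#   -- separators inside delimiters stay in the field, and doubled delimiters collapse to one.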
function printcsv(field) # Determine whether to print a field with delimiters
{
if(!FIELD_SEPARATOR)
FIELD_SEPARATOR = "," ;
if(!FIELD_DELIMITER)
FIELD_DELIMITER = "\"" ;
# Double field delimiters to escape them
gsub(FIELD_DELIMITER,FIELD_DELIMITER FIELD_DELIMITER,field)
# Apply field delimiters when field separator is in field
if (field ~ FIELD_SEPARATOR)
field = FIELD_DELIMITER field FIELD_DELIMITER
return(field)
}
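# Example (hypothetical values): printcsv("Doe, Jane") returns "\"Doe, Jane\"" because the
# embedded separator forces delimiters; printcsv("Mayor") is returned unchanged.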
function valnameid(text, valtext) # Validate NAME and ID token names
{
valtext = text
gsub(/[^A-Za-z0-9\-_:\.]/, "_", valtext)
if(substr(valtext,1,1) !~ /[A-Za-z]/)
{
valtext = "a" valtext # Prepend an 'a'
}
return(valtext)
}
##### End of valnameid() #####
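# Example (hypothetical input): valnameid("2022 Ward 1 / Council") returns "a2022_Ward_1___Council"
# -- characters outside [A-Za-z0-9-_:.] become "_" and a leading non-letter gets "a" prepended,
# per the NAME/ID rules referenced above.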
function printhtmlhead(title,noclosetag)
{
# print ("<!-- #####DEBUG##### title=" title " -->")
# print ("<!-- #####DEBUG##### noclosetag=" noclosetag " -->")
if (!QUOTE)
QUOTE = "\""
if ((ENVIRON["REQUEST_METHOD"]) && (!NOHTTP)) # If this is a CGI request and NOHTTP has not been specified
print("Content-type: text/html; charset=UTF-8\n") # ...then print HTTP header
### xml directives are not supported in HTML5
# print("<?xml version=" QUOTE "1.0" QUOTE " encoding=" QUOTE "utf-8" QUOTE "?>") ;
### Removed XHTML Strict --Bob 2022-05-06
# print("<!DOCTYPE html PUBLIC " QUOTE "-//W3C//DTD XHTML 1.0 Strict//EN" QUOTE ) ;
# print(" " QUOTE "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd" QUOTE ">") ;
# print("<html xmlns=" QUOTE "http://www.w3.org/1999/xhtml" QUOTE ) ;
# print(" xml:lang=" QUOTE "en" QUOTE ) ;
# print(" lang=" QUOTE "en" QUOTE ">") ;
### Added HTML5 header --Bob 2022-05-06
print("<!DOCTYPE html>")
print("<html lang=\"en\">")
print(" <head>") ;
print(" <meta http-equiv=" QUOTE "Content-Type" QUOTE ) ;
print(" content=" QUOTE "text/html;charset=utf-8" QUOTE " />") ;
print(" <link rel=" QUOTE "stylesheet" QUOTE ) ;
print(" href=" QUOTE "/default.css" QUOTE ) ;
print(" type=" QUOTE "text/css" QUOTE " />") ;
for(i in LINKREL)
print(" " LINKREL[i])
print(" <meta name=" QUOTE "generator" QUOTE ) ;
print(" content=" QUOTE "AWK tools for HTML by Bob Jonkman" QUOTE " />") ;
print(" <meta name=" QUOTE "author" QUOTE ) ;
print(" content=" QUOTE "Bob Jonkman bjonkman@sobac.com;" QUOTE " />") ;
print(" <meta name=" QUOTE "description" QUOTE ) ;
print(" content=" QUOTE "Datetime: " strftime("%FT%T") )
print(" " txt2html(title) QUOTE " />" )
print(" <title>" txt2html(title) "</title>") ;
if ("" == noclosetag) { # Suppress close tag if requested
print (" </head>")
}
}
##### End of printhtmlhead() #####
function parsecgi(inputstring,outputarray, querystring,paramstring,numparam)
{ delete querystring
delete outputarray
numparam = split(inputstring,querystring,"&")
for(i in querystring)
{
gsub(/\+/," ",querystring[i]) # remove + as space substitute
split(querystring[i],paramstring,"=")
# allow multi-value parameters, separate with FS
if (outputarray[uri2txt(paramstring[1])])
{
outputarray[uri2txt(paramstring[1])] = outputarray[uri2txt(paramstring[1])] FS uri2txt(paramstring[2]) ;
numparam-- ;
}
else
outputarray[uri2txt(paramstring[1])] = uri2txt(paramstring[2]) ;
}
return(numparam)
}
##### End of parsecgi() #####
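# Example (hypothetical query string):
#   n = parsecgi("name=Jane+Doe&ward=1&ward=2", params)
#   sets params["name"] = "Jane Doe" and params["ward"] to both values joined with FS
#   (array traversal order is not guaranteed), and returns n = 2 distinct parameter names.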
function txt2gwapi(text)
{
gsub(/;/ ,"\\;" , text) # escape semicolon to backslash semicolon
gsub(/"/ , "\\\"", text) # escape unpaired double quote to backslash unpaired double quote
return(text)
}
##### End of txt2gwapi()
# Results of getheaders("Alpha,Beta,Gamma",namearray,numberarray)
# namearray["Alpha"] == 1
# namearray["Beta"] == 2
# namearray["Gamma"] == 3
# numberarray[1] == "Alpha"
# numberarray[2] == "Beta"
# numberarray[3] == "Gamma"
function getheaders(instring,namearray,numberarray, numfields,i) {
numfields = parsecsv(instring,numberarray)
for(i=1; i<=numfields; i++)
namearray[numberarray[i]] = i ;
return(numfields)
}
##### End of getheaders()
function printarray(arrayname, name, i)
{
if (isarray(arrayname)) {
print "Array:"
for (name in arrayname) {
if (isarray(arrayname[name])) {
print "\nBegin Subarray"
printarray(arrayname[name])
print "End Subarray\n"
} else {
print i++ " name=" name " arrayname[name]=" arrayname[name]
} # if(isarray(arrayname[name]))
} # for (name in arrayname)
} else { # not an array
print "Scalar: " arrayname
} # if(isarray(arrayname))
return i
}
##### End of printarray()
function gettitle(url)
{ return(txt2html(url)) # It's just a stub for now
}
# EOF: LIBRARY.AWK
makefile
# Program : makefile
# Purpose : To re-create the files
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 5 August 2022
.PHONY : all
all : csv2municipalcandidates.awk csv2municipalcandidates.html \
csv2candidates-search.awk \
csv2candidates.awk csv2candidates.html \
candidates.css \
rsync
### Municipal
csv2municipalcandidates.awk : csv2municipalcandidates-DEBUG.awk
grep -v "#####DEBUG#####" csv2municipalcandidates-DEBUG.awk > csv2municipalcandidates.awk
csv2municipalcandidates.html : csv2municipalcandidates.awk csv2candidates-search.awk makefile */makefile
/home/bjonkman/bin/awk/code2html.awk -v LINK=1 -v TITLE="CSV to Municipal Candidates" \
csv2municipalcandidates.awk \
csv2candidates-search.awk \
/home/bjonkman/bin/awk/library.awk \
makefile \
*/makefile > csv2municipalcandidates.html
### Federal/Provincial
csv2candidates.awk : csv2candidates-DEBUG.awk
grep -v "#####DEBUG#####" csv2candidates-DEBUG.awk > csv2candidates.awk
csv2candidates.html : csv2candidates.awk csv2candidates-search.awk makefile */makefile
/home/bjonkman/bin/awk/code2html.awk -v LINK=1 -v TITLE="CSV to Candidates (Federal or Provincial)" \
csv2candidates.awk \
csv2candidates-search.awk \
/home/bjonkman/bin/awk/library.awk \
makefile \
*/makefile > csv2candidates.html
### Everything
csv2candidates-search.awk : csv2candidates-search-DEBUG.awk
grep -v "#####DEBUG#####" csv2candidates-search-DEBUG.awk > csv2candidates-search.awk
.PHONY : rsync
rsync :
rsync -av csv2*candidates.html irving:/home/jonkman/public_html/Poliblog-Elections/ ; \
rsync -av candidates.css irving:/home/jonkman/public_html/
# EOF: makefile
2022-10-24-Municipal-Election/makefile
# File : makefile
# Purpose : csv2municipalcandidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 July 2022
.PHONY : all
all : 2022-Municipal.ods 2022-Municipal.csv index.html Municipality.html searchassistant.html rsync
2022-Municipal.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/
index.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/index.html
2022-Municipal.csv : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
soffice --convert-to "csv" /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
Municipality.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Municipalities, URLs and Logos" /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.html
searchassistant.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates-search.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates-search.awk /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/ irving:/home/jonkman/public_html/Poliblog-Elections/2022-10-24-Municipal-Election/
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
2023-11-13-Cambridge-Ward01-Byelection/makefile
# File : makefile
# Purpose : csv2municipalcandidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 July 2022
# Modified: 1 November 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Municipalities.html searchassistant.html rsync
index.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/index.html
candidates.csv : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.ods
soffice --convert-to "csv" /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.ods
Municipalities.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Municipalities, URLs and Logos" /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.html
searchassistant.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2candidates-search.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2candidates-search.awk /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/ irving:/home/jonkman/public_html/Poliblog-Elections/2023-11-13-Cambridge-Ward01-Byelection/
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
2023-Provincial-Byelection-Kitchener/makefile
# File : makefile
# Purpose : Update 2023 Kitchener Centre Byelection candidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 October 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
# candidates.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2023-Provincial-Byelection-Kitchener/candidates.ods
# cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2023-Provincial-Byelection-Kitchener/candidates.ods
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
Parties-Ridings.html : Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2023-Provincial-Byelection-Kitchener/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile