csv2candidates.awk
#! /bin/gawk -f
# Program : csv2candidates.awk
# Purpose : Generate copy'n'paste-able code for Poliblog's candidate lists
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 30 April 2022
# Copyright (c) 2020 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Usage : gawk -f csv2candidates.awk candidates.csv
# Fields : (fields with * will appear even when empty, social media fields only for those who have them)
# Static Fields: (update the variable in BEGIN block when this changes!)
# * 1 Name
# * 2 Riding
# 3 Party # \
# 4 PartyURL # > Party info labels appear only if there's party info, so this can be used for municipal elections too
# 5 PartyLogo # /
# * 6 Website
# * 7 Email
# * 8 Phone
# * 9 Address (single line monolithic, at the moment. May use sub-fields later)
# * 10 ImageURL
# 11 Incumbent
# * 12 RidingURL
# * 13 RidingLogo
#
# Dynamic Fields:
# "Twitter","Facebook","Instagram","YouTube","LinkedIn", &c.
# (social media URLs) field numbers may vary (will eventually be parsed for @Twitter and @instagram)
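#
# Illustrative input sketch (hypothetical data; the field names and values below
# are examples for layout only, not taken from a real candidates.csv):
#   Line 1: page headings (become <h1>..<h6>)
#   Line 2: field header names
#   Line 3 onward: one candidate per record
#
# "2022 Example Election","Candidates by Riding"
# "Name","Riding","Party","PartyURL","PartyLogo","Website","Email","Phone","Address","ImageURL","Incumbent","RidingURL","RidingLogo","Twitter"
# "Jane Doe","Example Riding","Example Party","https://example.org/party","","https://janedoe.example.org","jane@example.org","+1-519-555-0100","123 Main St, Example ON","","","","","https://twitter.com/example"
#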
@include "library.awk"
function makeid(candidatenumber) {
return(valnameid(candidates[candidatenumber,"Name"] "-" candidates[candidatenumber,"Riding"] "-" candidates[candidatenumber,"Party"]))
}
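# Example (hypothetical values): for a record with Name="Jane Doe",
# Riding="Example Riding" and Party="Example Party", makeid() returns
# valnameid("Jane Doe-Example Riding-Example Party"),
# i.e. "Jane_Doe-Example_Riding-Example_Party".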
BEGIN {
## Special Purpose fields are ignored in the candidates' list of resources
### Some are used for images, some for class names (CSS styling), some for page layout
skipAttributes["Name"]=1 # Used for header, ID
skipAttributes["Firstname"]=1 # Waterloo Region Votes needs separated FirstName, Lastname (we don't)
skipAttributes["Lastname"]=1 # Waterloo Region Votes needs separated FirstName, Lastname (we don't)
skipAttributes["Riding"]=1 # Used for header, ID
skipAttributes["RidingURL"]=1 # URL to Elections Ontario/Canada website
skipAttributes["RidingLogo"]=1 # URL to image
skipAttributes["RidingShortName"]=1 # Selector for Riding Name
skipAttributes["Party"]=1 # Party name
skipAttributes["PartyShortName"]=1 # Selector for Party name
skipAttributes["PartyLogo"]=1 # URL to image
skipAttributes["PartyURL"]=1 # URL to party website
skipAttributes["Office"]=1 # Used for grouping, ID (eg. Mayor, Councillor, Trustee)
skipAttributes["Elect"]=1 # Additional info for Office
skipAttributes["Municipality"]=1 # Used for grouping, ID
skipAttributes["MunicipalityURL"]=1 # URL to municipal website
skipAttributes["MunicipalityLogo"]=1 # URL to image
skipAttributes["ImageURL"]=1 # URL to candidate image
skipAttributes["Address"]=1 # Used for OpenStreetMap lookup
skipAttributes["HiddenNotes"]=1 # 'cos they're hidden, eh?
skipAttributes["Incumbent"]=1 # Class name (styling), indicator
skipAttributes["candidateclass"]=1 # Used internally to transfer styling from table to candidate list
### classarray to turn candidate attributes in to a class for styling
classarray["Incumbent"]=1 # for building the class= attribute
classarray["Withdrawn"]=1
classarray["Unregistered"]=1
classarray["Elected"]=1
### Fields to concatenate for search
concatfields["Name"]=1
concatfields["Office"]=1
concatfields["Municipality"]=1
concatfields["Riding"]=1
concatfields["Party"]=1
IFS = ","
OFS = " "
}
# Read the title and subtitle(s)
(NR == 1) {
parsecsv($0, h)
}
# Read field headers
(NR == 2) {
numfields = getheaders($0, headernums, headernames)
}
# Read rest of the file
(NR > 2) {
numfields = parsecsv($0, inputarray)
for (field = 1; field <= numfields; field++) {
candidates[NR, headernames[field]] = inputarray[field]
}
### Create an id attribute, same as candidates-search.awk
concatid = ""
for ( concatname in concatfields ) {
if ( "" != trim(candidates[NR, concatname]) ) {
concatid = concatid candidates[NR, concatname] "-"
}
}
candidates[NR, "concatid"] = valnameid(concatid) ### ensure this string is a valid id attribute
### Build candidateclass for styling in table and name
candidateclass = ""
for ( classflag in classarray ) {
if ( "" != trim(candidates[NR, classflag]) ) {
candidateclass = candidateclass " " classflag
}
}
candidates[NR, "candidateclass"] = tolower(candidateclass)
### Build list of ridings
if ( "" != trim(candidates[NR, "Riding"]) ) {
riding = candidates[NR, "Riding"]
ridings[riding]++
ridingurls[riding] = candidates[NR, "RidingURL"]
ridinglogos[riding] = candidates[NR, "RidingLogo"]
} # if ("" != Riding)
### Build list of parties
if ( "" != trim(candidates[NR, "Party"]) ) {
parties[candidates[NR,"Party"]]++ # Build an array of parties
partyurls[candidates[NR,"Party"]] = candidates[NR, "PartyURL"]
ridingsxparties[riding, candidates[NR,"Party"]] = candidates[NR,"Name"] # Build an array of Ridings x Parties
} # if ("" != Party)
}
END {
printhtmlhead(h[1] " - " h[2], "noclose")
##### Instead of "noclose" use
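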
print (" <link rel=\"stylesheet\" href=\"../candidates.css\" type=\"text/css\">")
print (" <link rel=\"shortcut icon\" href=\"https://poliblog.jonkman.ca/blogs/pollywog-32x32b.jpg\" type=\"image/x-icon\">")
### Close <head> ("noclose")
print ("</head>")
print ("<!--")
print ("This information is provided as-is. Data is public information and not subject to copyright.")
print ("Images and logos are copyright and trademarked by their respective owners.")
print ("They are used under \"Fair Dealing\" as news reporting.")
print ("")
print ("Other material on this page is Copyright (c) " strftime("%Y") " by Bob Jonkman")
print ("and released under a Creative Commons Attribution-only Share-Alike license: http://creativecommons.org/licenses/by-sa/4.0/")
print (" -->")
print ("<body>")
print ("<div id=\"top_of_page\" class=\"header\">")
print (" <!--#include virtual=\"header.ssi\" -->")
print ("</div> <!-- class=header -->")
for (i = 1; i <= 6; i++) {
if ("" != trim(h[i])) {
print ("<h" i ">" h[i] "</h" i ">")
}
}
# Get ridings and parties in alphabetical order
numridings = asorti(ridings)
numparties = asorti(parties)
# Create table of ridings and parties
# ########################################## #
# #
# This is the new way of creating a table #
# Ridings x Party #
# (because the number of ridings will #
# be more consistent than the number #
# of parties) #
# #
# ########################################## #
print (" <table")
print (" class=\"candidates\"")
print (">")
print (" <thead>")
print (" <tr>")
print (" <th> </th>") # for the Parties column
### Print column headers
for( riding=1; riding<=numridings ; riding++) {
print (" <th><a class=\"internal\" href=\"#" valnameid(ridings[riding]) "\" title=\"Jump to " ridings[riding] "\">" ridings[riding] "</a></th>")
}
print " </tr>"
print " </thead>"
print " <tbody>"
for ( party=1; party<=numparties ; party++ ) {
print " <tr>"
if ( "" == partyurls[parties[party]] ) {
print (" <th>" parties[party] "</th>")
} else {
### The PartyURL field may contain multiple URLs, use only the first one
split(partyurls[parties[party]],partyurlsplit," ")
print (" <th><a href=\"" partyurlsplit[1] "\" title=\"" partyurls[parties[party]] "\">" parties[party] "</a></th>")
} # if ("" = partyurl)
for ( riding in ridings ) {
print " <td>"
for (candidate = 3 ; candidate <= NR ; candidate++ ) {
if ("" != trim(candidates[candidate, "Name"]) ) { # skip blank candidates
if ( (parties[party] == candidates[candidate, "Party"] ) && \
(ridings[riding] == candidates[candidate, "Riding"]) ) {
print "<a href=\"#" candidates[candidate, "concatid"] "\" title=\"" candidates[candidate, "Name"] "\" class=\"internal " candidates[candidate, "candidateclass"] "\">" candidates[candidate, "Name"]
for ( classflag in classarray ) {
if ( "" != trim(candidates[candidate, classflag]) ) { # check for non-empty field, contents may not be == classflag
print " (" classflag ")"
} # if (candidate, classarray)
} # for (classflag)
print "</a><br>"
} # if (Party, Riding)
} # if ( "" != candidate )
} # for (candidate)
print " </td>"
} # for (riding)
print " </tr>"
print ""
} # for (party)
print "</tbody>"
print "</table>"
print ""
# ########################################## #
# #
# #
# End of table Ridings x Party #
# #
# #
# ########################################## #
# Print the list of candidates
for (i=1 ; i <= numridings ; i++) { # generate page menu
riding=ridings[i]
print (" <h3 id=\"" valnameid(riding) "\"><a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal\"> ^ </a> " riding)
if ("" != trim(ridingurls[riding]) ) {
print (" <a href=\"" ridingurls[riding] "\" title=\"Information about " riding "\">")
if ("" != trim(ridinglogos[riding]) ) {
print (" <img class=\"icon\" src=\"" ridinglogos[riding] "\" alt=\"Riding logo\"> Info")
} else {
print ("Information about this riding")
}
print (" </a>")
}
print (" </h3>")
print (" <dl>")
for (candidate = 2; candidate <= NR; candidate++) {
if (candidates[candidate, "Riding"] != riding) {
# Only print the candidates in this riding
continue # with next candidate
}
if (candidates[candidate, "Name"] == "") {
# skip blank records
continue # with next candidate
}
### print("<!--<a href=\"https://www4.elections.on.ca/internetapp/nominationcontests.aspx\" title=\"Elections Ontario: Nomination Contest Details\"><span style=\"color:#FECE41; background:white; font-variant:small-caps; height:2ex; vertical-align:middle;\">N</span></a> -->") ;
print ("")
print (" <dt id=\"" candidates[candidate, "concatid"] "\" class=\"h-card\">")
print (" <a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal\"> ^ </a> ")
print "<span class=\"" candidates[candidate, "candidateclass"] "\">" candidates[candidate, "Name"]
for ( classflag in classarray ) {
if ( "" != trim(candidates[candidate, classflag]) ) { # check for non-empty field, contents may not be == classflag
print " <strong>(" classflag ")</strong>"
} # if (candidate, classarray)
} # for (classflag)
print "</span>"
print (" <a href=\"#" candidates[candidate, "concatid"] "\" title=\"Anchor link for " candidates[candidate, "Party"] " candidate in " riding "\">⚓</a>")
print "<a href=\"searchassistant.html#" candidates[candidate, "concatid"] "\" title=\"Find this candidate on the Search Assistant\" class=\"right\">🔎</a>"
### print("<!-- provides a link to Elections Ontario candidate list -->") ;
### print("<!-- <a href=\"https://www4.elections.on.ca/internetapp/nominationcontests.aspx\" title=\"Elections Ontario: Nomination Contest Details\"><span style=\"color:#FECE41; background:gray; font-variant:small-caps; height:2ex; vertical-align:middle;\">N</span></a> -->") ;
print (" </dt>")
print (" <dd class=\"h-card\">")
print (" <div class=\"aside\">")
if ("" != trim(candidates[candidate, "ImageURL"]) ) {
if ("" != trim(candidates[candidate, "Website"]) ) {
### Ensure we only get the first URL when there are multiples
split(candidates[candidate, "Website"],urls," ")
print ("<a href=\"" urls[1] "\" title=\"" gettitle(urls[1]) "\">")
}
print (" <img src=\"" candidates[candidate, "ImageURL"] "\" alt=\"" candidates[candidate, "Name"] "\">")
if ("" != trim(candidates[candidate, "Website"]) ) {
print ("</a>")
}
} else {
print (" <p>No Image</p>")
}
print (" </div> <!-- class=aside -->")
print (" <ul style=\"list-style-type:none;\">")
if ("" != trim(candidates[candidate, "Party"]) ) {
print (" <li class=\"p-org\"><b>Party</b>: ") # Field label is optional, for municipal elections without party affiliation
if ("" != trim(candidates[candidate, "PartyURL"]) ) {
### Ensure we only get the first URL when there are multiples
split(candidates[candidate, "PartyURL"],urls," ")
print (" <a href=\"" urls[1] "\" title=\"" gettitle(candidates[candidate, "PartyURL"]) "\">" candidates[candidate, "Party"] " ")
if ("" != trim(candidates[candidate, "PartyLogo"]) ) {
print (" <img src=\"" candidates[candidate, "PartyLogo"] "\" alt=\"Logo for " candidates[candidate, "Party"] "\">")
} # if ("PartyLogo")
print (" </a>")
} else { # if no ("PartyURL")
print (candidates[candidate, "Party"] )
} # endif ("PartyURL")
print (" </li>")
} # if ("Party")
# Add social media fields
for (field = 1; field <= numfields; field++) {
if ( headernames[field] in skipAttributes) { # if Attribute is in skipAttributes
continue # don't list static attributes
} # if (Attribute ~ skipAttributes)
if ("" != trim(candidates[candidate, headernames[field]]) ) { # Skip empty fields
print (" <li><b>" headernames[field] "</b>: " makehtml(candidates[candidate, headernames[field]]) "</li>")
} # empty field
} # for (field <= numfields)
# Postal address at the end (and if not empty)
if ("" != trim(candidates[candidate, "Address"]) ) {
### Automatically fetch OSM link?
### print("<!--  <a class=\"map\" href=\"https://www.openstreetmap.org/search?query=\" title=\"OpenStreetMap:\">Map</a> -->") ;
print (" <li class=\"h-adr\"><b>Postal Address</b>:")
print (" <p style=\"margin-left:2em;\">")
### If we parse out address info
### print("<!-- <span class=\"p-street-address\">XXXXXSTREETADDRESS</span>,<br><span class=\"p-locality\">XXXXXLOCALITY</span>, <span class=\"p-region\">Ontario</span>,<br><span class=\"p-country\">Canada</span> <span class=\"p-postal-code\">XXXXXPOSTALCODE</span> -->") ;
### If we DON'T parse out address info (monolithic addresses)
print (" <span class=\"p-address\">" candidates[candidate, "Address"] "</span> <a class=\"map\" href=\"https://www.openstreetmap.org/search?query=" txt2uri(candidates[candidate, "Address"]) "\" title=\"OpenStreetMap: " candidates[candidate, "Address"] "\">Map</a>")
print (" </p>")
print (" </li>")
} # address field not empty
print ("")
print (" </ul>")
print (" <br style=\"clear:both;\">") # Ensure images don't extend beyond <dl> box
print (" </dd>")
print ("")
} # for candidate in candidates
print (" </dl>")
print ("")
} # for riding in ridings
print ("<div class=\"footer\">")
print (" <!--#include virtual=\"footer.ssi\" -->")
print (" <p>Source code for csv2candidates generator: <a href=\"../csv2candidates.html\" title=\"Code Files - csv2candidates.awk and library.awk\">csv2candidates.awk and library.awk</a></p>")
print (" <p><a property=\"dct:title\" rel=\"cc:attributionURL\" href=\"http://poliblog.jonkman.ca/Poliblog-Elections/\">List of Candidates</a> by <a rel=\"cc:attributionURL dct:creator\" property=\"cc:attributionName\" href=\"http://poliblog.jonkman.ca/blogs/about/\">Bob Jonkman</a> is licensed under <a href=\"http://creativecommons.org/licenses/by-sa/4.0/?ref=chooser-v1\" target=\"_blank\" rel=\"license noopener noreferrer\" style=\"display:inline-block;\">CC BY-SA 4.0<img alt=\"\" style=\"height:22px!important;margin-left:3px;vertical-align:text-bottom;\" src=\"https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1\"><img alt=\"\" style=\"height:22px!important;margin-left:3px;vertical-align:text-bottom;\" src=\"https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1\"><img alt=\"\" style=\"height:22px!important;margin-left:3px;vertical-align:text-bottom;\" src=\"https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1\"></a></p>")
print (" <p class=\"bottom\">This page was last updated <!--#config timefmt=\"%a, %d %b %Y %T %z\" --><!--#echo var=\"LAST_MODIFIED\"--> </p>")
print ("</div> <!-- class=footer -->")
print ("</body>")
print ("</html>")
print ("")
} # END
# EOF: csv2candidates.awk
csv2municipalcandidates.awk
#! /bin/gawk -f
# Program : csv2municipalcandidates.awk
# Purpose : Generate copy'n'paste-able code for Poliblog's candidate lists for municipal elections
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 2 July 2022
# Based on : csv2candidates.awk (the version for provincial and federal elections)
# Copyright (c) 2020 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Usage : gawk -f csv2municipalcandidates.awk candidates.csv
# Records : 1st line: Fields are headers <h1> to <h6>
# : 2nd line: Field header names
# : 3rd line to end: Data
# Fields : Fieldnames with * are required, cannot be empty. Other fields appear only if not empty
# Fieldnames with ! are used to group entries and are used for layout. Fieldnames with ! must exist, fields may be empty. Itemized in variable skipAttributes, used to skip in attribute lists
# Fields are referenced by header name, the second line MUST be a field header line
#
# Static Fields (must exist, if blank the entry is ignored):
# 1 Name
# 2 Office
# 3 Municipality
# Address (single line monolithic, at the moment. May use sub-fields later)
# Website
# Email
# Phone
# "Twitter","Facebook","Instagram","YouTube","LinkedIn", &c.
# (social media URLs) field numbers may vary (will eventually be parsed for @Twitter and @instagram)
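#
# Illustrative input sketch (hypothetical data; field names and values are
# examples only, not taken from a real candidates.csv):
#
# "2022 Example Municipal Election","Candidates by Municipality and Office"
# "Name","Office","Municipality","MunicipalityURL","MunicipalityLogo","Elect","Website","Email","Phone","Address","ImageURL","Incumbent"
# "Jane Doe","Mayor","Exampleville","https://exampleville.example.org","","1","https://janedoe.example.org","jane@example.org","+1-519-555-0100","123 Main St, Exampleville ON","",""
#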
@include "library.awk"
function makeid(candidatenumber)
{
return (valnameid(Candidates[candidatenumber]["Name"] "-" Candidates[candidatenumber]["Municipality"] "-" Candidates[candidatenumber]["Office"]))
}
BEGIN {
# IFS = ","
# OFS = " "
# IGNORECASE = 1 # for sorting
# staticfields = 7
# Municipalities[0][0]="" # Initialize 2D array
## Special Purpose fields are ignored in the candidates' list of resources
### Some are used for images, some for class names (CSS styling), some for page layout
skipAttributes["Name"]=1 # Used for header, ID
skipAttributes["Office"]=1 # Used for grouping, ID
skipAttributes["Elect"]=1 # Additional info for Office
skipAttributes["Municipality"]=1 # Used for grouping, ID
skipAttributes["MunicipalityURL"]=1 # URL to municipal website
skipAttributes["MunicipalityLogo"]=1 # URL to image
skipAttributes["ImageURL"]=1 # URL to image
skipAttributes["Address"]=1 # Used for OpenStreetMap lookup
skipAttributes["Incumbent"]=1 # Class name (styling), indicator
classarray["Incumbent"]=1 # for building the class= attribute
# skipAttributes["Withdrawn"]=1 ### Changed to include reason for withdrawal, eg. "Deceased"
classarray["Withdrawn"]=1
skipAttributes["Unregistered"]=1 # Class name (styling), indicator
classarray["Unregistered"]=1
# skipAttributes["Elected"]=1 # Class name (styling), indicator ### Modified 2022-10-24 to show "Elected" or "Acclaimed"
classarray["Elected"]=1
skipAttributes["candidateclass"]=1 # Used internally to transfer styling from table to candidate list
} # BEGIN
# Read the title and subtitle(s)
(NR == 1) {
parsecsv($0, h)
} # (NR == 1)
# Read field headers
(NR == 2) {
numfields = getheaders($0, headernums, headernames)
} # (NR == 2)
# Read rest of the file
(NR > 2) {
numfields = parsecsv($0, inputarray)
## Move input line to candidates record
for (field = 1; field <= numfields; field++) {
Candidates[NR][headernames[field]] = inputarray[field]
}
## Populate Municipalities arrays
if ("" != Candidates[NR]["Name"]) {
Municipalities[Candidates[NR]["Municipality"]]++
MunicipalitiesURL[Candidates[NR]["Municipality"]] = Candidates[NR]["MunicipalityURL"]
MunicipalitiesLogo[Candidates[NR]["Municipality"]] = Candidates[NR]["MunicipalityLogo"]
## Populate Offices arrays
Offices[Candidates[NR]["Office"]]++
Elect[Candidates[NR]["Office"],Candidates[NR]["Municipality"]] = Candidates[NR]["Elect"] # Build an array of Offices, use "Elect" for later use
CandidatesInOffice[Candidates[NR]["Office"],Candidates[NR]["Municipality"]]++
} # if ("" != Candidates[NR]["Name"]
}
# (NR > 2)
END {
#
#
#
printhtmlhead(h[1] " - " h[2], "noclose")
print (" <link rel=\"stylesheet\" href=\"../candidates.css\" type=\"text/css\" />")
print ("</head>")
print ("<!--")
print ("This information is provided as-is. Data is public information and not subject to copyright.")
print ("Images and logos are copyright and trademarked by their respective owners.")
print ("Other material on this page is Copyright (c) " strftime("%Y") " by Bob Jonkman")
print ("and released under a Creative Commons Attribution-only Share-Alike license: https://creativecommons.org/licenses/by-sa/4.0/")
print ("-->")
print ("<body>")
print ("<div id=\"top_of_page\" class=\"header\">")
print (" <!--#include virtual=\"header.ssi\" -->")
print ("</div> <!-- class=header -->")
for (i = 1; i <= 6; i++) {
if ("" != h[i]) {
print ("<h" i ">" makehtml(h[i]) "</h" i ">")
}
}
# Get Municipalities and Offices in alphabetical order
numMunicipalities = asorti(Municipalities)
numOffices = asorti(Offices)
# Create table of Municipalities and Offices
print (" <table")
print (" class=\"candidates\"")
print (">")
print (" <thead>")
print (" <tr>")
print (" <th> </th>")
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print (" <th><a class=\"internal\" href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Jump to " Municipalities[Municipality] "\">" Municipalities[Municipality] "</a></th>")
} # if ("" == Municipalities[])
} # for Municipality in Municipalities[]
print (" </tr>")
print (" </thead>")
print (" <tbody>")
for (Office = 1; Office <= numOffices; Office++) {
if ("" != Offices[Office]) {
print (" <tr>")
print (" <th>" Offices[Office] "</th>")
# list candidates for Municipality here
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print " <td>"
for (Candidate in Candidates) {
if ((Candidates[Candidate]["Municipality"] == Municipalities[Municipality]) && (Candidates[Candidate]["Office"] == Offices[Office])) {
candidateclass = " " # clear previous value, space to provide separator between existing class string
# Construct a class for this candidate (used for styling)
for (classflag in classarray) {
if ("" != Candidates[Candidate][classflag]) {
candidateclass = candidateclass " " tolower(classflag)
}
}
Candidates[Candidate]["candidateclass"] = candidateclass # save for use in list of candidates
print "<a href=\"#" makeid(Candidate) "\" title=\"" Candidates[Candidate]["Name"] "\" class=\"internal " candidateclass " \" >" Candidates[Candidate]["Name"]
if (Candidates[Candidate]["Incumbent"]) {
print " (I)"
}
if (Candidates[Candidate]["Unregistered"]) {
print " (Unregistered)"
}
if (Candidates[Candidate]["Withdrawn"]) {
print " (" Candidates[Candidate]["Withdrawn"] ")"
}
if (Candidates[Candidate]["Elected"]) {
print " (" Candidates[Candidate]["Elected"] ")"
} print "</a><br />"
} # if (candidate matches)
} # for (Candidate in Candidates)
print (" </td>")
} # if ("" != Municipalities[])
} # for (Municipality in Municipalities)
print (" </tr>")
} # if ("" != Offices[Office]["Name"])
} # for (Office in Offices)
print (" </tbody>")
print (" </table>")
# Create the list of candidates
for (Municipality = 1; Municipality <= numMunicipalities; Municipality++) {
if ("" != Municipalities[Municipality]) {
print (" <h3 id=\"" valnameid(Municipalities[Municipality]) "\"><a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> " Municipalities[Municipality])
if ("" != MunicipalitiesURL[Municipalities[Municipality]]) {
print (" <a href=\"" MunicipalitiesURL[Municipalities[Municipality]] "\" title=\"Information about " Municipalities[Municipality] "\">")
if ("" != MunicipalitiesLogo[Municipalities[Municipality]]) {
print (" <img class=\"icon\" src=\"" MunicipalitiesLogo[Municipalities[Municipality]] "\" alt=\"Municipality logo\" /> Info")
} else {
print ("Information about " Municipalities[Municipality] "")
} # if (Municpalities logo exists)
print (" </a>")
} # if (Municipalities URL exists)
print (" </h3>")
### Construct an array of Candidates in Office so we avoid printing Offices with no candidates (or Offices that don't apply to a Municipality)
# for ( Candidate in Candidate ) {
# if ( Candidates[Candidate]
for (Office = 1; Office <= numOffices; Office++) {
if ( CandidatesInOffice[Offices[Office],Municipalities[Municipality]] ) { # If there is at least one candidate running for this office
print "<h4 id=\"" valnameid(Offices[Office] "-" Municipalities[Municipality]) "\">"
print "<a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> "
print " <a href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Up to the start of " Municipalities[Municipality] "\" class=\"internal navaid\"> ⇑ </a> "
print Offices[Office] " - " Municipalities[Municipality]
if ( Elect[Offices[Office],Municipalities[Municipality]] > 0 ) {
print " : Elect " Elect[Offices[Office],Municipalities[Municipality]]
} # if ( Elect > 0 )
print "</h4>"
print "<dl>"
for (Candidate in Candidates) {
if (("" != Candidates[Candidate]["Name"]) && (Candidates[Candidate]["Municipality"] == Municipalities[Municipality]) && (Candidates[Candidate]["Office"] == Offices[Office])) {
print " <dt id=\"" makeid(Candidate) "\" class=\"h-card\">"
print "<a href=\"#top_of_page\" title=\"Go to the top of this page\" class=\"internal navaid\"> ^ </a> "
print "<a href=\"#" valnameid(Municipalities[Municipality]) "\" title=\"Up to the start of " Municipalities[Municipality] "\" class=\"internal navaid\"> ⇑ </a> "
print "<a href=\"#" valnameid(Offices[Office] "-" Municipalities[Municipality]) "\" title=\"Up to " Offices[Office] " - " Municipalities[Municipality] "\" class=\"internal navaid\" > ↑ </a> "
print "<span class=\"" Candidates[Candidate]["candidateclass"] "\">" Candidates[Candidate]["Name"]
if (Candidates[Candidate]["Unregistered"]) {
print " <strong>(Unregistered)</strong>"
}
if (Candidates[Candidate]["Incumbent"]) {
print " <strong>(Incumbent)</strong>"
}
if (Candidates[Candidate]["Elected"]) {
print " <strong>(" Candidates[Candidate]["Elected"] ")</strong>"
}
if (Candidates[Candidate]["Withdrawn"]) {
print " <strong>(" Candidates[Candidate]["Withdrawn"] ")</strong>"
}
print "</span>"
print (" <a href=\"#" makeid(Candidate) "\" title=\"Anchor link for " Candidates[Candidate]["Office"] " candidate in " Municipalities[Municipality] " \" class=\"internal\">⚓</a>")
print "<a href=\"searchassistant.html#" makeid(Candidate) "\" title=\"Find this candidate on the Search Assistant\" class=\"right\">🔎</a>"
print " </dt>"
print (" <dd class=\"h-card\">")
print (" <div class=\"aside\">")
if ("" != Candidates[Candidate]["ImageURL"]) {
if ("" != Candidates[Candidate]["Website"]) {
### Ensure we only get the first URL when there are multiples
split(Candidates[Candidate]["Website"], urls, " ")
print ("<a href=\"" urls[1] "\" title=\"" gettitle(urls[1]) "\">")
} # if ("" != Candidates[Website])
print (" <img src=\"" Candidates[Candidate]["ImageURL"] "\" alt=\"" Candidates[Candidate]["Name"] "\" />")
if ("" != Candidates[Candidate]["Website"]) {
print ("</a>")
} # if ( "" != Candidates[Website])
} else { # if ("" != Candidates[ImageURL])
print (" <p>No Image</p>")
} # if ("" != Candidates[ImageURL])
print (" </div> <!-- class=aside -->")
print (" <ul>")
### Print candidate attributes
for (Attribute in Candidates[Candidate]) {
if (Attribute in skipAttributes) { # if Attribute is in skipAttributes
continue # don't list static attributes
} # if (Attribute ~ skipAttributes)
if ("" != Candidates[Candidate][Attribute]) {
print " <li><b>" Attribute "</b>: " makehtml(Candidates[Candidate][Attribute]) "</li>"
} # if ("" != Candidates[Attribute])
} # (for Attribute in Candidates)
if ("" != Candidates[Candidate]["Address"]) {
### Automatically fetch OSM link
print (" <li class=\"h-adr\"><b>Postal Address</b>:")
### If we parse out address info
### print (" <p style=\"margin-left:2em;\">")
### print("<!-- <span class=\"p-street-address\">XXXXXSTREETADDRESS</span>,<br /><span class=\"p-locality\">XXXXXLOCALITY</span>, <span class=\"p-region\">Ontario</span>,<br /><span class=\"p-country\">Canada</span> <span class=\"p-postal-code\">XXXXXPOSTALCODE</span> -->") ;
### print (" </p>")
### If we DON'T parse out address info (monolithic addresses)
print (" <span class=\"p-address\">" Candidates[Candidate]["Address"] "</span> <a class=\"map\" href=\"https://www.openstreetmap.org/search?query=" txt2uri(Candidates[Candidate]["Address"]) "\" title=\"OpenStreetMap: " Candidates[Candidate]["Address"] "\">Map</a>")
print (" </li>")
}
print " </ul>"
print (" <br />") # Ensure images don't extend beyond <dl> box
print (" </dd>")
print ("")
} # if ("" != Candidates[Name])
} # for candidate in candidates
print (" </dl>")
print ("")
} # if ("" != Offices[])
} # if (CandidatesInOffice > 0 )
} # if ("" != Municipalities[[])
} # for (Municipality in Municipalities)
print ("<div class=\"footer\">")
print (" <!--#include virtual=\"footer.ssi\" -->")
print (" <p class=\"bottom\">This page was last updated <span class=\"datetime\" title=\"" strftime("%FT%T%z") "\">" strftime("%A, %e %B %Y at %l:%M%P %Z") "</span></p>")
print ("</div> <!-- class=footer -->")
print ("</body>")
print ("</html>")
print ("")
} # END
# EOF: csv2municipalcandidates.awk
csv2candidates-search.awk
#! /bin/gawk -f
# Program : csv2candidates-search.awk
# Purpose : Generate a table with links to simplify searching for missing data
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 15 August 2022
# Based on : csv2municipalcandidates.awk
# Modified : 14 Nov 2023 - Added "Party" and "Riding" to id and search term
# 23 Nov 2023 - Create columns for all services we know
# Copyright (c) 2020-2022 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Usage : gawk -f csv2candidates-search.awk candidates.csv
# Records : 1st line: Fields are headers <h1> to <h6>
# : 2nd line: Field header names
# : 3rd line to end: Data
# Fields : Fieldnames with * are required, cannot be empty.
# Other fields appear only if not empty
# Fieldnames with ! are used to group entries and are used for layout.
# Fieldnames with ! must exist, fields may be empty. Itemized in
# variable skipAttributes, used to skip in attribute lists
# Fields are referenced by header name,
# the second line MUST be a field header line
#
# Static Fields (must exist, if blank the entry is ignored):
# 1 Name
# Optional Static Fields (field must exist, but may be blank)
# Address (single line monolithic, at the moment. May use sub-fields later)
# Website
# Email
# Phone
# Dynamic Fields (field may or may not exist, and may be blank)
# "Twitter","Facebook","Instagram","YouTube","LinkedIn", &c.
# (social media URLs) field numbers may vary (will eventually be parsed for @Twitter and @instagram)
@include "library.awk"
function makeid(candidatenumber, concatfieldname,concatid)
{
concatid=""
for ( concatfieldname in concatfields ) {
if ( "" != inputarray[headernums[concatfieldname]] ) {
}
}
return (valnameid(concatid))
}
function printURL(URL,LinkText,Class)
{
print "<a href=\"" URL "\" title=\"Search: " LinkText "\" class=\"" Class "\" target=\"_blank\">" LinkText "</a>"
}
function printsearch(URL, LinkText)
{ if ( "" == inputarray[headernums[LinkText]] )
Class="red"
else
Class="green"
print "<td>"
printURL(URL,LinkText,Class)
print "</td>"
}
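# For example (hypothetical candidate), printsearch(url, "Twitter") emits
#   <td>
#   <a href="url" title="Search: Twitter" class="red" target="_blank">Twitter</a>
#   </td>
# where the class is "red" while the candidate's Twitter field is still empty
# and "green" once it has been filled in.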
BEGIN {
# IFS = ","
# OFS = " "
# IGNORECASE = 1 # for sorting
# staticfields = 7
# Municipalities[0][0]="" # Initialize 2D array
## Special Purpose fields are ignored in the candidates' list of resources
### Some are used for images, some for class names (CSS styling), some for page layout
# skipAttributes["Name"]=1 # Used for header, ID
skipAttributes["Office"]=1 # Used for grouping, ID
skipAttributes["Elect"]=1 # Additional info for Office
skipAttributes["Municipality"]=1 # Used for grouping, ID
skipAttributes["MunicipalityURL"]=1 # URL to municipal website
skipAttributes["MunicipalityLogo"]=1 # URL to image
skipAttributes["Riding"]=1 # Riding name
skipAttributes["RidingURL"]=1 # URL to Elections Canada/Ontario website
skipAttributes["RidingLogo"]=1 # URL to Elections Canada/Ontario logo
skipAttributes["PartyShortName"]=1 # selector in VLOOKUP
skipAttributes["Party"]=1 # Party Name
skipAttributes["PartyURL"]=1 # URL to Party URL
skipAttributes["PartyLogo"]=1 # URL to Party logo
skipAttributes["ImageURL"]=1 # URL to candidate image
skipAttributes["Incumbent"]=1 # Class name (styling), indicator
skipAttributes["Unregistered"]=1 # Class name (styling), indicator
skipAttributes["Elected"]=1 # Class name (styling), indicator
skipAttributes["Withdrawn"]=1
skipAttributes["Address"]=1 # Used for OpenStreetMap lookup
### Things to not search for
skipAttributes["Website"]=1 # (maybe a general websearch, later)
skipAttributes["Notes"]=1
skipAttributes["Calendar"]=1
skipAttributes["Votes"]=1
skipAttributes["HiddenNotes"]=1
### Fields to concatenate for search
concatfields["Name"]=1
concatfields["Office"]=1
concatfields["Municipality"]=1
concatfields["Riding"]=1
concatfields["Party"]=1
# Write the headers and table start up front
Title = "Search for Missing Candidate Data"
LINKREL[1]="<link rel=\"stylesheet\" href=\"../candidates.css\" type=\"text/css\">"
LINKREL[2]="<link rel=\"shortcut icon\" href=\"https://poliblog.jonkman.ca/blogs/pollywog-32x32b.jpg\" type=\"image/x-icon\">"
printhtmlhead(Title)
print "<body>"q
print " <h1>" Title "</h1>"
print " <p>Filename: " txt2html(ARGV[1]) "<br>"
print " Date: "
system("stat -c %y " ARGV[1] )
print "<br>Generated: " strftime("%FT%T") "</p>"
print " <table borders=\"1\" caption=\"" Title "\">"
print " <thead>"
print " <tr>"
print " <td>Google Name, Riding, Party</td>"
print " <td>Phone</td>"
print " <td>Email</td>"
print " <td>Twitter</td>"
print " <td>Facebook</td>"
print " <td>Instagram</td>"
print " <td>LinkedIn</td>"
print " <td>YouTube</td>"
print " <td>Reddit</td>"
print " <td>Pinterest</td>"
print " <td>Flickr</td>"
print " <td>Tumblr</td>"
print " <td>MySpace</td>"
print " <td>TikTok</td>"
print " <td>Twitch</td>"
print " <td>Snapchat</td>"
print " <td>Wikipedia</td>"
# (add others as they're discovered)
print " </tr>"
print " </thead>"
print " <tbody>"
} # BEGIN
# Read the title and subtitle(s)
(NR == 1) { # ignore the headers <h1> .. <h6>
} # (NR ==1 )
# Read the column headers
(NR == 2) {
numfields = getheaders($0, headernums, headernames)
}
# Read rest of the file
(NR > 2) {
numfields = parsecsv($0, inputarray)
# If name is blank then advance to next record
if ( "" == inputarray[headernums["Name"]] ) {
next
}
print " <tr>"
### No need to step through fields, we only create links for services we know about
### General search (Google, for now)
searchfieldsquote = ""
searchfieldsbr = ""
searchfieldsdash = ""
for (concatfieldname in concatfields ) {
if ( "" != inputarray[headernums[concatfieldname]] ) {
searchfieldsquote = searchfieldsquote "%22" inputarray[headernums[concatfieldname]] "%22 "
searchfieldsbr = searchfieldsbr inputarray[headernums[concatfieldname]] "<br>"
searchfieldsdash = searchfieldsdash inputarray[headernums[concatfieldname]] "-"
} # if ("" != inputarray)
} # for (concatfieldname)
print " <td id=\"" valnameid(searchfieldsdash) "\">"
print "<a href=\"index.html#" valnameid(searchfieldsdash) "\" title=\"Main entry for " searchfieldsdash " \" class=\"right\">⊕</a>"
print "<a href=\"https://www.google.com/search?q=" searchfieldsquote "\" title=\"Google Search\" target=\"_blank\" >" searchfieldsbr "</a>"
print "<a href=\"#" valnameid(searchfieldsdash) "\" title=\"Search Assistant Anchor for " searchfieldsdash "\" class=\"right internal\">⚓</a>"
if (inputarray[headernums["Website"]]) {
numWebs = split(inputarray[headernums["Website"]],Webs," ")
for (num in Webs) {
print "<a href=\"" Webs[num] "\" title=\"Go to: " Webs[num] "\" class=\"right\" target=\"_blank\">𝕎</a> "
} # for (num in Webs)
} # if (inputarray)
print "</td>"
### Phone Number
print "<td class=\"phone\">"
numPhones = split(inputarray[headernums["Phone"]],Phones," ")
for (num in Phones) {
Phone = substr(Phones[num],index(Phones[num],"-")+1)
printURL("https://www.google.com/search?q=" Phone, Phone)
print "<br>"
} # for (num in Phones)
print "</td>"
### E-mail
print "<td class=\"email\">"
numEmails = split(inputarray[headernums["Email"]],Emails," ")
for (num in Emails) {
print "<a href=\"https://www.google.com/search?q=" txt2uri("\"" Emails[num] "\"") "\" title=\"Search: " Emails[num] "\" target=\"_blank\">𝕄</a> "
} # for (num in Emails)
print "</td>"
### Social Media sites
printsearch("https://twitter.com/search?src=typed_query&f=user&q=" txt2uri(inputarray[headernums["Name"]]), "Twitter")
printsearch("https://www.facebook.com/search/top/?q=" txt2uri(inputarray[headernums["Name"]]), "Facebook")
printsearch("https://www.instagram.com/explore/search/keyword/?q=" txt2uri(inputarray[headernums["Name"]]), "Instagram")
printsearch("https://linkedin.com/search/results/people/?keywords=" txt2uri(inputarray[headernums["Name"]]), "LinkedIn")
printsearch("https://www.youtube.com/results?sp=EgIQAg%253D%253D&search_query=" txt2uri(inputarray[headernums["Name"]]), "YouTube")
printsearch("https://www.reddit.com/search/?type=user&q=" txt2uri(inputarray[headernums["Name"]]), "Reddit")
printsearch("https://www.pinterest.ca/search/pins/?q=" txt2uri(inputarray[headernums["Name"]]), "Pinterest")
printsearch("https://www.flickr.com/search/people/?username=" txt2uri(inputarray[headernums["Name"]]), "Flickr")
printsearch("https://www.tumblr.com/search/" txt2uri(inputarray[headernums["Name"]]), "Tumblr")
printsearch("https://myspace.com/search/people?q=" txt2uri(inputarray[headernums["Name"]]), "MySpace")
printsearch("https://www.tiktok.com/search/user?q=" txt2uri(inputarray[headernums["Name"]]), "TikTok")
printsearch("https://www.twitch.tv/search?term=" txt2uri(inputarray[headernums["Name"]]), "Twitch")
printsearch("https://www.snapchat.com/explore/" txt2uri(gensub(" ","-","G",inputarray[headernums["Name"]])) "/profiles", "Snapchat")
printsearch("https://wikipedia.org/w/index.php?search=" txt2uri(inputarray[headernums["Name"]]), "Wikipedia")
print " </tr>"
} # (NR > 2)
END { print " </tbody>"
print " </table>"
print " <div class=\"footer\">"
print (" <!--#include virtual=\"footer.ssi\" -->")
print (" <p class=\"bottom\">This page was last updated <span class=\"datetime\" title=\"" strftime("%FT%T%z") "\">" strftime("%A, %e %B %Y at %l:%M%P %Z") "</span></p>")
print (" </div> <!-- class=footer -->")
print ("</body>")
print ("</html>")
print ("")
}
# EOF: csv2candidates-search.awk
/home/bjonkman/bin/awk/library.awk
# Program : LIBRARY.AWK
# Purpose : Contains functions common to many AWK scripts
# Author : Bob Jonkman <bjonkman@sobac.com>
# Copyright 2008 Bob Jonkman and/or SOBAC Microcomputer Services
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Date : 19 July 2005
# Contents :
# rstring() - Returns the right-most n characters of string
# max() - Maximum of items in list
# min() - Minimum of items in list
# trim() - Trim whitespace from both ends of a string
# ltrim() - Trim whitespace from beginning (left side) of a string
# rtrim() - Trim whitespace from end (right side) of a string
# gwid2smtp() - escape reserved SMTP characters in GW Object
# see http://www.novell.com/documentation/gw55/index.html?page=/documentation/gw55/gw55ia/data/a30135u.html#a30135u
# smtp2gwid() - un-escape
# reverse() - reverse order of delimited string, eg. a.b.c -> c_b_a
# txt2html() - escape reserved HTML characters
# txt2xml() - escape reserved XML characters
# html2txt() - restore reserved HTML characters
# txt2uri() - escape reserved URI characters
# uri2txt() - restore reserved URI characters
# makehtml() - Create href links (http:// file:// mailto:) from text (mailto: was microformatted with class="vcard" on 2006-08-04)
# parsecsv() - Split Comma-Quote fields in string into an array
# printcsv() - Determine whether to print a field with delimiters
# valnameid() - Validate Name and ID token names (see http://www.w3.org/TR/html4/types.html#type-id )
# printhtmlhead() - print headers for an HTML file
# parsecgi() - Parse QUERY_STRING or POST data into an array
# txt2gwapi() - escape reserved characters in GW API files
# (see pg 42,43 "Keyword Ordering Requirements and Delimiters" in GroupWise API Gateway documentation)
# getheaders() - Create reverse lookup array for input string
# printarray() - Print all elements of an array (for debugging, mostly) Added 2022-05-01
# gettitle() - Get a title attribute from web page (stub added 2022-05-01)
function rstring(string,n) { # Returns the right-most n characters of string
return(substr(string,length(string)-n+1))
}
function max(a,b) { return(( a > b ) ? a : b)
}
function min(a,b) { return(( a < b ) ? a : b)
}
function trim(string) {
string = rtrim(string) ;
string = ltrim(string) ;
return(string) ;
}
##### End trim #####
# trim left-side whitespace
function ltrim(string) { gsub(/^[ \t]+/, "", string)
return(string)
}
##### End ltrim #####
# trim right-side whitespace
function rtrim(string) { gsub(/[ \t]+$/, "", string)
return(string)
}
##### End rtrim #####
function gwid2smtp(text, smtptext) # Perform GroupWise character translation
# http://www.novell.com/documentation/gw55/index.html?page=/documentation/gw55/gw55ia/data/a30135u.html#a30135u
{
smtptext = text ;
gsub(/#/ ,"#h#",smtptext) ;
gsub(/_/ ,"#u#",smtptext) ;
gsub(/ / ,"_" ,smtptext) ;
gsub(/\(/ ,"#l#",smtptext) ;
gsub(/)/ ,"#r#",smtptext) ;
gsub(/,/ ,"#m#",smtptext) ;
gsub(/:/ ,"#c#",smtptext) ;
gsub(/\\/ ,"#b#",smtptext) ;
gsub(/=/ ,"#e#",smtptext) ;
gsub(/\// ,"#s#",smtptext) ;
return(smtptext) ;
}
##### End gwid2smpt #####
function smtp2gwid(text, gwidtext)
{
gwidtext = text ;
gsub(/#s#/,"/" ,gwidtext);
gsub(/#e#/,"=" ,gwidtext);
gsub(/#b#/,"\\",gwidtext);
gsub(/#c#/,":" ,gwidtext);
gsub(/#m#/,"," ,gwidtext);
gsub(/#r#/,")" ,gwidtext);
gsub(/#l#/,"(" ,gwidtext);
gsub(/_/ ," " ,gwidtext);
gsub(/#u#/,"_" ,gwidtext);
gsub(/#h#/,"#" ,gwidtext);
return(gwidtext) ;
}
##### End smtp2gwid #####
function reverse(instring,inseparator,outseparator, numelements,array,i,outstring)
{
numelements = split(instring, array, inseparator);
outstring = array[numelements];
for (i=numelements-1; i>0; i--)
outstring = outstring outseparator array[i];
return outstring;
}
##### End reverse #####
function txt2html(text, htmltext)
{
htmltext = text ;
gsub(/&/ , "\\&amp;" ,htmltext)
gsub(/>/ , "\\&gt;" ,htmltext)
gsub(/</ , "\\&lt;" ,htmltext)
gsub(/"/ , "\\&quot;" ,htmltext)
gsub(/'/ , "\\&#39;" ,htmltext) # ' or tick appears to be invalid in XML UTF-8 documents, substitute numeric entity
gsub(/\x91/, "\\&lsquo;",htmltext) # ‘ left-single-quote or backtick becomes tick
gsub(/\x92/, "\\&rsquo;",htmltext) # ’ right-single-quote becomes tick
gsub(/\x93/, "\\&ldquo;",htmltext) # “ left-double-quote becomes double-quote
gsub(/\x94/, "\\&rdquo;",htmltext) # ” right-double-quote becomes double-quote
gsub(/\x95/, "\\&ndash" ,htmltext) # – N-dash becomes hyphen (for XML)
gsub(/\x96/, "\\&mdash" ,htmltext) # — EM Dash becomes two hyphens
# print("\n##### DEBUG ##### txt2html: text=" text "; htmltext=" htmltext "\n")
##### DEBUG ##### gsub(/[^a-zA-Z0-9 \!\#\$\%\&\(\)\*\+\,\-\.\/\:\;\=\?\@\[\\\]\^\_\`\{\|\}\~\n\t]/,"",htmltext) # remove all other non-ascii characters
return(htmltext) ;
}
##### End txt2html #####
function txt2xml(text, xmltext) # converts plain text or HTML to XML
{
xmltext = text ;
gsub(/&/ , "\\&amp;" ,xmltext)
gsub(/>/ , "\\&gt;" ,xmltext)
gsub(/</ , "\\&lt;" ,xmltext)
gsub(/"/ , "\\&quot;" ,xmltext)
gsub(/'/ , "\\&#39;" ,xmltext) # ' or tick appears to be invalid in XML UTF-8 documents, substitute numeric entity
gsub(/\x91/ , "\\&#39;" ,xmltext) # left-single-quote or backtick becomes tick
gsub(/‘/, "\\&#39;" ,xmltext) # ‘ left-single-quote becomes tick
gsub(/\x92/ , "\\&#39;" ,xmltext) # right-single-quote becomes tick
gsub(/’/, "\\&#39;" ,xmltext) # right-single-quote becomes tick
gsub(/\x93/ , "\\&quot;" ,xmltext) # “ left-double-quote becomes double-quote
gsub(/“/, "\\&quot;" ,xmltext) # left-double-quote becomes double-quote
gsub(/\x94/ , "\\&quot;" ,xmltext) # ” right-double-quote becomes double-quote
gsub(/”/, "\\&quot;" ,xmltext) # right-double-quote becomes double-quote
gsub(/\x95/ , "-" ,xmltext) # – N-dash becomes hyphen (for XML)
gsub(/–/, "-" ,xmltext) # – N-dash becomes hyphen (for XML)
gsub(/--/ , "\\+\\+" ,xmltext) # two hyphens are invalid inside comments <!-- -->
gsub(/\x96/ , "\\+\\+" ,xmltext) # — EM Dash becomes two hyphens
gsub(/—/, "\\+\\+" ,xmltext) # — EM Dash becomes two hyphens
##### DEBUG ##### gsub(/[^a-zA-Z0-9 \!\#\$\%\&\(\)\*\+\,\-\.\/\:\;\=\?\@\[\\\]\^\_\`\{\|\}\~\n\t]/,"",xmltext) # remove all other non-ascii characters
return(xmltext) ;
}
function html2txt(htmltext, text)
{
text = htmltext ;
gsub(/&#8211;/ , "-" , text)
gsub(/&ndash;/ , "-" , text)
gsub(/&#8212;/ , "--" , text)
gsub(/&mdash;/ , "--" , text)
gsub(/&#8216;/ , "'" , text) # left-single-quote value
gsub(/&lsquo;/ , "'" , text) # left-single-quote entity
gsub(/&#8217;/ , "'" , text) # right-single-quote value
gsub(/&rsquo;/ , "'" , text) # right-single-quote entity
gsub(/&#8220;/ , "\"" , text) # left-double-quote value
gsub(/&ldquo;/ , "\"" , text) # left-double-quote entity
gsub(/&#8221;/ , "\"" , text) # right-double-quote value
gsub(/&rdquo;/ , "\"" , text) # right-double-quote entity
gsub(/&#39;/ , "'" , text) # apostrophe or tick &#39;
gsub(/&apos;/ , "'" , text) # apostrophe entity
gsub(/&quot;/ , "\"" , text)
gsub(/&nbsp;/ , " " , text)
gsub(/&lt;/ , "<" , text)
gsub(/&gt;/ , ">" , text)
gsub(/&amp;/ , "\\&" , text)
return(text) ;
}
##### End html2txt #####
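# Worked example: txt2uri("123 Main St, Kitchener ON") returns
# "123%20Main%20St%2C%20Kitchener%20ON" (spaces become %20, the comma %2C),
# which is how csv2candidates.awk builds its OpenStreetMap query strings.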
function txt2uri(text, uritext) # see RFC3986 (STD0066) section 2.2
{
uritext = text ;
gsub(/%/ ,"%25",uritext)
gsub(/ / ,"%20",uritext)
gsub(/:/ ,"%3A",uritext)
gsub(/\// ,"%2F",uritext)
gsub(/\?/ ,"%3F",uritext)
gsub(/#/ ,"%23",uritext)
gsub(/\[/ ,"%5B",uritext)
gsub(/]/ ,"%5D",uritext)
gsub(/@/ ,"%40",uritext)
gsub(/!/ ,"%21",uritext)
gsub(/\$/ ,"%24",uritext)
gsub(/&/ ,"%26",uritext)
gsub(/'/ ,"%27",uritext)
gsub(/\(/ ,"%28",uritext)
gsub(/)/ ,"%29",uritext)
gsub(/\*/ ,"%2A",uritext)
gsub(/\+/ ,"%2B",uritext)
gsub(/,/ ,"%2C",uritext)
gsub(/;/ ,"%3B",uritext)
gsub(/=/ ,"%3D",uritext)
gsub(/\|/ ,"%7C",uritext) # Not part of RFC3986 (I think)
gsub(/~/ ,"%7E",uritext) # Not part of RFC3986 (I think)
gsub(/\^/ ,"%5E",uritext) # Not part of RFC3986 (I think)
gsub(/`/ ,"%60",uritext) # Not part of RFC3986 (I think)
gsub(/\{/ ,"%7B",uritext) # Not part of RFC3986 (I think)
gsub(/\}/ ,"%7D",uritext) # Not part of RFC3986 (I think)
gsub(/"/ ,"%22",uritext) # Not part of RFC3986 (I think)
gsub(/</ ,"%3C",uritext) # Not part of RFC3986 (I think)
gsub(/>/ ,"%3E",uritext) # Not part of RFC3986 (I think)
gsub(/\\/ ,"%5C",uritext) # Not part of RFC3986 (I think)
return(uritext) ;
}
##### End txt2uri #####
function uri2txt(uri, text)
{
text = uri
gsub(/%20/," " ,text)
gsub(/%3A/,":" ,text)
gsub(/%2F/,"/" ,text)
gsub(/%3F/,"?" ,text)
gsub(/%23/,"#" ,text)
gsub(/%5B/,"[" ,text)
gsub(/%5D/,"]" ,text)
gsub(/%40/,"@" ,text)
gsub(/%21/,"!" ,text)
gsub(/%24/,"$" ,text)
gsub(/%26/,"\\&" ,text) # unescaped '&' would indicate "replacement text"
gsub(/%27/,"'" ,text)
gsub(/%28/,"(" ,text)
gsub(/%29/,")" ,text)
gsub(/%2A/,"*" ,text)
gsub(/%2B/,"+" ,text)
gsub(/%2C/,"," ,text)
gsub(/%3B/,";" ,text)
gsub(/%3D/,"=" ,text)
gsub(/%7C/,"|" ,text)
gsub(/%7E/,"~" ,text)
gsub(/%5E/,"^" ,text)
gsub(/%60/,"`" ,text)
gsub(/%7B/,"{" ,text)
gsub(/%7D/,"}" ,text)
gsub(/%22/,"\"" ,text)
gsub(/%3C/,"<" ,text)
gsub(/%3E/,">" ,text)
gsub(/%0D%0A/,"\\n" ,text)
gsub(/%5C/,"\\" ,text) # not part of RFC3986 ?
gsub(/%25/,"%" ,text)
return(text)
}
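# Usage sketch for makehtml() (hypothetical input): makehtml("https://example.org jane@example.org")
# returns the two space-separated fragments
#   <a href="https://example.org" title="https://example.org">https://example.org</a>
#   <span class="vcard"><a class="email" href="mailto:jane%40example.org" title="E-mail to jane@example.org">jane@example.org</a></span>
# (gettitle() is currently a stub that just echoes the URL, so href and title match).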
function makehtml(string, numurls,i,htmlstring,path,gwiduri,outstring)
{
numurls = split(string, urls, " ") #split string into urls on space. If a URL is meant to include a space then the input string must substitute '+'
for (i=1; i<=numurls; i++)
{
if(substr(urls[i],1,2) == "\\\\")
{
path = urls[i] ;
gsub(/\\\\/,"/",path) ;
htmlstring = "<a href=\"file:///" txt2uri(path) "\">" txt2html(urls[i]) "</a>" ;
}
else if(tolower(substr(urls[i],1,7)) == "http://") # If the string starts with http:// then assume it is already txt2uri converted
htmlstring = "<a href=\"http://" substr(urls[i],8) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(tolower(substr(urls[i],1,8)) == "https://") # If the string starts with https:// then assume it is already txt2uri converted
htmlstring = "<a href=\"https://" substr(urls[i],9) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(tolower(substr(urls[i],1,2)) == "//") # If the string starts with // then assume it is already txt2uri converted
htmlstring = "<a href=\"//" substr(urls[i],3) "\" title=\"" gettitle(urls[i]) "\">" txt2html(urls[i]) "</a>"
else if(atpos=index(urls[i], "@"))
{
htmlstring = "<span class=\"vcard\"><a class=\"email\" href=\"mailto:" txt2uri(urls[i]) "\" title=\"E-mail to " txt2html(urls[i]) "\">" txt2html(urls[i]) "</a>" ; # E-mail address is not cleaned with txt2uri() so @ stays as symbol. But txt2uri() may be necessary!
# if ((substr(urls[i],atpos+1) == "sobac.com") && !NOEGUIDE)
# { gwiduri = txt2uri(smtp2gwid(substr(urls[i],1,atpos-1))) ;
# htmlstring = htmlstring " <a class=\"url\" href=\"http://eguide.sobac.com/eGuide/servlet/eGuide?Action=Detail.get&User.dn=cn%3d" gwiduri "%2cou%3dStaff%2co%3dsobac&Directory.uid=Staff\" title=\"Look up " gwiduri " in eGuide Staff Container\" target=\"ldap\">S</a>"
# htmlstring = htmlstring " <a class=\"url\" href=\"http://eguide.sobac.com/eGuide/servlet/eGuide?Action=Detail.get&User.dn=cn%3d" gwiduri "%2cou%3dNon%2dStaff%2co%3dsobac&Directory.uid=Non_Staff\" title=\"Look up " gwiduri " in eGuide Non-Staff Container\" target=\"ldap\">N</a>"
# }
htmlstring = htmlstring "</span>"
}
else if( (tolower(substr(urls[i],1,4)) == "tel:" ) || (substr(urls[i],1,1) == "+") )
{
if (tolower(substr(urls[i],1,4)) == "tel:" )
{
# print ("<!-- #####DEBUG##### i=" i " ; urls[i]=" urls[i] " ; tolower(substr(urls[i],1,4))=" tolower(substr(urls[i],1,4)) " -->")
urls[i] = substr(urls[i],5) # strip out "tel:"
}
htmlstring = "<span class=\"vcard h-card\"><a class=\"p-tel\" href=\"tel:" urls[i] "\" title=\"Phone this number\">" urls[i] "</a></span>" ;
}
else htmlstring = txt2html(urls[i]) ;
outstring = outstring " " htmlstring # Concatenate to outstring separated by spaces
} # for i<=numurls
return(trim(outstring)) ; # Concatenation above prepends an extra space, so trim it
}
##### End makehtml #####
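# Usage sketch for parsecsv() (hypothetical record):
#   n = parsecsv("\"Doe, Jane\",Mayor,\"She said \"\"hi\"\"\"", a)
# leaves n == 3 with
#   a[1] = Doe, Jane
#   a[2] = Mayor
#   a[3] = She said "hi"
# (the quoted comma stays inside field 1, and doubled delimiters become one double-quote).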
function parsecsv(rawfield,fieldarray, fieldnum,qflag,i,char)
{
#
# Note that parsecsv() does not currently handle embedded CRLF
# eg. "data","data<CRLF>data","data"
#
delete fieldarray ;
qflag = 0 ; # TRUE if inside field delimiters
fieldnum = 1 ;
if(!FIELD_SEPARATOR)
FIELD_SEPARATOR = "," ;
if(!FIELD_DELIMITER)
FIELD_DELIMITER = "\"" ;
for(i=1; i <= length(rawfield); i++)
{ char = substr(rawfield,i,1)
if (char == FIELD_DELIMITER)
{
if (substr(rawfield,i+1,1) == FIELD_DELIMITER) # check for two adjacent field delimiters; treat as one character
{
i++ # skip over the next character, and add the delimiter to the output string
fieldarray[fieldnum] = fieldarray[fieldnum] FIELD_DELIMITER
}
else
qflag = !qflag;
}
else if (char == FIELD_SEPARATOR)
{ if (qflag)
fieldarray[fieldnum] = fieldarray[fieldnum] FIELD_SEPARATOR ;
else
{
fieldnum++ ;
}
}
else
{ fieldarray[fieldnum] = fieldarray[fieldnum] char ;
}
}
return(fieldnum);
}
##### End of parsecsv() #####
function printcsv(field) # Determine whether to print a field with delimiters
{
if(!FIELD_SEPARATOR)
FIELD_SEPARATOR = "," ;
if(!FIELD_DELIMITER)
FIELD_DELIMITER = "\"" ;
# Double field delimiters to escape them
gsub(FIELD_DELIMITER,FIELD_DELIMITER FIELD_DELIMITER,field)
# Apply field delimiters when field separator is in field
if (field ~ FIELD_SEPARATOR)
field = FIELD_DELIMITER field FIELD_DELIMITER
return(field)
}
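# Examples: valnameid("Jane Doe-Ward 3 (North)") returns "Jane_Doe-Ward_3__North_";
# valnameid("2022 Mayor") returns "a2022_Mayor" (an "a" is prepended because
# NAME/ID tokens must start with a letter).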
function valnameid(text, valtext) # Validate NAME and ID token names
{
valtext = text
gsub(/[^A-Za-z0-9\-_:\.]/, "_", valtext)
if(substr(valtext,1,1) !~ /[A-Za-z]/)
{
valtext = "a" valtext # Prepend an 'a'
}
return(valtext)
}
##### End of valnameid() #####
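# Typical calls (sketch): populate LINKREL[] first if extra <link> elements are wanted, then
#   printhtmlhead("My Page")             # prints a complete <head>, including </head>
#   printhtmlhead("My Page", "noclose")  # caller prints its own <link>s and "</head>" afterwards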
function printhtmlhead(title,noclosetag)
{
# print ("<!-- #####DEBUG##### title=" title " -->")
# print ("<!-- #####DEBUG##### noclosetag=" noclosetag " -->")
if (!QUOTE)
QUOTE = "\""
if ((ENVIRON["REQUEST_METHOD"]) && (!NOHTTP)) # If this is a CGI request and NOHTTP has not been specified
print("Content-type: text/html; charset=UTF-8\n") # ...then print HTTP header
### xml directives are not supported in HTML5
# print("<?xml version=" QUOTE "1.0" QUOTE " encoding=" QUOTE "utf-8" QUOTE "?>") ;
### Removed XHTML Strict --Bob 2022-05-06
# print("<!DOCTYPE html PUBLIC " QUOTE "-//W3C//DTD XHTML 1.0 Strict//EN" QUOTE ) ;
# print(" " QUOTE "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd" QUOTE ">") ;
# print("<html xmlns=" QUOTE "http://www.w3.org/1999/xhtml" QUOTE ) ;
# print(" xml:lang=" QUOTE "en" QUOTE ) ;
# print(" lang=" QUOTE "en" QUOTE ">") ;
### Added HTML5 header --Bob 2022-05-06
print("<!DOCTYPE html>")
print("<html lang=\"en\">")
print(" <head>") ;
print(" <meta http-equiv=" QUOTE "Content-Type" QUOTE ) ;
print(" content=" QUOTE "text/html;charset=utf-8" QUOTE ">") ;
print(" <link rel=" QUOTE "stylesheet" QUOTE ) ;
print(" href=" QUOTE "/default.css" QUOTE ) ;
print(" type=" QUOTE "text/css" QUOTE ">") ;
for(i in LINKREL)
print(" " LINKREL[i])
print(" <meta name=" QUOTE "generator" QUOTE ) ;
print(" content=" QUOTE "AWK tools for HTML by Bob Jonkman" QUOTE ">") ;
print(" <meta name=" QUOTE "author" QUOTE ) ;
print(" content=" QUOTE "Bob Jonkman bjonkman@sobac.com;" QUOTE ">") ;
print(" <meta name=" QUOTE "description" QUOTE ) ;
print(" content=" QUOTE "Datetime: " strftime("%FT%T") )
print(" " txt2html(title) QUOTE ">" )
print(" <title>" txt2html(title) "</title>") ;
if ("" == noclosetag) { # Suppress close tag if requested
print (" </head>")
}
}
##### End of printhtmlhead() #####
function parsecgi(inputstring,outputarray, querystring,paramstring,numparam)
{ delete querystring
delete outputarray
numparam = split(inputstring,querystring,"&")
for(i in querystring)
{
gsub(/\+/," ",querystring[i]) # remove + as space substitute
split(querystring[i],paramstring,"=")
# allow multi-value parameters, separate with FS
if (outputarray[uri2txt(paramstring[1])])
{
outputarray[uri2txt(paramstring[1])] = outputarray[uri2txt(paramstring[1])] FS uri2txt(paramstring[2]) ;
numparam-- ;
}
else
outputarray[uri2txt(paramstring[1])] = uri2txt(paramstring[2]) ;
}
return(numparam)
}
##### End of parsecgi() #####
function txt2gwapi(text)
{
gsub(/;/ ,"\\;" , text) # escape semicolon to backslash semicolon
gsub(/"/ , "\\\"", text) # escape unpaired double quote to backslash unpaired double quote
return(text)
}
##### End of txt2gwapi()
# Results of getheaders("Alpha,Beta,Gamma",namearray,numberarray)
# namearray["Alpha"] == 1
# namearray["Beta"] == 2
# namearray["Gamma"] == 3
# numberarray[1] == "Alpha"
# numberarray[2] == "Beta"
# numberarray[3] == "Gamma"
function getheaders(instring,namearray,numberarray, numfields,i) {
numfields = parsecsv(instring,numberarray)
for(i=1; i<=numfields; i++)
namearray[numberarray[i]] = i ;
return(numfields)
}
##### End of getheaders()
# printarray() : Recursively print an array's name/value pairs (or a scalar) for debugging;
#                returns the number of scalar elements printed at the current level
function printarray(arrayname, name, i)
{
if (isarray(arrayname)) {
print "Array:"
for (name in arrayname) {
if (isarray(arrayname[name])) {
print "\nBegin Subarray"
printarray(arrayname[name])
print "End Subarray\n"
} else {
print i++ " name=" name " arrayname[name]=" arrayname[name]
} # if(isarray(arrayname[name]))
} # for (name in arrayname)
} else { # not an array
print "Scalar: " arrayname
} # if(isarray(arrayname))
return i
}
##### End of printarray()
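# Usage sketch for printarray() (debugging aid; "candidates" stands in for whatever array the caller passes):
#   n = printarray(candidates)   # prints each name/value pair, recursing into subarrays
#   n is the count of scalar elements printed at the top level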
function gettitle(url)
{ return(txt2html(url)) # It's just a stub for now
}
# EOF: library.awk
← ↑ → makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from makefile
# Program : makefile
# Purpose : To re-create the files
# Author : Bob Jonkman bjonkman@sobac.com
# Date : 5 August 2022
.PHONY : all
all : csv2municipalcandidates.awk csv2municipalcandidates.html \
csv2candidates-search.awk \
csv2candidates.awk csv2candidates.html \
candidates.css \
rsync
csv2municipalcandidates.awk : csv2municipalcandidates-DEBUG.awk
grep -v "#####DEBUG#####" csv2municipalcandidates-DEBUG.awk > csv2municipalcandidates.awk
csv2candidates.awk : csv2candidates-DEBUG.awk
grep -v "#####DEBUG#####" csv2candidates-DEBUG.awk > csv2candidates.awk
csv2candidates.html : csv2candidates.awk csv2municipalcandidates.awk csv2candidates-search.awk /home/bjonkman/bin/awk/library.awk makefile */makefile
/home/bjonkman/bin/awk/code2html.awk -v LINK=1 -v TITLE="CSV to Candidates" \
csv2candidates.awk \
csv2municipalcandidates.awk \
csv2candidates-search.awk \
/home/bjonkman/bin/awk/library.awk \
makefile \
*/makefile > csv2candidates.html
### Everything
csv2candidates-search.awk : csv2candidates-search-DEBUG.awk
grep -v "#####DEBUG#####" csv2candidates-search-DEBUG.awk > csv2candidates-search.awk
.PHONY : rsync
rsync :
rsync -av --no-recursive --dirs --delete * irving:/home/jonkman/public_html/Poliblog-Elections/
# EOF: makefile
← ↑ → 2019-10-29-Federal-Election/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2019-10-29-Federal-Election/makefile
# File : makefile
# Purpose : Update 2019 Federal election candidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 20 November 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
# candidates.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2019-10-29-Federal-Election/candidates.ods
# cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2019-10-29-Federal-Election/candidates.ods
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
# Parties-Ridings.html : Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv
Parties-Ridings.html : Parties.csv Ridings.csv
# /home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv > Parties-Ridings.html
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2019-10-29-Federal-Election/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2021-09-20-Federal-Election/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2021-09-20-Federal-Election/makefile
# File : makefile
# Purpose : Update 2021 Federal election candidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 20 November 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
# Parties-Ridings.html : Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv
Parties-Ridings.html : Parties.csv
# /home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv > Parties-Ridings.html
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2021-09-20-Federal-Election/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2022-06-02-Provincial-Election/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2022-06-02-Provincial-Election/makefile
# File : makefile
# Purpose : Update 2022 Provincial election candidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 20 November 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
# candidates.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-06-02-Provincial-Election/candidates.ods
# cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-06-02-Provincial-Election/candidates.ods
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
# Parties-Ridings.html : Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv
Parties-Ridings.html : Parties.csv Ridings.csv
# /home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv > Parties-Ridings.html
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2022-06-02-Provincial-Election/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2022-10-24-Municipal-Election/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2022-10-24-Municipal-Election/makefile
# File : makefile
# Purpose : csv2municipalcandidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 July 2022
.PHONY : all
all : 2022-Municipal.ods 2022-Municipal.csv index.html Municipality.html searchassistant.html rsync
2022-Municipal.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/
index.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/index.html
2022-Municipal.csv : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
soffice --convert-to "csv" /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.ods
Municipality.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Municipalities, URLs and Logos" /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/Municipality.html
searchassistant.html : /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates-search.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates-search.awk /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/2022-Municipal.csv > /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/2022-10-24-Municipal-Election/ irving:/home/jonkman/public_html/Poliblog-Elections/2022-10-24-Municipal-Election/
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2023-11-13-Cambridge-Ward01-Byelection/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2023-11-13-Cambridge-Ward01-Byelection/makefile
# File : makefile
# Purpose : csv2municipalcandidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 July 2022
# Modified: 1 November 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Municipalities.html searchassistant.html rsync
index.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2municipalcandidates.awk /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/index.html
candidates.csv : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.ods
soffice --convert-to "csv" /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.ods
Municipalities.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Municipalities, URLs and Logos" /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/Municipalities.html
searchassistant.html : /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv /home/bjonkman/Documents/Websites/PoliBlog/csv2candidates-search.awk
/home/bjonkman/Documents/Websites/PoliBlog/csv2candidates-search.awk /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/candidates.csv > /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/2023-11-13-Cambridge-Ward01-Byelection/ irving:/home/jonkman/public_html/Poliblog-Elections/2023-11-13-Cambridge-Ward01-Byelection/
rsync -av --delete /home/bjonkman/Documents/Websites/PoliBlog/images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2023-Provincial-Byelection-Kitchener/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2023-Provincial-Byelection-Kitchener/makefile
# File : makefile
# Purpose : Update 2023 Kitchener Centre Byelection candidates - Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 30 October 2023
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
# candidates.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2023-Provincial-Byelection-Kitchener/candidates.ods
# cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2023-Provincial-Byelection-Kitchener/candidates.ods
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
Parties-Ridings.html : Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv ReservedParties.csv UnregisteredParties.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2023-Provincial-Byelection-Kitchener/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
← ↑ → 2025-Federal-Election/makefile
Generated on Sat, 09 Dec 2023 03:59:33 -0500 from 2025-Federal-Election/makefile
# File : makefile
# Purpose : Re-run various scripts when updates are made
# Author : Bob Jonkman
# Date : 23 November 2023
# Notes : for upcoming Federal Election, change folder name when date is known
.PHONY : all
all : candidates.ods candidates.csv index.html Parties-Ridings.html searchassistant.html rsync
# candidates.ods : /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2025-Federal-Election/candidates.ods
# cp -pv /home/bjonkman/mount/nextcloud/Documents/Websites/PoliBlog/2025-Federal-Election/candidates.ods
index.html : candidates.csv ../csv2candidates.awk
../csv2candidates.awk candidates.csv > index.html
candidates.csv : candidates.ods
soffice --convert-to "csv" candidates.ods
Parties-Ridings.html : Parties.csv Ridings.csv
/home/bjonkman/bin/awk/csv2html.awk -v TITLE="Parties and Ridings" -v LINK=1 -v SORTABLE=1 Parties.csv Ridings.csv > Parties-Ridings.html
searchassistant.html : candidates.csv ../csv2candidates-search.awk
../csv2candidates-search.awk candidates.csv > searchassistant.html
.PHONY : rsync
rsync :
rsync -av --delete ./ irving:/home/jonkman/public_html/Poliblog-Elections/2025-Federal-Election/
rsync -av --delete ../images/ irving:/home/jonkman/public_html/Poliblog-Elections/images/
# EOF: makefile
12 files processed.