#!/usr/bin/env python3
import sys, os, operator, urllib.request, urllib.parse, urllib.error, json, re, time
from datetime import datetime
from functools import reduce
from pathlib import Path
# 2017 originally by Martin Green
# 2018-08-27 edited Philip Sargent
# 2019-03-02 extended to take command line argument of loser_dir and set mod time of index.html
# to be same as json file
# 2019-12-17 extra output of links to troggle-generated trip data
# 2019-12-31 bits to make website link-checker not barf so much. Added endswith() to .startswith()
# for notes, elev, plan filenames
# 2020-01-21 Now we are using Windows10-WSL1, +links to expedition logbook on every generated page
# 2020-03-15 Adding timestamp to visible outputs, changing name of produced files to walletindex.html
# so that contents can be browsed
# 2020-03-15 Added "ignore" to the <year>#00 folder containing scraps - then removed as we do
# want it to appear in the reports under "UNKNOWN"
# 2021-04-24 Converted from python2 to python3 - god almighty did I really once think this was an
# acceptable python layout?
'''This stand-alone programme processes all the wallet folders for one year and produces the
list of actions that need to be done.
It produces:
- an overall summary page for all the wallets in this year
- a summary page for each wallet
- a page specific to each person, listing what they need to do across all wallets
It scans the subdirectories only one level deep,
e.g. we are in /2020/ so it scans /2020/2020#01, /2020/2020#02 et seq.
All the files in one folder must be for only one cave, but in principle could be for several trips.
However, all the files in one folder should relate to a single survex file (troggle assumes this) and
a survex file should relate to a single trip (we do this; the Austrians and Germans don't).
'''
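
# Illustrative usage and layout (the folder and cave names below are hypothetical
# examples; only the <year>#<nn> convention and the generated filenames are real):
#
#   cd /path/to/surveyscans/2020      # run from inside the year folder this script lives in
#   python3 wallets.py /home/expo/loser
#
#   2020/
#     2020#01-264 Balkonhoehle/       scans + contents.json for one wallet
#     2020#02-.../
#     walletindex.html                generated: whole-year summary
#     <person>.html                   generated: one page per person
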
loser_dir = " /home/expo/loser "
#loser_dir = "/mnt/d/CUCC-Expo/Loser/" # when running on Win10-WSL1
#loser_dir = "/media/philip/SD-huge/CUCC-Expo/loser/" # when running on xubuntu laptop 'barbie'

# GLOBALS
wallets_needing_scanning = set()
website_needing_updating = set()
wallets = []  # need to use wallets as a dict/tuple (id, cave, name)
people = {}
cave = ""
name = ""

html_base = "<html><body>%(body)s</body></html>"
html_year_index = html_base % {"body": "<H1>%(year)s surveys: wallets status</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\nAs of %(timestamp)s\n<H2>Persons</H2>\n<UL>\n%(persons)s</UL>\n<H2>Wallets</H2>\n<table>%(wallets)s</table>\n<H2>Needing Scanning</H2>\n<UL>\n%(needing scanning)s</ul>\n<H2>Website (Guidebook description) needing updating\n</H2>\n<UL style=\"column-count: 3;\">\n%(website needing updating)s</ul>\n"}
html_year_person = "<li><a href='%(person)s.html'>%(person)s</a><UL>\n%(complaints)s</ul></li>\n"
html_year_wallet_entry = "<tr><td><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></td> <td>%(complaints)s</td></tr>\n"
html_person_wallet_entry = "<li><a href='%(walletindex)s'>%(walletname)s</a> <UL>\n%(complaints)s</ul></li>\n"
html_year_scanning_entry = "<li><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></li>\n"
html_wallet_file_entry = "<li><a href='%(fileurl)s'>%(filename)s</a></li>\n"
html_wallet_index = html_base % {"body": "<H1>%(title)s : %(cave)s : %(name)s</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\n<p>Date: %(date)s</p><p>People: %(people)s</p>\n<p>Cave <a href='%(description)s'>Guidebook description</a> - %(description_needed)s\n<p>Survex file:<br> <br> Local: <a href='file:///%(loser_dir)s/%(survex)s' download>file:///%(loser_dir)s/%(survex)s</a><br> Server: <a href='http://expo.survex.com/survexfile/%(survex)s' download>http://expo.survex.com/survexfile/%(survex)s</a></p><a href='../walletindex.html'>Wallet index for this year</a><br/>Local location for ::loser:: repo specified on command line is <a href='file:///%(loser_dir)s'>%(loser_dir)s</a>.</p>\n<H2>Issues</H2>\n%(complaints)s\n<H2>Files</H2>\n<UL>\n%(files)s</UL>\n"}
html_survex_required = {True: "Survex ", False: ""}
html_plan_scanned = {True: "", False: "Plan "}
html_elev_scanned = {True: "", False: "Elev "}
html_description_written = {True: "", False: "Desc "}
html_qms_written = {True: "", False: "QMs "}
html_status = {True: "Issues: ", False: ""}
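# These boolean-keyed lookup dicts are concatenated to build the one-line status
# cell for each wallet in the year index; e.g. a wallet still missing its plan
# and its description renders as "Issues: Plan Desc " (the trailing spaces in
# the values act as separators).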
html_person = html_base % {"body": "<H1>%(person)s</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\n<H2>Outstanding Wallets</H2>\nAs of %(timestamp)s\n<UL>\n%(wallets)s</UL>"}
html_complaint_items = "<li>%(count)i %(complaint)s</li>"
html_items = "<li>%s</li>"

blank_json = {
    "cave": "",
    "date": "",
    "description url": "/caves",
    "description written": False,
    "electronic survey": False,
    "elev drawn": False,
    "elev not required": False,
    "name": "",
    "people": [
        "Unknown"
    ],
    "plan drawn": False,
    "plan not required": False,
    "qms written": False,
    "survex file": "",
    "survex not required": False,
    "website updated": False}


def do_item(year, item):
    global loser_dir
    global wallets
    global people
    global cave, name
    global wallets_needing_scanning
    global website_needing_updating

    files = []
    for f in os.listdir(os.path.join(".", item)):
        if f not in ["contents.json", "contents.json~", "walletindex.html"] and os.path.isfile(os.path.join(".", item, f)):
            files.append(f)

    contents_path = os.path.join(".", item, "contents.json")
    #print("Trying to read file %s" % (contents_path))
    if not os.path.isfile(contents_path):
        print("Creating file %s from template" % (contents_path))
        json_file = open(contents_path, "w")
        json.dump(blank_json, json_file, sort_keys=True, indent=1)
        json_file.close()
    #print("Reading file %s" % (contents_path))
    json_file = open(contents_path)
    try:
        data = json.load(json_file)
    except:
        print("FAILURE parsing JSON file %s" % (contents_path))
        # Python bug: https://github.com/ShinNoNoir/twitterwebsearch/issues/12
        raise
    if not data["people"]:
        data["people"] = ["NOBODY"]
    json_file.close()

    write_required = False
    try:
        # wallet folder names follow the convention "<year>#<nn>-<cave> <name>"
        wallet, cave, name = re.match(r"(\d\d\d\d#\d\d)-(.*) (.*)", item).groups()
    except:
        wallet, cave, name = "", "", ""
    #print(data)
    # back-fill any keys missing from contents.json with the template defaults
    for k, v in list(blank_json.items()):
        if k not in data:
            if k == "cave":
                data[k] = cave
            elif k == "name":
                data[k] = name
            else:
                data[k] = v
            write_required = True
    #print(write_required)
    if write_required:
        print("Writing file %s" % (contents_path))
        json_file = open(contents_path, "w")
        json.dump(data, json_file, indent=1)
        json_file.close()

    # Get modification time of contents.json
    # print("json last modified: %s" % time.ctime(os.path.getmtime(contents_path)))
    json_mtime = os.path.getmtime(contents_path)

    # make wallet descriptions

    # Survex
    not_req = (data["survex not required"] and data["survex file"] == "")
    req = (not data["survex not required"] and os.path.isfile(os.path.join(loser_dir, data["survex file"])))
    survex_required = not_req or not req
    survex_complaint = ""
    if data["survex not required"] and data["survex file"] != "":
        survex_complaint = "Survex is not required and yet there is a survex file!"
    if not data["survex not required"] and data["survex file"] == "":
        survex_complaint = "A survex file is required, but has not been specified!"
    if not data["survex not required"] and not os.path.isfile(os.path.join(loser_dir, data["survex file"])):
        survex_complaint = "The specified survex file (%s) does not exist here!" % os.path.join(loser_dir, data["survex file"])
    complaints = []
    person_complaints = []
    if survex_required:
        complaints.append(survex_complaint)
        person_complaints.append(survex_complaint)

    # Notes
    notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
    notes_scanned = reduce(operator.or_, [f.endswith("note") for f in files], notes_scanned)
    if not notes_scanned:
        complaints.append("The notes need scanning (no noteNN.jpg or XXnote.jpg file found)")
        wallets_needing_scanning.add(item)
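    # (reduce(operator.or_, [...], seed) is just an accumulated "or"; an equivalent
    #  would be any(f.startswith("note") or f.endswith("note") for f in files).
    #  The same idiom is used for the plan and elev checks below.)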

    # Plan drawing required
    plan_scanned = reduce(operator.or_, [f.startswith("plan") for f in files], False)
    plan_scanned = reduce(operator.or_, [f.endswith("plan") for f in files], plan_scanned)
    plan_drawing_required = not (plan_scanned or data["plan drawn"])
    if plan_drawing_required:
        complaints.append("The plan needs drawing (no planNN.jpg or XXplan.jpg file found)")
        person_complaints.append("plan(s) need drawing (no planNN.jpg or XXplan.jpg file found)")
    if not plan_drawing_required and not plan_scanned:
        complaints.append("The plan needs <em>scanning</em> (no planNN.jpg or XXplan.jpg file found)")
        wallets_needing_scanning.add(item)

    # Elev drawing required
    elev_scanned = reduce(operator.or_, [f.startswith("elev") for f in files], False)
    elev_scanned = reduce(operator.or_, [f.endswith("elev") for f in files], elev_scanned)
    elev_drawing_required = not (elev_scanned or data["elev drawn"])
    if elev_drawing_required:
        complaints.append("The elev needs drawing (no elevNN.jpg or XXelev.jpg file found)")
        person_complaints.append("elev(s) need drawing (no elevNN.jpg or XXelev.jpg file found)")
    if not elev_drawing_required and not elev_scanned:
        complaints.append("The elev needs <em>scanning</em> (no elevNN.jpg or XXelev.jpg file found)")
        wallets_needing_scanning.add(item)

    # Description
    description_needed = ""
    if not data["description written"]:
        complaints.append("The description needs writing")
        person_complaints.append("description(s) need writing")
        description_needed = "A description is indicated as being needed, so may need adding into this cave page."

    # QMs
    if not data["qms written"]:
        complaints.append("The QMs need writing")
        person_complaints.append("set(s) of QMs need writing")

    # Website
    if not data["website updated"]:
        complaints.append("The website is marked as needing updating (using the guidebook description)")
        website_needing_updating.add(item)

    # Electronic surveys
    if not data["electronic survey"]:
        complaints.append("Tunnel / Therion drawing files need drawing")

    if data["survex file"]:
        survex_description = data["survex file"]
    else:
        survex_description = "Not specified"

    wallet_index_file = open(os.path.join(item, "walletindex.html"), "w")
    wallet_index_file.write(html_wallet_index % {
        "title": item,
        "year": year,
        "cave": data["cave"],
        "name": data["name"],
        "date": data["date"],
        "people": reduce(operator.add, ["%s, " % person for person in data["people"]], ""),
        "description": "http://expo.survex.com" + data["description url"],
        "description_needed": description_needed,
        "loser_dir": loser_dir,
        # Windows-style drive path, for when loser_dir is a WSL path like /mnt/d/...
        "loser_dirw": loser_dir[5].upper() + ':/' + loser_dir[7:],
        "survex": survex_description,
        "complaints": reduce(operator.add, ["<p>" + complaint + "</p>" for complaint in complaints], ""),
        "files": reduce(operator.add,
                        [html_wallet_file_entry % {"fileurl": urllib.parse.quote(f),
                                                   "filename": f}
                         for f in files],
                        "")})
    wallet_index_file.close()

    wallets.append((item, data["cave"], data["name"], survex_required, plan_scanned, elev_scanned,
                    data["description written"], data["qms written"]))

    # Set modification time to be the same as that of contents.json
    index_file = item + "/walletindex.html"
    os.utime(index_file, (json_mtime, json_mtime))

    # People
    for person in data["people"]:
        # delete all person.html as we are recreating all the ones that matter and old ones have old data
        if os.path.isfile(person + ".html"):
            os.remove(person + ".html")
    if person_complaints:
        for person in data["people"]:
            if person not in people:
                people[person] = []
            people[person].append((item, person_complaints))


def main():
    global loser_dir
    global wallets
    global people
    global cave, name
    global wallets_needing_scanning
    global website_needing_updating

    if len(sys.argv) > 1:
        if sys.argv[1] != "":
            loser_dir = sys.argv[1]

    dateTimeObj = datetime.now(tz=None)
    timestamp = dateTimeObj.strftime("%d-%b-%Y (%H:%M)")
    print("Loser repo (for svx files) is assumed to be in: " + loser_dir + "/")
    drawings_dir = loser_dir[0:len(loser_dir) - 5] + "drawings"  # assumes loser_dir ends in "loser"
    print("Drawings repo (for drawings files) is assumed to be in: " + drawings_dir + "/")

    # use the directory this file is in to get the current year
    path, year = os.path.split(os.path.dirname(os.path.realpath(__file__)))
    print("Year: " + year)

    for item in sorted(os.listdir(".")):
        if os.path.isdir(item) and item != year + "indexpages":
            do_item(year, item)

    wallets.sort()
    website_needing_updating = sorted(website_needing_updating)
    wallets_needing_scanning = sorted(wallets_needing_scanning)

    person_summary = []
    for person, person_wallets in list(people.items()):
        complaints = reduce(operator.add, [complaints for wallet, complaints in person_wallets], [])
        complaints_summary = []
        for complaint in set(complaints):
            complaints_summary.append((complaint, complaints.count(complaint)))
        person_summary.append((person, complaints_summary))

    person_summary = dict(person_summary)
    year_index_file = open("walletindex.html", "w")
    year_index_file.write(html_year_index % {
        "year": year,
        "timestamp": timestamp,
        "persons": reduce(operator.add,
                          [html_year_person % {"person": person,
                                               "complaints": reduce(operator.add,
                                                                    [html_complaint_items % {"complaint": complaint,
                                                                                             "count": count}
                                                                     for complaint, count in complaints],
                                                                    "")}
                           for person, complaints in list(person_summary.items())],
                          ""),
        "needing scanning": reduce(operator.add,
                                   [html_year_scanning_entry % {"walletname": wallet,
                                                                "cave": cave,
                                                                "name": name,
                                                                "walletindex": urllib.parse.quote(wallet) + "/walletindex.html"}
                                    for wallet in wallets_needing_scanning],
                                   ""),
        "website needing updating": reduce(operator.add,
                                           [html_year_scanning_entry % {"walletname": wallet,
                                                                        "cave": cave,
                                                                        "name": name,
                                                                        "walletindex": urllib.parse.quote(wallet) + "/walletindex.html"}
                                            for wallet in website_needing_updating],
                                           ""),
        "wallets": reduce(operator.add,
                          [html_year_wallet_entry % {"walletname": wallet,
                                                     "cave": cave,
                                                     "name": name,
                                                     "walletindex": urllib.parse.quote(wallet) + "/walletindex.html",
                                                     "complaints": html_status[survex_required or not plan_scanned or not elev_scanned or not description_written or not qms_written]
                                                                   + html_survex_required[survex_required]
                                                                   + html_plan_scanned[plan_scanned]
                                                                   + html_elev_scanned[elev_scanned]
                                                                   + html_description_written[description_written]
                                                                   + html_qms_written[qms_written]}
                           for wallet, cave, name, survex_required, plan_scanned, elev_scanned, description_written, qms_written in wallets],
                          "")})
    year_index_file.close()

    for person, item_complaint_list in list(people.items()):
        person_file = open(person + ".html", "w")
        person_file.write(html_person % {
            "person": person,
            "year": year,
            "timestamp": timestamp,
            "wallets": reduce(operator.add,
                              [html_person_wallet_entry % {"walletname": wallet,
                                                           "walletindex": urllib.parse.quote(wallet) + "/walletindex.html",
                                                           "complaints": reduce(operator.add,
                                                                                [html_items % complaint
                                                                                 for complaint in complaints],
                                                                                "")}
                               for wallet, complaints in item_complaint_list],
                              "")})
        person_file.close()


if __name__ == "__main__":
    main()