#!/usr/bin/env python
import sys, os, operator, urllib.request, urllib.parse, urllib.error, json, re, time
from datetime import datetime
from functools import reduce
from pathlib import Path

# 2017 originally by Martin Green
# 2018-08-27 edited Philip Sargent
# 2019-03-02 extended to take command line argument of loser_dir and set mod time of index.html
#            to be same as json file
# 2019-12-17 extra output of links to troggle-generated trip data
# 2019-12-31 bits to make website link-checker not barf so much. Added endswith() to .startswith()
#            for notes, elev, plan filenames
# 2020-01-21 Now we are using Windows10-WSL1, +links to expedition logbook on every generated page
# 2020-03-15 Adding timestamp to visible outputs, changing name of produced files to walletindex.html
#            so that contents can be browsed
# 2020-03-15 Added "ignore" to the #00 folder containing scraps - then removed as we do
#            want it to appear in the reports under "UNKNOWN"
# 2021-04-24 Converted from python2 to python3 - god almighty did I really once think this was an
#            acceptable python layout?

'''This stand-alone program processes all the wallet folders for one year and produces the
list of actions that need to be done. It produces
- an overall summary page for all the wallets in this year
- a summary page for each wallet
- a page specific to each person, listing what they need to do across all wallets

It scans the subdirectories only one level deep, e.g. we are in /2020/ so it scans
/2020/2020#01, /2020/2020#02 et seq.

All the files in one folder must be for only one cave, but in principle could be for several
trips. However all the files in one folder should relate to a single survex file (troggle
assumes this) and a survex file should relate to a single trip (we do this, the Austrians
and Germans don't).
'''
loser_dir = "/home/expo/loser"
#loser_dir = "/mnt/d/CUCC-Expo/Loser/"                 # when running on Win10-WSL1
#loser_dir = "/media/philip/SD-huge/CUCC-Expo/loser/"  # when running on xubuntu laptop 'barbie'

# GLOBALS
wallets_needing_scanning = set()
website_needing_updating = set()
wallets = []   # need to use wallets as a dict/tuple (id, cave, name)
people = {}
cave = ""
name = ""
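# Usage sketch (paths illustrative, not mandated by the script): this script is
# expected to sit inside the year folder it indexes and to be run from there,
# optionally passing the local "loser" survex repo as an argument:
#
#   cd /home/expo/expofiles/surveyscans/2020
#   python wallets.py /home/expo/loser
#
# It writes walletindex.html in the year folder and in every wallet subfolder,
# plus one <person>.html per person listing their outstanding jobs.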

html_base = "<html><body>%(body)s</body></html>"

html_year_index = html_base % {"body": '''
<h1>%(year)s surveys: wallets status</h1>
<p>List of trips: <a href="http://expo.survex.com/expedition/%(year)s">expedition/%(year)s</a>
- troggle-processed .svx files and logbook entries on server</p>
<p>As of %(timestamp)s</p>
<h2>Persons</h2>
<ul>
%(persons)s
</ul>
<h2>Wallets</h2>
<table>
%(wallets)s
</table>
<h2>Needing Scanning</h2>
<ul>
%(needing scanning)s
</ul>
<h2>Website (Guidebook description) needing updating</h2>
<ul>
%(website needing updating)s
</ul>
'''}
html_year_person = '<li><a href="%(person)s.html">%(person)s</a><ul>\n%(complaints)s</ul></li>\n'
html_year_wallet_entry = '<tr><td><a href="%(walletindex)s">%(walletname)s</a></td><td>%(cave)s</td><td>%(name)s</td><td>%(complaints)s</td></tr>\n'
html_person_wallet_entry = '<li><a href="%(walletindex)s">%(walletname)s</a><ul>\n%(complaints)s</ul></li>\n'
html_year_scanning_entry = '<li><a href="%(walletindex)s">%(walletname)s %(cave)s %(name)s</a></li>\n'
html_wallet_file_entry = '<li><a href="%(fileurl)s">%(filename)s</a></li>\n'

html_wallet_index = html_base % {"body": '''
<h1>%(title)s : %(cave)s : %(name)s</h1>
<p>List of trips: <a href="http://expo.survex.com/expedition/%(year)s">expedition/%(year)s</a>
- troggle-processed .svx files and logbook entries on server</p>
<p>Date: %(date)s</p>
<p>People: %(people)s</p>
<p>Cave <a href="%(description)s">Guidebook description</a> - %(description_needed)s</p>
<p>Survex file:<br>
&nbsp;&nbsp;Local: <a href="file:///%(loser_dir)s/%(survex)s">file:///%(loser_dir)s/%(survex)s</a><br>
&nbsp;&nbsp;Server: <a href="http://expo.survex.com/survexfile/%(survex)s">http://expo.survex.com/survexfile/%(survex)s</a></p>
<p><a href="../walletindex.html">Wallet index for this year</a><br>
Local location for ::loser:: repo specified on command line is %(loser_dir)s.</p>
<h2>Issues</h2>
%(complaints)s
<h2>Files</h2>
<ul>
%(files)s
</ul>
'''}

# One-word status flags, concatenated into the wallet status cell on the year page
html_survex_required = {True: "Survex ", False: ""}
html_plan_scanned = {True: "", False: "Plan "}
html_elev_scanned = {True: "", False: "Elev "}
html_description_written = {True: "", False: "Desc "}
html_qms_written = {True: "", False: "QMs "}
html_status = {True: "Issues: ", False: ""}

html_person = html_base % {"body": '''
<h1>%(person)s</h1>
<p>List of trips: <a href="http://expo.survex.com/expedition/%(year)s">expedition/%(year)s</a>
- troggle-processed .svx files and logbook entries on server</p>
<h2>Outstanding Wallets</h2>
<p>As of %(timestamp)s</p>
<ul>
%(wallets)s
</ul>
'''}
html_complaint_items = "<li>%(count)i %(complaint)s</li>\n"
html_items = "<li>%s</li>\n"
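# A filled-in contents.json looks roughly like this (values illustrative);
# blank_json below supplies the defaults for any keys a wallet is missing:
#   {
#    "cave": "1623/204",
#    "date": "2020-08-02",
#    "description url": "/1623/204/204.html",
#    "description written": false,
#    "electronic survey": false,
#    "elev drawn": false,
#    "elev not required": false,
#    "name": "Rhino",
#    "people": ["PhilipSargent", "MartinGreen"],
#    "plan drawn": true,
#    "plan not required": false,
#    "qms written": false,
#    "survex file": "caves-1623/204/rhino.svx",
#    "survex not required": false,
#    "website updated": false
#   }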
  • " blank_json = { "cave": "", "date": "", "description url": "/caves", "description written": False, "electronic survey": False, "elev drawn": False, "elev not required": False, "name": "", "people": [ "Unknown" ], "plan drawn": False, "plan not required": False, "qms written": False, "survex file": "", "survex not required": False, "website updated": False} def do_item(year, item): global loser_dir global wallets global people global cave, name global wallets_needing_scanning global website_needing_updating files = [] for f in os.listdir(os.path.join(".", item)): if f not in ["contents.json", "contents.json~","walletindex.html"] and os.path.isfile(os.path.join(".", item, f)): files.append(f) contents_path = os.path.join(".", item, "contents.json") #print "Trying to read file %s" % (contents_path) if not os.path.isfile(contents_path): print("Creating file %s from template" % (contents_path)) json_file = open(contents_path, "w") json.dump(blank_json, json_file, sort_keys=True, indent = 1) json_file.close() #print "Reading file %s" % (contents_path) json_file = open(contents_path) try: data = json.load(json_file) except: print("FAILURE parsing JSON file %s" % (contents_path)) # Python bug: https://github.com/ShinNoNoir/twitterwebsearch/issues/12 raise if not data["people"]: data["people"]=["NOBODY"] json_file.close() write_required = False try: wallet, cave, name = re.match("(\d\d\d\d#\d\d)-(.*) (.*)", item).groups() except: wallet, cave, name = "", "", "" #print data for k, v in list(blank_json.items()): if k not in data: if k == "cave": data[k] = cave elif k == "name": data[k] = name else: data[k] = v write_required = True #print write_required if write_required: print("Writing file %s" % (contents_path)) json_file = open(contents_path, "w") json.dump(data, json_file, indent = 1) json_file.close() # Get modification time of contents.json # print("json last modified: %s" % time.ctime(os.path.getmtime(contents_path))) json_mtime = os.path.getmtime(contents_path) #make wallet descriptions #Survex not_req = (data["survex not required"] and data["survex file"] == "") req = (not data["survex not required"] and os.path.isfile(os.path.join(loser_dir, data["survex file"]))) survex_required = not_req or not req survex_complaint = "" if data["survex not required"] and data["survex file"] != "": survex_complaint = "Survex is not required and yet there is a survex file!" if not data["survex not required"] and data["survex file"] == "": survex_complaint = "A survex file is required, but has not been specified!" if not data["survex not required"] and not os.path.isfile(os.path.join(loser_dir, data["survex file"])): survex_complaint = "The specified survex file (%s) does not exist here!" 
% os.path.join(loser_dir, data["survex file"]) complaints = [] person_complaints = [] if survex_required: complaints.append(survex_complaint) person_complaints.append(survex_complaint) #Notes notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False) notes_scanned = reduce(operator.or_, [f.endswith("note") for f in files], notes_scanned) if not notes_scanned: complaints.append("The notes needs scanning (no noteNN.jpg or XXnote.jpg file found)") wallets_needing_scanning.add(item) #Plan drawing required plan_scanned = reduce(operator.or_, [f.startswith("plan") for f in files], False) plan_scanned = reduce(operator.or_, [f.endswith("plan") for f in files], plan_scanned) plan_drawing_required = not (plan_scanned or data["plan drawn"]) if plan_drawing_required: complaints.append("The plan needs drawing (no planNN.jpg or XXplan.jpg file found)") person_complaints.append(" plan(s) needs drawing (no planNN.jpg or XXplan.jpg file found)") if not plan_drawing_required and not plan_scanned: complaints.append("The plan needs scanning (no planNN.jpg or XXplan.jpg file found)") wallets_needing_scanning.add(item) #Elev drawing required elev_scanned = reduce(operator.or_, [f.startswith("elev") for f in files], False) elev_scanned = reduce(operator.or_, [f.endswith("elev") for f in files], elev_scanned) elev_drawing_required = not (elev_scanned or data["elev drawn"]) if elev_drawing_required: complaints.append("The elev needs drawing (no elevNN.jpg or XXelev.jpg file found)") person_complaints.append(" elev(s) needs drawing (no elevNN.jpg or XXelev.jpg file found)") if not elev_drawing_required and not elev_scanned: complaints.append("The elev needs scanning (no elevNN.jpg or XXelev.jpg file found)") wallets_needing_scanning.add(item) #Description if not data["description written"]: complaints.append("The description needs writing") person_complaints.append(" description(s) needs writing") description_needed = "A description is indicated as being needed, so may need adding into this cave page." #QMS if not data["qms written"]: complaints.append("The QMs needs writing") person_complaints.append(" set(s) of QMs needs writing") #Website if not data["website updated"]: complaints.append("The website is marked as needing updating (using the guidebook description)") website_needing_updating.add(item) #Electronic Surveys if not data["electronic survey"]: complaints.append("Tunnel / Therion drawing files need drawing") if data["survex file"]: survex_description = data["survex file"] else: survex_description = "Not specified" wallet_index_file = open(os.path.join(item, "walletindex.html"), "w") wallet_index_file.write(html_wallet_index % {"title": item, "year": year, "cave": data["cave"], "name": data["name"], "date": data["date"], "people": reduce(operator.add, [" %s," % person for person in data["people"]], ""), "description": "http://expo.survex.com"+data["description url"], "description_needed": description_needed, "loser_dir": loser_dir, "loser_dirw": loser_dir[5].upper() + ':/' + loser_dir[7:], "survex": survex_description, "complaints": reduce(operator.add, ["

    " + complaint + "

    " for complaint in complaints], ""), "files": reduce(operator.add, [html_wallet_file_entry % {"fileurl": urllib.parse.quote(f), "filename": f} for f in files], "")}) wallet_index_file.close() wallets.append((item, data["cave"], data["name"], survex_required, plan_scanned, elev_scanned, data["description written"], data["qms written"])) # Set modification time to be the same as that of contents.json index_file = item+"/walletindex.html" os.utime(index_file, ( json_mtime,json_mtime)) #People for person in data["people"]: # delete all person.html as we are recreating all the ones that matter and old ones have old data if os.path.isfile(person + ".html"): os.remove(person + ".html") if person_complaints: for person in data["people"]: if person not in people: people[person] = [] people[person].append((item, person_complaints)) def main(): global loser_dir global wallets global people global cave, name global wallets_needing_scanning global website_needing_updating if len(sys.argv) > 1 : if sys.argv[1] != "": loser_dir = sys.argv[1] dateTimeObj=datetime.now(tz=None) timestamp = dateTimeObj.strftime("%d-%b-%Y (%H:%M)") print("Loser repo (for svx files) is assumed to be in: " + loser_dir + "/") drawings_dir = loser_dir[0:len(loser_dir)-5] + "drawings" print("Drawings repo (for drawings files) is assumed to be in: " + drawings_dir + "/") #use dir this file is in to get current year path,year = os.path.split(os.path.dirname(os.path.realpath(__file__))) print("Year: " + year) for item in sorted(os.listdir(".")): if os.path.isdir(item) and item != year+"indexpages": do_item(year, item) wallets.sort() website_needing_updating = list(website_needing_updating) website_needing_updating.sort() wallets_needing_scanning = list(wallets_needing_scanning) wallets_needing_scanning.sort() person_summary = [] for person, person_wallets in list(people.items()): complaints = reduce(operator.add, [complaints for wallet, complaints in person_wallets], []) complaints_summary = [] for complaint in set(complaints): complaints_summary.append((complaint, complaints.count(complaint))) person_summary.append((person, complaints_summary)) person_summary = dict(person_summary) year_index_file = open("walletindex.html", "w") year_index_file.write(html_year_index % {"year": year, "timestamp": timestamp, "persons": reduce(operator.add, [html_year_person % {"person": person, "complaints": reduce(operator.add, [html_complaint_items % {"complaint": complaint, "count": count} for complaint, count in complaints], "")} for person, complaints in list(person_summary.items())], ""), "needing scanning": reduce(operator.add, [html_year_scanning_entry % {"walletname": wallet, "cave": cave, "name": name, "walletindex": urllib.parse.quote(wallet) + "/walletindex.html"} for (wallet) in wallets_needing_scanning], ""), "website needing updating": reduce(operator.add, [html_year_scanning_entry % {"walletname": wallet, "cave": cave, "name": name, "walletindex": urllib.parse.quote(wallet) + "/walletindex.html"} for (wallet) in website_needing_updating], ""), "wallets": reduce(operator.add, [html_year_wallet_entry % {"walletname": wallet, "cave": cave, "name": name, "walletindex": urllib.parse.quote(wallet) + "/walletindex.html", "complaints": html_status[survex_required or not plan_scanned or not elev_scanned or description_written] + html_survex_required[survex_required] + html_plan_scanned[plan_scanned] + html_elev_scanned[elev_scanned] + html_description_written[description_written] + html_qms_written[qms_written] } for (wallet, cave, name, 
survex_required, plan_scanned, elev_scanned, description_written, qms_written) in wallets])}) year_index_file.close() for person, item_complaint_list in list(people.items()): person_file = open(person + ".html", "w") person_file.write(html_person % {"person": person, "year": year, "timestamp": timestamp, "wallets": reduce(operator.add, [html_person_wallet_entry % {"walletname": wallet, "walletindex": urllib.parse.quote(wallet) + "/walletindex.html", "complaints": reduce(operator.add, [html_items % complaint for complaint in complaints], "") } for wallet, complaints in item_complaint_list], "") }) person_file.close() #if __name__ == "__main__": main()
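# Minimal smoke-test sketch (assumes a scratch year folder; names illustrative):
#   mkdir -p "/tmp/2020/2020#01-204 Rhino" && cd /tmp/2020
#   cp /path/to/wallets.py . && python wallets.py /home/expo/loser
# The script creates "2020#01-204 Rhino/contents.json" from blank_json, then
# reports the survex/notes/plan/elev/description/QM actions still outstanding.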