mirror of https://expo.survex.com/repositories/expoweb/.git/
synced 2024-11-22 07:11:55 +00:00
added timestamp to outputs
parent ca0b4cec62
commit f1c84e2e0d
noinfo/wallets.py (61 changed lines) Normal file → Executable file
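In outline, the commit creates one human-readable timestamp when the script starts and threads it into the %-formatted HTML templates as an extra named key (html_year_index and html_person gain an "As of %(timestamp)s" line). A minimal sketch of that pattern, in the file's own Python 2 style; the template string below is a simplified stand-in, not the real one from wallets.py:

    from datetime import datetime

    # Format the current local time once, e.g. "14-Mar-2020 (18:05)".
    timestamp = datetime.now(tz=None).strftime("%d-%b-%Y (%H:%M)")

    # %-style template with named keys, standing in for html_person / html_year_index.
    html_person = "<H1>%(person)s</H1>\n<p>As of %(timestamp)s</p>\n"

    # Each page render now supplies the extra "timestamp" key alongside the others.
    print html_person % {"person": "Fred Smith", "timestamp": timestamp}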
@@ -1,43 +1,55 @@
#!/usr/bin/env python
import sys, os, operator, urllib, json, re, time
from datetime import datetime
# 2017 originally by Martin Green
# 2018-08-27 edited Philip Sargent
# 2019-03-02 extended to take command line argument of loser_dir and set mod time of index.html to be sane as json file
# 2019-03-02 extended to take command line argument of loser_dir and set mod time of index.html to be same as json file
# 2019-12-17 extra output of links to troggle-generated trip data
# 2019-12-31 bits to make website link-checker not barf so much. Added endswith() to .startswith() for notes, elev, plan filenames
# 2020-01-21 Now we are using Windows10-WSL1, +links to expedition logbook on every generated page
# 2020-03-14 Adding timestamp to visible outputs
loser_dir = "/home/expo/loser/"
#loser_dir = "/mnt/d/CUCC-Expo/loser/" # when running on Win10/bash
loser_dir = "/home/expo/loser"
#loser_dir = "/mnt/d/CUCC-Expo/loser/" # when running on Win10-WSL1
#loser_dir = "/media/philip/SD-huge/CUCC-Expo/loser/" # when running on xubuntu laptop 'barbie'
if len(sys.argv) > 1 :
if sys.argv[1] != "":
loser_dir = sys.argv[1]
print "Loser repo is assumed to be in: " + loser_dir
dateTimeObj=datetime.now(tz=None)
timestamp = dateTimeObj.strftime("%d-%b-%Y (%H:%M)")
print "Loser repo (for svx files) is assumed to be in: " + loser_dir + "/"
drawings_dir = loser_dir[0:len(loser_dir)-5] + "drawings"
print "Drawings repo (for drawings files) is assumed to be in: " + drawings_dir + "/"
html_base = "<html><body>%(body)s</body></html>"
html_year_index = html_base % {"body": "<H1>%(year)s surveys: wallets status</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\n<H2>Persons</H2>\n<UL>\n%(persons)s</UL>\n<H2>Wallets</H2>\n<table>%(wallets)s</table>\n<H2>Needing Scanning</H2>\n<UL>\n%(needing scanning)s</ul>\n<H2>Website (Guidebook description) needing updating\n</H2>\n<UL>\n%(website needing updating)s</ul>\n"}
html_year_index = html_base % {"body": "<H1>%(year)s surveys: wallets status</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\nAs of %(timestamp)s\n<H2>Persons</H2>\n<UL>\n%(persons)s</UL>\n<H2>Wallets</H2>\n<table>%(wallets)s</table>\n<H2>Needing Scanning</H2>\n<UL>\n%(needing scanning)s</ul>\n<H2>Website (Guidebook description) needing updating\n</H2>\n<UL>\n%(website needing updating)s</ul>\n"}
html_year_person = "<li><a href='%(person)s.html'>%(person)s</a><UL>\n%(complaints)s</ul></li>\n"
html_year_wallet_entry = "<tr><td><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></td> <td>%(complaints)s</td></tr>\n"
html_person_wallet_entry = "<li><a href='%(walletindex)s'>%(walletname)s</a> <UL>\n%(complaints)s</ul></li>\n"
html_year_scanning_entry = "<li><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></li>\n"
html_wallet_file_entry = "<li><a href='%(fileurl)s'>%(filename)s</a></li>\n"
html_wallet_index = html_base % {"body": "<H1>%(title)s : %(cave)s : %(name)s</H1>\n<p>Date: %(date)s</p><p>People: %(people)s</p>\n<p>Cave <a href='%(description)s'>Guidebook description</a> - %(description_needed)s \n<p>Survex file:<br> Local (Windows): <a href='%(loser_dirw)s%(survex)s' download>%(loser_dirw)s%(survex)s</a><br> Local (Linux): <a href='%(loser_dir)s%(survex)s' download>%(loser_dir)s%(survex)s</a><br> Server: <a href='http://expo.survex.com/survexfile/%(survex)s' download>%(survex)s</a></p><a href='../index.html'>Wallet index for this year</a> </p>\n<H2>Issues</H2>\n%(complaints)s\n<H2>Files</H2>\n<UL>\n%(files)s</UL>\n"}
html_wallet_index = html_base % {"body": "<H1>%(title)s : %(cave)s : %(name)s</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\n<p>Date: %(date)s</p><p>People: %(people)s</p>\n<p>Cave <a href='%(description)s'>Guidebook description</a> - %(description_needed)s \n<p>Survex file:<br> <br> Local: <a href='file:///%(loser_dir)s/%(survex)s' download>file:///%(loser_dir)s/%(survex)s</a><br> Server: <a href='http://expo.survex.com/survexfile/%(survex)s' download>http://expo.survex.com/survexfile/%(survex)s</a></p><a href='../index.html'>Wallet index for this year</a><br/>Local location for ::loser:: repo specified on command line is <a href='file:///%(loser_dir)s'>%(loser_dir)s</a>. </p>\n<H2>Issues</H2>\n%(complaints)s\n<H2>Files</H2>\n<UL>\n%(files)s</UL>\n"}
html_survex_required = {True: "Survex ", False: ""}
html_plan_scanned = {True: "", False: "Plan "}
html_elev_scanned = {True: "", False: "Elev "}
html_description_written = {True: "", False: "Desc "}
html_qms_written = {True: "", False: "QMs "}
html_status = {True: "Issues: ", False: ""}
html_person = html_base % {"body": "<H1>%(person)s</H1><H2>Outstanding Wallets</H2><UL>\n%(wallets)s</UL>"}
html_person = html_base % {"body": "<H1>%(person)s</H1>\n<p>List of trips: <a href=\"http://expo.survex.com/expedition/%(year)s\">expedition/%(year)s</a> - troggle-processed .svx files and logbook entries on server</p>\n<H2>Outstanding Wallets</H2>\nAs of %(timestamp)s\n<UL>\n%(wallets)s</UL>"}
html_complaint_items = "<li>%(count)i %(complaint)s</li>"
html_items = "<li>%s</li>"
blank_json = {
"cave": "",
"date": "",
"description url": "",
"description url": "/caves",
"description written": False,
"electronic survey": False,
"elev drawn": False,
@@ -53,6 +65,7 @@ blank_json = {
"survex not required": False,
"website updated": False}
#need to use wallets as a dict/tuple (id,cave,name) - not sure how.
wallets = []
wallets_needing_scanning = set()
@@ -106,7 +119,8 @@ for item in sorted(os.listdir(".")):
print "Writing file %s" % (contents_path)
json_file = open(contents_path, "w")
json.dump(data, json_file, indent = 1)
json_file.close()
json_file.close()
# Get modification time of contents.json
# print("json last modified: %s" % time.ctime(os.path.getmtime(contents_path)))
json_mtime = os.path.getmtime(contents_path)
@@ -131,36 +145,39 @@ for item in sorted(os.listdir(".")):
#Notes
notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
notes_scanned = reduce(operator.or_, [f.endswith("note") for f in files], notes_scanned)
if not notes_scanned:
complaints.append("The notes needs scanning (no noteN.jpg file found)")
complaints.append("The notes needs scanning (no noteNN.jpg or XXnote.jpg file found)")
wallets_needing_scanning.add(item)
#Plan drawing required
plan_scanned = reduce(operator.or_, [f.startswith("plan") for f in files], False)
plan_scanned = reduce(operator.or_, [f.endswith("plan") for f in files], plan_scanned)
plan_drawing_required = not (plan_scanned or data["plan drawn"])
if plan_drawing_required:
complaints.append("The plan needs drawing (no planN.jpg file found)")
person_complaints.append(" plan(s) needs drawing (no planN.jpg file found)")
complaints.append("The plan needs drawing (no planNN.jpg or XXplan.jpg file found)")
person_complaints.append(" plan(s) needs drawing (no planNN.jpg or XXplan.jpg file found)")
if not plan_drawing_required and not plan_scanned:
complaints.append("The plan needs <em>scanning</em> (no planN.jpg file found)")
complaints.append("The plan needs <em>scanning</em> (no planNN.jpg or XXplan.jpg file found)")
wallets_needing_scanning.add(item)
#Elev drawing required
elev_scanned = reduce(operator.or_, [f.startswith("elev") for f in files], False)
elev_scanned = reduce(operator.or_, [f.endswith("elev") for f in files], elev_scanned)
elev_drawing_required = not (elev_scanned or data["elev drawn"])
if elev_drawing_required:
complaints.append("The elev needs drawing (no elevN.jpg file found)")
person_complaints.append(" elev(s) needs drawing (no elevN.jpg file found)")
complaints.append("The elev needs drawing (no elevNN.jpg or XXelev.jpg file found)")
person_complaints.append(" elev(s) needs drawing (no elevNN.jpg or XXelev.jpg file found)")
if not elev_drawing_required and not elev_scanned:
complaints.append("The elev needs <em>scanning</em> (no elevN.jpg file found)")
complaints.append("The elev needs <em>scanning</em> (no elevNN.jpg or XXelev.jpg file found)")
wallets_needing_scanning.add(item)
#Description
if not data["description written"]:
complaints.append("The description needs writing")
person_complaints.append(" description(s) needs writing")
description_needed = "Not present, needs doing."
description_needed = "A description is indicated as being needed, so may need adding into this cave page."
#QMS
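An aside on the idiom in the hunk above: reduce(operator.or_, bools, start) is the Python 2-era spelling of any(), and the 2019-12-31 change adds an endswith() pass alongside the original startswith() pass over the wallet's file listing. A rough, self-contained illustration; the file names are invented:

    import operator

    files = ["notes1.jpg", "plan1.jpg", "walls-elev"]  # invented example listing

    # reduce(operator.or_, bools, False) is True if any element is True.
    notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
    notes_scanned = reduce(operator.or_, [f.endswith("note") for f in files], notes_scanned)

    # The same kind of test spelled with any():
    plan_scanned = any(f.startswith("plan") or f.endswith("plan") for f in files)

    print notes_scanned, plan_scanned  # True True for the listing above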
@@ -170,12 +187,12 @@ for item in sorted(os.listdir(".")):
#Website
if not data["website updated"]:
complaints.append("The guidebook description on the website needs updating")
complaints.append("The website is marked as needing updating (using the guidebook description)")
website_needing_updating.add(item)
#Electronic Surveys
if not data["electronic survey"]:
complaints.append("Tunnel / Therion files need drawing")
complaints.append("Tunnel / Therion drawing files need drawing")
if data["survex file"]:
survex_description = data["survex file"]
@@ -183,7 +200,7 @@ for item in sorted(os.listdir(".")):
survex_description = "Not specified"
wallet_index_file = open(os.path.join(item, "index.html"), "w")
wallet_index_file.write(html_wallet_index % {"title": item,
wallet_index_file.write(html_wallet_index % {"title": item, "year": year,
"cave": data["cave"],
"name": data["name"],
"date": data["date"],
@@ -237,7 +254,7 @@ for person, person_wallets in people.items():
person_summary = dict(person_summary)
year_index_file = open("index.html", "w")
year_index_file.write(html_year_index % {"year": year, "persons": reduce(operator.add, [html_year_person % {"person": person,
year_index_file.write(html_year_index % {"year": year, "timestamp": timestamp, "persons": reduce(operator.add, [html_year_person % {"person": person,
"complaints": reduce(operator.add,
[html_complaint_items % {"complaint": complaint,
"count": count}
@@ -270,7 +287,7 @@ year_index_file.close()
for person, item_complaint_list in people.items():
person_file = open(person + ".html", "w")
person_file.write(html_person % {"person": person,
person_file.write(html_person % {"person": person, "year": year, "timestamp": timestamp,
"wallets": reduce(operator.add, [html_person_wallet_entry % {"walletname": wallet,
"walletindex": urllib.quote(wallet) + "/index.html",
"complaints": reduce(operator.add,
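One detail visible at the very end of the diff: the per-person wallet links are built with urllib.quote (Python 2) so that characters such as '#' in a wallet name stay valid in the URL. For example (the wallet name here is invented):

    import urllib

    wallet = "2018#42"  # invented wallet name
    print urllib.quote(wallet) + "/index.html"  # -> 2018%2342/index.html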