import os import re import sys import time from datetime import date, datetime from pathlib import Path from random import randint from django.conf import settings from django.template.defaultfilters import slugify from parsers.people import GetPersonExpeditionNameLookup, load_people_expos from troggle.core.models.caves import GetCaveLookup from troggle.core.models.logbooks import LogbookEntry, PersonLogEntry from troggle.core.models.troggle import DataIssue, Expedition from troggle.core.utils import get_process_memory """ Parses and imports logbooks in all their wonderful confusion See detailed explanation of the complete process: https://expo.survex.com/handbook/computing/logbooks-parsing.html """ todo = """ - Most of the time is during the database writing (6s out of 8s). - this is a slow and uncertain function too: cave = getCaveByReference(caveRef) - profile the code to find bad repetitive things, of which there are many. - attach or link a DataIssue to an individual expo (logbook) so that it can be found and deleted - replace explicit 1970 date with a constant EPOCH - rewrite to use generators rather than storing everything intermediate in lists - to reduce memory impact [low priority] - We should ensure logbook.html is utf-8 and stop this crap: file_in = open(logbookfile,'rb') txt = file_in.read().decode("latin1") - use Fixtures https://docs.djangoproject.com/en/4.1/ref/django-admin/#django-admin-loaddata to cache data for old logbooks? Not worth it.. """ MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200 BLOG_PARSER_SETTINGS = { # no default, must be explicit # "2022": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html # "2019": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html # "2018": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html # "2017": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html } DEFAULT_LOGBOOK_FILE = "logbook.html" DEFAULT_LOGBOOK_PARSER = "parser_html" # All years now (Jan.2023) use the default value for Logbook parser # dont forget to update expoweb/pubs.htm to match. 1982 left as reminder of expected format. LOGBOOK_PARSER_SETTINGS = { "1982": ("logbook.html", "parser_html"), } ENTRIES = { "2022": 90, "2019": 55, "2018": 95, "2017": 74, "2016": 86, "2015": 80, "2014": 67, "2013": 52, "2012": 76, "2011": 71, "2010": 22, "2009": 53, "2008": 49, "2007": 113, "2006": 60, "2005": 55, "2004": 76, "2003": 42, "2002": 31, "2001": 49, "2000": 54, "1999": 79, "1998": 43, "1997": 53, "1996": 95, "1995": 42, "1994": 32, "1993": 41, "1992": 62, "1991": 39, "1990": 87, "1989": 63, "1988": 61, "1987": 34, "1985": 24, "1984": 32, "1983": 52, "1982": 42, } logentries = [] # the entire logbook for one year is a single object: a list of entries noncaveplaces = ["travel", "Journey", "Loser Plateau", "UNKNOWN", "plateau", "base camp", "basecamp", "top camp", "topcamp"] def set_trip_id(year, seq): tid = f"{year}_s{seq:02d}" return tid rx_tripperson = re.compile(r"(?i)(.*?)$") rx_round_bracket = re.compile(r"[\(\[].*?[\)\]]") def GetTripPersons(trippeople, expedition, logtime_underground, tid=None): res = [] author = None # print(f'# {tid}') # print(f" - {tid} '{trippeople}' ") for tripperson in re.split(r",|\+|&|&(?!\w+;)| and ", trippeople): tripperson = tripperson.strip() # author_u = re.match(r"(?i)(.*?)$", tripperson) author_u = rx_tripperson.match(tripperson) if author_u: tripperson = author_u.group(1).strip() if tripperson: if tripperson[0] != "*": # a name prefix of "*" is special tripperson = re.sub(rx_round_bracket, "", tripperson).strip() # Whacky aliases all handled in GetPersonExpeditionNameLookup() personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower()) if not personyear: message = f" ! - {expedition.year} No name match for: '{tripperson}' in entry {tid=} for this expedition year." print(message) DataIssue.objects.create(parser="logbooks", message=message) res.append((personyear, logtime_underground)) if author_u: author = personyear else: # a person but with * prefix. Ignored everywhere. # print(f" ! - {expedition.year} * person : {tripperson}") pass if not author: if not res: return "", 0 author = res[-1][0] # the previous valid person and a time of 0 hours # print(f" - {tid} [{author.person}] '{res[0][0].person}'...") return res, author def tidy_time_underground(logtime_underground): # Nasty hack, must tidy this up.. if logtime_underground: try: logtime_underground = float(logtime_underground) except: # print(f"logtime_underground = {logtime_underground}") tu_match = re.match(r"(T/U:\s*)?(\d+[.]?\d*).*", logtime_underground) if tu_match: # print(f"logtime_underground = {tu_match.group(2)}") logtime_underground = float(tu_match.group(2)) else: logtime_underground = 0 else: logtime_underground = 0 return logtime_underground def tidy_trip_persons(trippeople, title, expedition, logtime_underground, tid): try: trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground, tid=tid) # print(f" - {author} - {logtime_underground}") except: message = f" ! - {expedition.year} Skipping logentry: {title} - GetTripPersons FAIL" DataIssue.objects.create(parser="logbooks", message=message) print(message) # raise return if not author: message = f" ! - {expedition.year} Warning: logentry: {title} - no expo member author for entry '{tid}'" DataIssue.objects.create(parser="logbooks", message=message) print(message) return trippersons, author def tidy_trip_cave(place): # GetCaveLookup() need to work better. None of this data is *used* though? # 'tripcave' is converted to a string doing this, which renders as the cave slug. lplace = place.lower() cave = None if lplace not in noncaveplaces: cave = GetCaveLookup().get(lplace) return cave def tidy_trip_image_urls(text, date): y = str(date)[:4] text = text.replace(' src="', f' src="/years/{y}/') text = text.replace(" src='", f" src='/years/{y}/") text = text.replace(f' src="/years/{y}//years/{y}/', f' src="/years/{y}/') text = text.replace(f" src='/years/{y}//years/{y}/", f" src='/years/{y}/") text = text.replace("\t", "") text = text.replace("\n\n\n", "\n\n") return text def tidy_tid(tid, title): if tid is not None: return tid # print(f"! {title=} ") tid = str(randint(1000, 9999)) + "_" + slugify(title)[:10].replace("-", "_") return tid def store_entry_into_database(date, place, tripcave, title, text, trippersons, author, expedition, logtime_underground, tid): """saves a single logbook entry and related personlogentry items We could do a bulk update to save all the entries, but then we would need to do a query on each one to get the primary key to asign to the PersonLogEntries. So overall probably not much faster ? """ nonLookupAttribs = { "place": place, "text": text, "expedition": expedition, "time_underground": logtime_underground, "cave_slug": str(tripcave), "slug": tid, } lookupAttribs = {"date": date, "title": title} lbo = LogbookEntry.objects.create(**nonLookupAttribs, **lookupAttribs) pt_list = [] for tripperson, time_underground in trippersons: lookupAttribs = {"personexpedition": tripperson, "logbook_entry": lbo} # lbo is primary key nonLookupAttribs = {"time_underground": time_underground, "is_logbook_entry_author": (tripperson == author)} pt_list.append(PersonLogEntry(**nonLookupAttribs, **lookupAttribs)) PersonLogEntry.objects.bulk_create(pt_list) def parser_date(tripdate, year): """Interprets dates in the expo logbooks and returns a correct datetime.date object""" dummydate = date(1970, 1, 1) # replace with _EPOCH month = 1 day = 1 # message = f" ! - Trying to parse date in logbook: {tripdate} - {year}" # print(message) try: mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate) mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate) if mdatestandard: if not (mdatestandard.group(1) == year): message = f" ! - Bad date (year) in logbook: {tripdate} - {year}" DataIssue.objects.create(parser="logbooks", message=message) return dummydate else: year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3)) elif mdategoof: if not (not mdategoof.group(3) or mdategoof.group(3) == year[:2]): message = " ! - Bad date mdategoof.group(3) in logbook: " + tripdate + " - " + mdategoof.group(3) DataIssue.objects.create(parser="logbooks", message=message) return dummydate else: yadd = int(year[:2]) * 100 day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd else: year = 1970 # replace with _EPOCH message = f" ! - Bad date in logbook: {tripdate} - {year}" DataIssue.objects.create(parser="logbooks", message=message) return date(year, month, day) except: message = f" ! - Failed to parse date in logbook: {tripdate} - {year}" DataIssue.objects.create(parser="logbooks", message=message) return datetime.date(1970, 1, 1) # replace with _EPOCH def parser_html(year, expedition, txt, seq=""): """This uses some of the more obscure capabilities of regular expressions, see https://docs.python.org/3/library/re.html You can't see it here, but a round-trip export-then-import will move the endmatter up to the frontmatter. This made sense when translating from parser_html_01 format logfiles, believe me. """ logentries = [] dupl = {} # extract front material and stash for later use when rebuilding from list of entries headmatch = re.match(r"(?i)(?s).*]*>(.*?) 0: frontpath = Path(settings.EXPOWEB, "years", year, "frontmatter.html") with open(frontpath, "w") as front: front.write(headpara + "\n") # extract END material and stash for later use when rebuilding from list of entries endmatch = re.match(r"(?i)(?s).*([\s\S]*?)(?= 0: endpath = Path(settings.EXPOWEB, "years", year, "endmatter.html") with open(endpath, "w") as end: end.write(endpara + "\n") tripparas = re.findall(r"([\s\S]*?)(?=.*?\s*

)? # second date \s*(?:\s*)? \s*(.*?)(?:

)? \s*\s*(.*?) \s*\s*(.*?) ([\s\S]*?) \s*(?:\s*(.*?))? \s*$ """, trippara, ) if s: tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups() else: # allow title and people to be swapped in order msg = f" !- {year} Can't parse:{logbook_entry_count} '{trippara[:50]}'..." print(msg) DataIssue.objects.create(parser="logbooks", message=msg) s2 = re.match( r"""(?x)(?:\s*.*?\s*

)? # second date \s*(?:\s*)? \s*(.*?)(?:

)? \s*\s*(.*?) \s*\s*(.*?) ([\s\S]*?) \s*(?:\s*(.*?))? \s*$ """, trippara, ) if s2: tripid, tripid1, tripdate, triptitle, trippeople, triptext, tu = s2.groups() else: # if not re.search(r"Rigging Guide", trippara): msg = f" !- Logbook. Can't parse entry on 2nd pass:{logbook_entry_count} '{trippara[:50]}'..." print(msg) DataIssue.objects.create(parser="logbooks", message=msg) continue ldate = parser_date(tripdate.strip(), year) triptitles = triptitle.split(" - ") if len(triptitles) >= 2: place = triptitles[0] else: place = "UNKNOWN" tripcontent = re.sub(r"

", "", triptext) tripcontent = re.sub(r"

", "

", tripcontent).strip() triptitle = triptitle.strip() # triptitle must be unique for a given date. We fix this here. check = (ldate, triptitle) if check in dupl: dupl[check] += 1 triptitle = f"{triptitle} #{dupl[check]}" print(f" - {triptitle} -- {ldate}") else: dupl[check] = 1 tu = tidy_time_underground(tu) trippersons, author = tidy_trip_persons(trippeople, triptitle, expedition, tu, tid) tripcave = tidy_trip_cave(place) tripcontent = tidy_trip_image_urls(tripcontent, ldate) tid = tidy_tid(tid, triptitle) entrytuple = (ldate, place, tripcave, triptitle, tripcontent, trippersons, author, expedition, tu, tid) logentries.append(entrytuple) return logentries def parser_blog(year, expedition, txt, sq=""): """Parses the format of web pages collected as 'Save As HTML" from the UK Caving blog website. Note that the entries have dates and authors, but no titles. See detailed explanation of the complete process: https://expo.survex.com/handbook/computing/logbooks-parsing.html https://expo.survex.com/handbook/computing/log-blog-parsing.html This uses some of the more obscure capabilities of regular expressions, see https://docs.python.org/3/library/re.html BLOG entries have this structure:

So the content is nested inside the header. Attachments (images) come after the content. It's a bugger, but it's out of our control. """ logentries = [] tripheads = re.findall( r"
\s*([\s\S]*?)(]*>)([\s\S]*?)(?=[\s\S]*?(?=)", "", attach) attach = re.sub(r")", "", attach) tripcontent = tripstuff[0] + attach # print(f"{i} - {len(tripstuff)} - {tripstuff[1]}") triphead = tripheads[i] logbook_entry_count += 1 tid = set_trip_id(year, logbook_entry_count) + "_blog" + sq # print(f" - tid: {tid}") # data-author="tcacrossley" match_author = re.search(r".*data-author=\"([^\"]*)\" data-content=.*", triphead) if not (match_author): message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse data-author {tid} {triphead[:400]}..." DataIssue.objects.create(parser="logbooks", message=message) print(message) break trippeople = match_author.group(1) # print(f" - tid: {tid} {trippeople}") # datetime="2019-07-11T13:16:18+0100" match_datetime = re.search(r".*datetime=\"([^\"]*)\" data-time=.*", triphead) if not (match_datetime): message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse datetime {tid} {triphead[:400]}..." DataIssue.objects.create(parser="logbooks", message=message) print(message) break datestamp = match_datetime.group(1) try: tripdate = datetime.fromisoformat(datestamp) except: message = f" ! - FROMISOFORMAT fail logentry {year}:{logbook_entry_count} {tid} '{datestamp}'" DataIssue.objects.create(parser="logbooks", message=message) print(message) # fallback, ignore the timestamp bits: tripdate = datetime.fromisoformat(datestamp[0:10]) # print(f" - tid: {tid} '{trippeople}' '{tripdate}'") # triptitle must have the location then a hyphen at the beginning as it is ignored by export function. We can't know what this is, so we set it as 'Expo' and 'Unknown'. place = "Unknown" # triptitle must be unique for a given date. We can enforce this here. triptitle = f"Expo - UK Caving Blog{sq} post {logbook_entry_count}" tripcontent = re.sub(r"(width=\"\d+\")", "", tripcontent) tripcontent = re.sub(r"height=\"\d+\"", "", tripcontent) tripcontent = re.sub(r"width: \d+px", "", tripcontent) tripcontent = re.sub(r"\n\n+", "\n\n", tripcontent) tripcontent = re.sub(r"", "", tripcontent) tripcontent = f"\n\n\nBlog Author: {trippeople}" + tripcontent trippersons, author = tidy_trip_persons(trippeople, expedition, logtime_underground, tid) tripcave = tidy_trip_cave(place) tripcontent = tidy_trip_image_urls(tripcontent, date) tid = tidy_tid(tid, triptitle) entrytuple = (tripdate, place, tripcave, triptitle, tripcontent, trippersons, author, expedition, tu, tid) logentries.append(entrytuple) return logentries def clean_all_logbooks(): DataIssue.objects.filter(parser="logbooks").delete() LogbookEntry.objects.all().delete() def clean_logbook_for_expedition(expedition): """Only used when loading a single logbook. Deletes database LogBookEntries and DataIssues for this expedition year. """ lbes = LogbookEntry.objects.filter(expedition=expedition).delete() dataissues = DataIssue.objects.filter(parser="logbooks") for di in dataissues: ph = expedition.year if re.search(ph, di.message) is not None: # SLOW just to delete issues for one year # print(f' - CLEANING dataissue {di.message}') di.delete() def parse_logbook_for_expedition(expedition, blog=False): """Parses all logbook entries for one expedition """ global ENTRIES logentries = [] logbook_parseable = False expologbase = Path(settings.EXPOWEB, "years") year = expedition.year expect = ENTRIES[year] # print(" - Logbook for: " + year) if year in LOGBOOK_PARSER_SETTINGS: yearfile, parsefunc = LOGBOOK_PARSER_SETTINGS[year] expedition.logbookfile = yearfile # don't change this if a blog else: yearfile = DEFAULT_LOGBOOK_FILE expedition.logbookfile = DEFAULT_LOGBOOK_FILE # don't change this if a blog parsefunc = DEFAULT_LOGBOOK_PARSER if blog: print(f" - BLOG file {yearfile} using parser {parsefunc}") if year not in BLOG_PARSER_SETTINGS: message = f" ! - Expecting blog parser buut none specified for {year}" DataIssue.objects.create(parser="logbooks", message=message) print(message) else: yearfile, parsefunc = BLOG_PARSER_SETTINGS[year] logbookpath = Path(yearfile) # print(f" - Logbook file {yearfile} using parser {parsefunc}") # expedition.save() for sq in ["", "2", "3", "4"]: # cope with blog saved as many separate files lb = Path(expologbase, year, logbookpath.stem + sq + logbookpath.suffix) if not (lb.is_file()): # print(f" ! End of blog. Next blog file in sequence not there:{lb}") break try: with open(lb, "rb") as file_in: txt = file_in.read().decode("utf-8") logbook_parseable = True except (IOError): logbook_parseable = False print(f" ! Couldn't open logbook as UTF-8 {lb}") except: logbook_parseable = False print(f" ! Very Bad Error opening {lb}") if logbook_parseable: # -------------------- parser = globals()[parsefunc] print(f" - {year} parsing with {parsefunc} - {lb}") logentries = parser(year, expedition, txt, sq) # this launches the right parser # -------------------- if len(logentries) == expect: # print(f"OK {year} {len(logentries):5d} is {expect}\n") pass else: print(f"Mismatch in number of log entries: {year} {len(logentries):5d} is not {expect}\n") return logentries def LoadLogbook(year): """One off logbook for testing purposes, and also reloadable on '/expedition/2022?reload' This is inside an atomic transaction""" expo = Expedition.objects.get(year=year) year = expo.year # some type funny clean_logbook_for_expedition(expo) logentries = [] logentries = parse_logbook_for_expedition(expo) # this actually loads the logbook for one expo if year in BLOG_PARSER_SETTINGS: logentries += parse_logbook_for_expedition(expo, blog=True) # this loads the blog logbook else: print( f" - Not a year with extant blog entries to import: '{year}' not in BLOG_PARSER_SETTINGS {BLOG_PARSER_SETTINGS}" ) for entrytuple in logentries: date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid = entrytuple if expo == expedition: # unneeded check, we zeroed it before filling it #print(f" - {triptitle}") store_entry_into_database(date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid) else: print(f" ! unexpected log entry labelled as '{expedition}' {tid}" ) expo.save() # to save logbook name property def LoadLogbooks(): """This is the master function for parsing all logbooks into the Troggle database. This should be rewritten to use coroutines to load all logbooks from disc in parallel, but must be serialised to write to database as sqlite is single-user. This is inside an atomic transaction. Maybe it shouldn't be.. """ global ENTRIES global logentries allentries = [] mem1 = get_process_memory() print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr) start = time.time() clean_all_logbooks() expos = Expedition.objects.all() if len(expos) <= 1: message = " ! - No expeditions found. Attempting to 'people' first" DataIssue.objects.create(parser="logbooks", message=message) print(message) load_people_expos() expos = Expedition.objects.all() if len(expos) <= 1: message = " ! - No expeditions found, even after attempting to load 'people'. Abort." DataIssue.objects.create(parser="logbooks", message=message) print(message) return noexpo = [ "1986", "2020", "2021", ] # no expo lostlogbook = ["1976", "1977", "1978", "1979", "1980", "1981"] sqlfail = [""] # breaks mysql with db constraint fail - all now fixed.] nologbook = noexpo + lostlogbook + sqlfail nlbe = {} loglist = [] bloglist = [] for expo in expos: year = expo.year if year in sqlfail: print(" - Logbook for: " + year + " NO parsing attempted - known sql failures") message = f" ! - Not even attempting to parse logbook for {year} until code fixed" DataIssue.objects.create(parser="logbooks", message=message) print(message) if year not in nologbook: if year in ENTRIES: loglist.append(expo) else: print(" - No Logbook yet for: " + year) # catch case when preparing for next expo if year in BLOG_PARSER_SETTINGS: bloglist.append(expo) for ex in loglist: logentries = parse_logbook_for_expedition(ex) # this loads the logbook for one expo allentries += logentries for b in bloglist: print(f" - BLOG: {b}") logentries = parse_logbook_for_expedition(b, blog=True) # loads the blog logbook for one expo allentries += logentries print(f"total {len(allentries):,} log entries parsed in all expeditions") mem = get_process_memory() print(f" - MEM: {mem:7.2f} MB in use, {mem-mem1:7.2f} MB more", file=sys.stderr) duration = time.time() - start print(f" - TIME: {duration:7.2f} s", file=sys.stderr) # Now we serially store the parsed data in the database, updating 3 types of object: # - Expedition (the 'logbook.html' value) # - LogBookEntry (text, who when etc.) # - PersonLogEntry (who was on that specific trip mentione din the logbook entry) for entrytuple in allentries: date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid = entrytuple store_entry_into_database(date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid) for expo in expos: expo.save() # to save logbook name property mem = get_process_memory() print(f" - MEM: {mem:7.2f} MB in use, {mem-mem1:7.2f} MB more", file=sys.stderr) duration = time.time() - start print(f" - TIME: {duration:7.2f} s", file=sys.stderr) # dateRegex = re.compile(r'(\d\d\d\d)-(\d\d)-(\d\d)', re.S) # expeditionYearRegex = re.compile(r'(.*?)', re.S) # titleRegex = re.compile(r'

(.*?)

', re.S) # reportRegex = re.compile(r'(.*)\s*', re.S) # personRegex = re.compile(r'(.*?)', re.S) # nameAuthorRegex = re.compile(r'(.*?)', re.S) # TURegex = re.compile(r'([0-9]*\.?[0-9]+)', re.S) # locationRegex = re.compile(r'(.*?)', re.S) # caveRegex = re.compile(r'(.*?)', re.S)