import csv
import os
import re
# import pickle
# import shelve
import time
from random import randint
from datetime import datetime, date
from pathlib import Path
from django.conf import settings
from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone, make_aware
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.utils import TROG, save_carefully
from troggle.core.models.caves import Cave, LogbookEntry, PersonTrip, GetCaveLookup
from parsers.people import GetPersonExpeditionNameLookup
'''
Parses and imports logbooks in all their wonderful confusion
# When we edit logbook entries, allow a "?" after any piece of data to say we've frigged it and
# it can be checked up later from the hard-copy if necessary; or it's not possible to determine (name, trip place, etc)
'''
todo='''
- refactor everything with some urgency, esp. LoadLogbookForExpedition()
- remove the TROG and lbo things since we need the database for multiuser access? Or not?
- profile the code to find bad repetitive things, of which there are many.
- far too many uses of Django field dereferencing to get values, which is SLOW
- Logbooks 1987, 1988, 1989 all crash on MySQL - but not SQLite - with a db constraint failure. Edit the logbook to fix.
- import/parse/re-export-as-html the 'artisanal-format' old logbooks so that
we keep only a modern HTML5 format. Then we can retire the old parsers and reduce the
volume of code here substantially.
- rewrite to use generators rather than storing everything intermediate in lists - to reduce memory impact.
- We should ensure logbook.html is utf-8 and stop this crap:
        file_in = open(logbookfile,'rb')
        txt = file_in.read().decode("latin1")
  (a possible utf-8-first fallback is sketched in the comment just after this todo list)
- this is a slow and uncertain function: cave = getCaveByReference(caveRef)
- use Fixtures https://docs.djangoproject.com/en/4.1/ref/django-admin/#django-admin-loaddata to cache
data for old logbooks. New design needed, with a mechanism for flagging fixtures as outdated after edits.
'''
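# A possible shape for the utf-8 fix flagged in the todo list above - a hypothetical helper,
# not called from anywhere yet: try utf-8 first and fall back to latin1 only for the few
# legacy files that have not been converted.
# def read_logbook_text(logbookfile):
#     try:
#         return Path(logbookfile).read_text(encoding="utf-8")
#     except UnicodeDecodeError:
#         return Path(logbookfile).read_text(encoding="latin1")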
MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200
BLOG_PARSER_SETTINGS = {
# "2017": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
"2018": ("ukcavingblog.html", "parser_blog"),
"2019": ("ukcavingblog.html", "parser_blog"),
"2022": ("ukcavingblog.html", "parser_blog"),
}
DEFAULT_LOGBOOK_FILE = "logbook.html"
DEFAULT_LOGBOOK_PARSER = "parser_html"
# All years since 2010 use the default value for Logbook parser
LOGBOOK_PARSER_SETTINGS = {
"2019": ("logbook.html", "parser_html"),
"2010": ("logbook.html", "parser_html"),
# "2009": ("2009logbook.txt", "wiki_parser"), # converted to html
# "2008": ("2008logbook.txt", "wiki_parser"), # converted to html
"2009": ("logbook.html", "parser_html"),
"2008": ("logbook.html", "parser_html"),
"2007": ("logbook.html", "parser_html"),
"2006": ("logbook.html", "parser_html"),
# "2006": ("logbook/logbook_06.txt", "wiki_parser"), # converted to html
"2006": ("logbook.html", "parser_html"),
"2005": ("logbook.html", "parser_html"),
"2004": ("logbook.html", "parser_html"),
"2003": ("logbook.html", "parser_html"),
"2002": ("logbook.html", "parser_html"),
"2001": ("log.htm", "parser_html_01"),
"2000": ("log.htm", "parser_html_01"),
"1999": ("log.htm", "parser_html_01"),
"1998": ("log.htm", "parser_html_01"),
"1997": ("log.htm", "parser_html_01"),
"1996": ("log.htm", "parser_html_01"),
"1995": ("log.htm", "parser_html_01"),
"1994": ("log.htm", "parser_html_01"),
"1993": ("log.htm", "parser_html_01"),
"1992": ("log.htm", "parser_html_01"),
"1991": ("log.htm", "parser_html_01"),
"1990": ("log.htm", "parser_html_01"),
"1989": ("log.htm", "parser_html_01"), #crashes MySQL
"1988": ("log.htm", "parser_html_01"), #crashes MySQL
"1987": ("log.htm", "parser_html_01"), #crashes MySQL
"1985": ("log.htm", "parser_html_01"),
"1984": ("log.htm", "parser_html_01"),
"1983": ("log.htm", "parser_html_01"),
"1982": ("log.htm", "parser_html_01"),
}
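# For illustration only - the real loader (LoadLogbookForExpedition, elsewhere in this file) may
# differ in detail: a year resolves to a (filename, parser-function-name) pair, falling back to
# the defaults for any year not listed above, e.g.
#   yearfile, yearparser = LOGBOOK_PARSER_SETTINGS.get(year, (DEFAULT_LOGBOOK_FILE, DEFAULT_LOGBOOK_PARSER))
#   parserfunc = globals()[yearparser]    # e.g. parser_html or parser_html_01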
entries = { "2022": 64, "2019": 56, "2018": 75, "2017": 76, "2016": 81, "2015": 79,
"2014": 65, "2013": 51, "2012": 75, "2011": 68, "2010": 22, "2009": 53,
"2008": 49, "2007": 113, "2006": 60, "2005": 55, "2004": 76, "2003": 42, "2002": 31,
"2001": 48, "2000": 54, "1999": 79, "1998": 43, "1997": 53, "1996": 95, "1995": 42,
"1994": 32, "1993": 41, "1992": 62, "1991": 39, "1990": 87, "1989": 1,"1988": 1,"1987": 1,
"1985": 24, "1984": 32, "1983": 52, "1982": 42,}
# Logbooks log.htm exist for 1983, 84, 85, 87, 88, 89 but have no full-working parser, or need hand-editing.
logentries = [] # the entire logbook for one year is a single object: a list of entries
noncaveplaces = [ "Journey", "Loser Plateau", "UNKNOWN", 'plateau',
'base camp', 'basecamp', 'top camp', 'topcamp' ]
logdataissues = TROG['issues']['logdataissues']
trips ={}
#
# the logbook loading section
#
def set_trip_id(year, seq):
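    """ Returns a unique sequential trip identifier within one year,
        e.g. set_trip_id("1996", 3) -> "1996_s03" """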
tid= f"{year}_s{seq:02d}"
return tid
rx_tripperson = re.compile(r'(?i)<u>(.*?)</u>$')
rx_round_bracket = re.compile(r"[\(\[].*?[\)\]]")
def GetTripPersons(trippeople, expedition, logtime_underground, tid=None):
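    """ Splits a 'trippeople' string, e.g. "Fred, Wilma + Barney" (illustrative names), into
        (PersonExpedition, time_underground) pairs for this expedition, and identifies the author:
        the name marked up with <u>..</u>, otherwise the last valid name in the list. """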
res = [ ]
author = None
# print(f'# {tid}')
# print(f" - {tid} '{trippeople}' ")
    for tripperson in re.split(r",|\+|&amp;|&(?!\w+;)| and ", trippeople):
tripperson = tripperson.strip()
        # mul = re.match(r"(?i)<u>(.*?)</u>$", tripperson)
mul = rx_tripperson.match(tripperson)
if mul:
tripperson = mul.group(1).strip()
if tripperson and tripperson[0] != '*':
tripperson = re.sub(rx_round_bracket, "", tripperson).strip()
if tripperson =="Wiggy":
tripperson = "Phil Wigglesworth"
if tripperson =="Animal":
tripperson = "Mike Richardson"
if tripperson =="MikeTA":
tripperson = "Mike Richardson"
if tripperson =="CavingPig":
tripperson = "Elaine Oliver"
if tripperson =="nobrotson":
tripperson = "Rob Watson"
if tripperson =="Tinywoman":
tripperson = "Nadia"
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear:
message = f" ! - {expedition.year} No name match for: '{tripperson}' in entry {tid=} for this expedition year."
print(message)
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues[tid]=message
res.append((personyear, logtime_underground))
if mul:
author = personyear
if not author:
if not res:
return "", 0
author = res[-1][0] # the previous valid person and a time of 0 hours
#print(f" - {tid} [{author.person}] '{res[0][0].person}'...")
return res, author
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, tid=None):
""" saves a logbook entry and related persontrips
Does NOT save the expeditionday_id - all NULLs. why? Because we are deprecating expeditionday !
troggle.log shows that we are creating lots of duplicates, which is no no problem with SQL as they just overwrite but we are saving the same thing too many times..
Until 18 Dec.2022, this was overwriting logbook entries for the same date with the same title, because
lookupAttribs={'date':date, 'title':title}
"""
# Nasty hack, must tidy this up..
if logtime_underground:
try:
logtime_underground = float(logtime_underground)
except:
# print(f"logtime_underground = {logtime_underground}")
tu_match = re.match(r"(T/U:\s*)?(\d+[.]?\d*).*", logtime_underground)
if tu_match:
# print(f"logtime_underground = {tu_match.group(2)}")
logtime_underground = float(tu_match.group(2))
else:
logtime_underground = 0
else:
logtime_underground = 0
try:
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground, tid=tid)
# print(f" - {author} - {logtime_underground}")
except:
message = f" ! - {expedition.year} Skipping logentry: {title} - GetTripPersons FAIL"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues["title"]=message
print(message)
raise
return
if not author:
message = f" ! - {expedition.year} Warning: logentry: {title} - no expo member author for entry '{tid}'"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues["title"]=message
print(message)
#return
# This needs attention. The slug field is derived from 'title'
# both GetCaveLookup() and GetTripCave() need to work together better. None of this data is *used* though?
#tripCave = GetTripCave(place):
lplace = place.lower()
cave=None
if lplace not in noncaveplaces:
cave = GetCaveLookup().get(lplace)
y = str(date)[:4]
text = text.replace(' src="', f' src="/years/{y}/' )
text = text.replace(" src='", f" src='/years/{y}/" )
text = text.replace(f' src="/years/{y}//years/{y}/', f' src="/years/{y}/' )
text = text.replace(f" src='/years/{y}//years/{y}/", f" src='/years/{y}/" )
text = text.replace('\t', '' )
text = text.replace('\n\n\n', '\n\n' )
#Check for an existing copy of the current entry, and save
expeditionday = expedition.get_expedition_day(date)
lookupAttribs={'date':date, 'title':title}
# 'cave' is converted to a string doing this, which renders as the cave slug.
# but it is a db query which we should try to avoid - rewrite this
#NEW slug for a logbook entry here! Unique id + slugified title fragment
if tid is not None:
slug = tid
# slug = tid + "_" + slugify(title)[:10].replace('-','_')
else:
slug = str(randint(1000,9999)) + "_" + slugify(title)[:10].replace('-','_')
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition,
'time_underground':logtime_underground, 'cave_slug':str(cave), 'slug': slug}
# This creates the lbo instance of LogbookEntry
lbo, created=save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
# for PersonTrip time_underground is float (decimal hours)
for tripperson, time_underground in trippersons:
# print(f" - {tid} '{tripperson}' author:{tripperson == author}")
lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo}
nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)}
# this creates the PersonTrip instance.
save_carefully(PersonTrip, lookupAttribs, nonLookupAttribs)
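# Illustrative call with hypothetical values:
#   EnterLogIntoDbase(date(1996, 7, 7), "161", "161 - Kanonenrohr", "<p>Pushed the rift...</p>",
#                     "<u>Fred</u>, Wilma", expedition, "T/U: 5.5 hrs", tid="1996_s03")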
def ParseDate(tripdate, year):
""" Interprets dates in the expo logbooks and returns a correct datetime.date object """
dummydate = date(1970, 1, 1)
month = 1
day = 1
# message = f" ! - Trying to parse date in logbook: {tripdate} - {year}"
# print(message)
try:
mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
if mdatestandard:
if not (mdatestandard.group(1) == year):
message = f" ! - Bad date (year) in logbook: {tripdate} - {year}"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues["tripdate"]=message
return dummydate
else:
year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
elif mdategoof:
if not (not mdategoof.group(3) or mdategoof.group(3) == year[:2]):
message = " ! - Bad date mdategoof.group(3) in logbook: " + tripdate + " - " + mdategoof.group(3)
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues["tripdate"]=message
return dummydate
else:
yadd = int(year[:2]) * 100
day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd
        else:
            message = f" ! - Bad date in logbook: {tripdate} - {year}"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues["tripdate"]=message
            return dummydate
return date(year, month, day)
except:
message = f" ! - Failed to parse date in logbook: {tripdate} - {year}"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues["tripdate"]=message
        return dummydate
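# Illustrative examples of the two date styles ParseDate() accepts:
#   ParseDate("1996-07-07", "1996")  ->  date(1996, 7, 7)
#   ParseDate("7/7/96", "1996")      ->  date(1996, 7, 7)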
# (2006 - not any more), 2008 - 2009
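# The wiki-format text logbooks are made up of blocks like this (illustrative, not a real entry):
#   ===2008-07-15|Plateau - 204 Gaffered|Fred, Wilma===
#   T/U: 5 hrs
#   Walked up to top camp, rigged the entrance series, ...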
def wiki_parser(year, expedition, txt, seq=""):
global logentries
global logdataissues
logbook_entry_count = 0
trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
for triphead, triptext in trippara:
logbook_entry_count += 1
tid = set_trip_id(year,logbook_entry_count)
tripheadp = triphead.split("|")
        if len(tripheadp) != 3:
            message = f" ! - Bad number of items in trip header in logbook: {triphead}"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues["tripdate"]=message
            print(message)
            continue
tripdate, tripplace, trippeople = tripheadp
tripsplace = tripplace.split(" - ")
tripcave = tripsplace[0].strip()
if len(tripsplace) == 1:
tripsplace = tripsplace[0]
else:
tripsplace = tripsplace[1]
#tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
tul = re.findall(r"T/U:?\s*(\d+[.]?\d*)\s*(hr|hrs|hours)?.*", triptext)
if tul:
tu = tul[0][0]
else:
tu = ""
print(f"! LOGBOOK {year} {logbook_entry_count:2} {len(triptext):4} T/U:{tu} '{tripcave} - {tripsplace}' ")
ldate = ParseDate(tripdate.strip(), year)
tripid = set_trip_id(year,logbook_entry_count)
        ltriptext = re.sub(r"\n", "<br /> \n", triptext)
        ltriptext = ltriptext.replace("<br /> \n<br /> \n", "<br /> \n")
triptitle = f'{tripcave} - {tripsplace}'
entrytuple = (ldate, tripcave, triptitle, ltriptext,
trippeople, expedition, tu, tripid)
logentries.append(entrytuple)
# 2002, 2004 - now
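# These years use one <hr />-separated block per trip, roughly of this shape
# (illustrative sketch, simplified):
#   <hr />
#   <div class="tripdate" id="t2019-07-21a">2019-07-21</div>
#   <div class="trippeople"><u>Fred</u>, Wilma</div>
#   <div class="triptitle">264 - Rigging the entrance series</div>
#   <p>... trip report text ...</p>
#   <div class="timeug">T/U 6 hrs</div>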
def parser_html(year, expedition, txt, seq=""):
global logentries
global logdataissues
# extract front material and stash for later use when rebuilding from list of entries
    headmatch = re.match(r"(?i)(?s).*?(?=<hr)", txt)
    headpara = headmatch.group(0).strip() if headmatch else ""

    tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
    logbook_entry_count = 0
    for trippara in tripparas:
        logbook_entry_count += 1
        tid = set_trip_id(year, logbook_entry_count)

        s = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)?  # second date
                         \s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
                         \s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
                         \s*<div\s+class="trippeople">\s*(.*?)</div>
                         \s*<div\s+class="triptitle">\s*(.*?)</div>
                         ([\s\S]*?)
                         \s*(?:<div\s+class="timeug">\s*(.*?)</div>)?
                         \s*$
                      ''', trippara)
        if not s:
            message = f" ! - {year} Can't parse logbook entry {logbook_entry_count}: '{trippara[:50]}'..."
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues[tid] = message
            print(message)
            continue
        tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()

        ldate = ParseDate(tripdate.strip(), year)
        triptitles = triptitle.split(" - ")
        tripcave = triptitles[0].strip()
        ltriptext = re.sub(r"</p>", "", triptext)
        ltriptext = re.sub(r"<p>", "<br /><br />", ltriptext).strip()

        entrytuple = (ldate, tripcave, triptitle, ltriptext,
                      trippeople, expedition, tu, tripid1)
        logentries.append(entrytuple)
# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
# trying it out for years 1982 - 1990 too. Some logbook editing required by hand.
def parser_html_01(year, expedition, txt, seq=""):
global logentries
global logdataissues
errorcount = 0
# extract front material and stash for later use when rebuilding from list of entries
    headmatch = re.match(r"(?i)(?s).*?(?=<hr)", txt)
    headpara = headmatch.group(0).strip() if headmatch else ""

    tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
    logbook_entry_count = 0
    for trippara in tripparas:
        logbook_entry_count += 1
        tid = set_trip_id(year, logbook_entry_count)
        try:
            # the first paragraph is the header: "date | title | people"
            s = re.match(r"(?i)(?s)\s*(?:<p>)?(.*?)</?p>(.*)$", trippara)
            if not s:
                message = f" ! - Skipping logentry {year}: failure to parse header in {tid}: {trippara[:300]}..."
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid] = message
                print(message)
                break
            tripheader, triptext = s.group(1), s.group(2)
            tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)
            tripdate, triptitle, trippeople = tripheader.split("|")
            ldate = ParseDate(tripdate.strip(), year)
            # the T/U (time underground) record, if present, starts its own paragraph
            mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
            if mtu:
                tu = mtu.group(1)
                triptext = triptext[:mtu.start(0)] + triptext[mtu.end():]
            else:
                tu = ""

            triptitles = triptitle.split(" - ")
            tripcave = triptitles[0].strip()
            ltriptext = triptext

            # strip trailing links, separators and "(same day)" notes
            mtail = re.search(r'(?:<a\s+href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
            if mtail:
                ltriptext = ltriptext[:mtail.start(0)]
            ltriptext = re.sub(r"</p>", "", ltriptext)
            ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
            ltriptext = re.sub(r"</?u>", "_", ltriptext)
            ltriptext = re.sub(r"</?i>", "''", ltriptext)
            ltriptext = re.sub(r"</?b>", "'''", ltriptext)
            ltriptext = re.sub(r"<p>", "<br /><br />", ltriptext).strip()
if ltriptext == "":
message = " ! - Zero content for logbook entry!: " + tid
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues[tid]=message
print(message)
entrytuple = (ldate, tripcave, triptitle, ltriptext,
trippeople, expedition, tu, tid)
logentries.append(entrytuple)
except:
message = f" ! - Skipping logentry {year} due to exception in: {tid}"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues[tid]=message
print(message)
errorcount += 1
raise
if errorcount >5 :
message = f" !!- TOO MANY ERRORS - aborting at '{tid}' logbook: {year}"
DataIssue.objects.create(parser='logbooks', message=message)
logdataissues[tid]=message
print(message)
return
def parser_blog(year, expedition, txt, sq=""):
    '''Parses the format of web pages collected as "Save As HTML" from the UK Caving blog website.
Note that the entries have dates and authors, but no titles.
'''
global logentries
global logdataissues
errorcount = 0
tripheads = re.findall(r" (\d\d\d\d)-(\d\d)-(\d\d)', re.S)
# expeditionYearRegex = re.compile(r'(.*?)', re.S)
# titleRegex = re.compile(r'