# troggle-unchained/parsers/logbooks.py
import os
import re
import sys
import time
from datetime import date, datetime
from pathlib import Path
from random import randint
from django.conf import settings
from django.template.defaultfilters import slugify
from parsers.people import GetPersonExpeditionNameLookup, load_people_expos
from troggle.core.models.caves import GetCaveLookup
from troggle.core.models.logbooks import LogbookEntry, PersonLogEntry
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.utils import get_process_memory
"""
Parses and imports logbooks in all their wonderful confusion
See detailed explanation of the complete process:
https://expo.survex.com/handbook/computing/logbooks-parsing.html
"""
todo = """
- Most of the time is during the database writing (6s out of 8s).
- profile the code to find bad repetitive things, of which there are many.
- attach or link a DataIssue to an individual expo (logbook) so that it can be found and deleted
- replace explicit 1970 date with a constant EPOCH
- rewrite to use generators rather than storing everything intermediate in lists - to
reduce memory impact [low priority]
- We should ensure logbook.html is utf-8 and stop this crap:
file_in = open(logbookfile,'rb')
txt = file_in.read().decode("latin1")
- use Fixtures https://docs.djangoproject.com/en/dev/ref/django-admin/#django-admin-loaddata to cache
data for old logbooks? Not worth it..
"""
MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200
BLOG_PARSER_SETTINGS = { # no default, must be explicit
# "2022": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
# "2019": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
# "2018": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
# "2017": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
}
DEFAULT_LOGBOOK_FILE = "logbook.html"
DEFAULT_LOGBOOK_PARSER = "parser_html"
# All years now (Jan. 2023) use the default value for the logbook parser.
# Don't forget to update expoweb/pubs.htm to match. 1982 is left as a reminder of the expected format.
LOGBOOK_PARSER_SETTINGS = {
"1982": ("logbook.html", "parser_html"),
}
LOGBOOKS_DIR = "years" # subfolder of settings.EXPOWEB
ENTRIES = {
"2023": 27,
"2022": 90,
"2019": 55,
"2018": 95,
"2017": 74,
"2016": 86,
"2015": 80,
"2014": 67,
"2013": 52,
"2012": 76,
"2011": 71,
"2010": 22,
"2009": 53,
"2008": 49,
"2007": 113,
"2006": 60,
"2005": 55,
"2004": 76,
"2003": 42,
"2002": 31,
"2001": 49,
"2000": 54,
"1999": 79,
"1998": 43,
"1997": 53,
"1996": 95,
"1995": 42,
"1994": 32,
"1993": 41,
"1992": 62,
"1991": 39,
"1990": 87,
"1989": 63,
"1988": 61,
"1987": 34,
"1985": 24,
"1984": 32,
"1983": 52,
"1982": 42,
# "1979": 30, # to be hand-edited
"1978": 38,
}
# What about the 1970s?! 1980 and 1981 are missing, as are 1976 and 1977.
logentries = [] # the entire logbook for one year is a single object: a list of entries
noncaveplaces = ["travel", "journey", "loser plateau", "unknown", "plateau", "base camp", "basecamp", "top camp", "topcamp"]  # all lowercase: compared against place.lower()
def set_trip_id(year, seq):
tid = f"{year}_s{seq:02d}"
return tid
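# Illustrative only (not executed): set_trip_id("1996", 3) -> "1996_s03",
# so sequence numbers zero-pad and sort correctly as strings.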
rx_tripperson = re.compile(r"(?i)<u>(.*?)</u>$")
rx_round_bracket = re.compile(r"[\(\[].*?[\)\]]")
def GetTripPersons(trippeople, expedition, logtime_underground, tid=None):
    """Parses a comma/'+'/'&'/'and'-separated list of names into a list of
    (PersonExpedition, time_underground) tuples, and identifies the entry's
    author: the underlined name, or failing that the last person listed."""
    res = []
    author = None
# print(f'# {tid}')
# print(f" - {tid} '{trippeople}' ")
for tripperson in re.split(r",|\+|&amp;|&(?!\w+;)| and ", trippeople):
tripperson = tripperson.strip()
# author_u = re.match(r"(?i)<u>(.*?)</u>$", tripperson)
author_u = rx_tripperson.match(tripperson)
if author_u:
tripperson = author_u.group(1).strip()
if tripperson:
if tripperson[0] != "*": # a name prefix of "*" is special
tripperson = re.sub(rx_round_bracket, "", tripperson).strip()
# Whacky aliases all handled in GetPersonExpeditionNameLookup()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear:
message = f" ! - {expedition.year} No name match for: '{tripperson}' in entry {tid=} for this expedition year."
print(message)
DataIssue.objects.create(parser="logbooks", message=message)
res.append((personyear, logtime_underground))
if author_u:
author = personyear
else:
# a person but with * prefix. Ignored everywhere.
# print(f" ! - {expedition.year} * person : {tripperson}")
pass
if not author:
if not res:
return "", 0
        author = res[-1][0]  # no <u>author</u> markup: default to the last person listed
# print(f" - {tid} [{author.person}] '{res[0][0].person}'...")
return res, author
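# Illustrative sketch of the name-splitting above (hypothetical names, not executed):
#   GetTripPersons("Phil, Becka and <u>Wookey</u>", expedition, 5.0)
# splits on "," / "+" / "&amp;" / bare "&" / " and ", strips "(...)"/"[...]"
# qualifiers, and the <u>...</u> markup marks Wookey as the entry's author.
# A name prefixed "*" is special and is skipped entirely.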
def tidy_time_underground(logtime_underground):
# Nasty hack, must tidy this up..
if logtime_underground:
try:
logtime_underground = float(logtime_underground)
        except (ValueError, TypeError):
# print(f"logtime_underground = {logtime_underground}")
tu_match = re.match(r"(T/U:\s*)?(\d+[.]?\d*).*", logtime_underground)
if tu_match:
# print(f"logtime_underground = {tu_match.group(2)}")
logtime_underground = float(tu_match.group(2))
else:
logtime_underground = 0
else:
logtime_underground = 0
return logtime_underground
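# Illustrative only (not executed):
#   tidy_time_underground("3.5")        -> 3.5
#   tidy_time_underground("T/U: 2 hrs") -> 2.0   (via the regex fallback)
#   tidy_time_underground(None)         -> 0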
def tidy_trip_persons(trippeople, title, expedition, logtime_underground, tid):
try:
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground, tid=tid)
# print(f" - {author} - {logtime_underground}")
    except Exception:
        message = f" ! - {expedition.year} Skipping logentry: {title} - GetTripPersons FAIL"
        DataIssue.objects.create(parser="logbooks", message=message)
        print(message)
        # raise
        return "", None  # keep the caller's tuple-unpacking working even on failure
if not author:
message = f" ! - {expedition.year} Warning: logentry: {title} - no expo member author for entry '{tid}'"
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
return trippersons, author
def tidy_trip_cave(place):
    # GetCaveLookup() needs to work better. None of this data is actually *used* though?
    # 'tripcave' is converted to a string by this, which renders as the cave slug.
lplace = place.lower()
cave = None
if lplace not in noncaveplaces:
cave = GetCaveLookup().get(lplace)
return cave
def tidy_trip_image_urls(text, date):
y = str(date)[:4]
text = text.replace(' src="', f' src="/years/{y}/')
text = text.replace(" src='", f" src='/years/{y}/")
text = text.replace(f' src="/years/{y}//years/{y}/', f' src="/years/{y}/')
text = text.replace(f" src='/years/{y}//years/{y}/", f" src='/years/{y}/")
text = text.replace("\t", "")
text = text.replace("\n\n\n", "\n\n")
return text
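# Illustrative only (not executed), for an entry dated 1996-07-02:
#   '<img src="i/photo.jpg">'  ->  '<img src="/years/1996/i/photo.jpg">'
# The double-prefix replacements undo the rewrite where a src was already
# absolute, i.e. already began with "/years/1996/".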
def tidy_tid(tid, title):
if tid is not None:
return tid
# print(f"! {title=} ")
tid = str(randint(1000, 9999)) + "_" + slugify(title)[:10].replace("-", "_")
return tid
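# Illustrative only (not executed): tidy_tid(None, "Kaninchenhöhle rigging trip")
# might return something like "4821_kaninchenh" - a random 4-digit prefix plus
# the first 10 slug characters, with hyphens turned into underscores.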
def store_entry_into_database(date, place, tripcave, title, text, trippersons, author, expedition, logtime_underground, tid):
"""saves a single logbook entry and related personlogentry items
We could do a bulk update to save all the entries, but then we would need to do a query on
each one to get the primary key to asign to the PersonLogEntries. So overall probably not much
faster ?
"""
nonLookupAttribs = {
"place": place,
"text": text,
"expedition": expedition,
"time_underground": logtime_underground,
"cave_slug": str(tripcave),
"slug": tid,
}
lookupAttribs = {"date": date, "title": title}
lbo = LogbookEntry.objects.create(**nonLookupAttribs, **lookupAttribs)
pt_list = []
for tripperson, time_underground in trippersons:
lookupAttribs = {"personexpedition": tripperson, "logbook_entry": lbo} # lbo is primary key
nonLookupAttribs = {"time_underground": time_underground, "is_logbook_entry_author": (tripperson == author)}
pt_list.append(PersonLogEntry(**nonLookupAttribs, **lookupAttribs))
PersonLogEntry.objects.bulk_create(pt_list)
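# Design note: the LogbookEntry is created first so that its primary key exists
# for the PersonLogEntry rows, which are then written in a single bulk_create().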
def parser_date(tripdate, year):
"""Interprets dates in the expo logbooks and returns a correct datetime.date object"""
dummydate = date(1970, 1, 1) # replace with _EPOCH
month = 1
day = 1
# message = f" ! - Trying to parse date in logbook: {tripdate} - {year}"
# print(message)
try:
mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
if mdatestandard:
            if mdatestandard.group(1) != year:
message = f" ! - Bad date (year) in logbook: {tripdate} - {year}"
DataIssue.objects.create(parser="logbooks", message=message)
return dummydate
else:
year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
elif mdategoof:
            if mdategoof.group(3) and mdategoof.group(3) != year[:2]:
message = " ! - Bad date mdategoof.group(3) in logbook: " + tripdate + " - " + mdategoof.group(3)
DataIssue.objects.create(parser="logbooks", message=message)
return dummydate
else:
yadd = int(year[:2]) * 100
day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd
else:
year = 1970 # replace with _EPOCH
message = f" ! - Bad date in logbook: {tripdate} - {year}"
DataIssue.objects.create(parser="logbooks", message=message)
return date(year, month, day)
    except Exception:
        message = f" ! - Failed to parse date in logbook: {tripdate} - {year}"
        DataIssue.objects.create(parser="logbooks", message=message)
        return date(1970, 1, 1)  # replace with _EPOCH
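# Illustrative only (not executed):
#   parser_date("1996-07-02", "1996") -> date(1996, 7, 2)   # standard form
#   parser_date("2/7/96", "1996")     -> date(1996, 7, 2)   # "goof" form, day/month/yy
#   parser_date("rubbish", "1996")    -> date(1970, 1, 1)   # EPOCH fallback, DataIssue logged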
def parser_html(year, expedition, txt, seq=""):
"""This uses some of the more obscure capabilities of regular expressions,
see https://docs.python.org/3/library/re.html
You can't see it here, but a round-trip export-then-import will move
the endmatter up to the frontmatter. This made sense when translating
from parser_html_01 format logfiles, believe me.
"""
logentries = []
dupl = {}
# extract front material and stash for later use when rebuilding from list of entries
    headmatch = re.match(r"(?i)(?s).*<body[^>]*>(.*?)<hr.*", txt)
    headpara = headmatch.group(1).strip() if headmatch else ""
    # print(f" - headpara:\n'{headpara}'")
if len(headpara) > 0:
frontpath = Path(settings.EXPOWEB, LOGBOOKS_DIR, year, "frontmatter.html")
with open(frontpath, "w") as front:
front.write(headpara + "\n")
# extract END material and stash for later use when rebuilding from list of entries
    endmatch = re.match(r"(?i)(?s).*<hr\s*/>([\s\S]*?)(?=</body)", txt)
    endpara = endmatch.group(1).strip() if endmatch else ""
    # print(f" - endpara:\n'{endpara}'")
if len(endpara) > 0:
endpath = Path(settings.EXPOWEB, LOGBOOKS_DIR, year, "endmatter.html")
with open(endpath, "w") as end:
end.write(endpara + "\n")
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
logbook_entry_count = 0
for trippara in tripparas:
logbook_entry_count += 1
tid = set_trip_id(year, logbook_entry_count)
# print(f' - new tid:{tid} lbe count: {logbook_entry_count}')
s = re.match(
r"""(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
\s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
\s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
\s*<div\s+class="trippeople">\s*(.*?)</div>
\s*<div\s+class="triptitle">\s*(.*?)</div>
([\s\S]*?)
\s*(?:<div\s+class="timeug">\s*(.*?)</div>)?
\s*$
""",
trippara,
)
if s:
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
else: # allow title and people to be swapped in order
msg = f" !- {year} Can't parse:{logbook_entry_count} '{trippara[:55]}'...'{trippara}'"
print(msg)
DataIssue.objects.create(parser="logbooks", message=msg)
s2 = re.match(
r"""(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
\s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
\s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
\s*<div\s+class="triptitle">\s*(.*?)</div>
\s*<div\s+class="trippeople">\s*(.*?)</div>
([\s\S]*?)
\s*(?:<div\s+class="timeug">\s*(.*?)</div>)?
\s*$
""",
trippara,
)
if s2:
tripid, tripid1, tripdate, triptitle, trippeople, triptext, tu = s2.groups()
else:
# if not re.search(r"Rigging Guide", trippara):
msg = f" !- Logbook. Can't parse entry on 2nd pass:{logbook_entry_count} '{trippara[:55]}'...'{trippara}'"
print(msg)
DataIssue.objects.create(parser="logbooks", message=msg)
continue
ldate = parser_date(tripdate.strip(), year)
triptitles = triptitle.split(" - ")
if len(triptitles) >= 2:
place = triptitles[0]
else:
place = "UNKNOWN"
tripcontent = re.sub(r"</p>", "", triptext)
tripcontent = re.sub(r"<p>", "<br /><br />", tripcontent).strip()
triptitle = triptitle.strip()
# triptitle must be unique for a given date. We fix this here.
check = (ldate, triptitle)
if check in dupl:
dupl[check] += 1
triptitle = f"{triptitle} #{dupl[check]}"
print(f" - {triptitle} -- {ldate}")
else:
dupl[check] = 1
tu = tidy_time_underground(tu)
trippersons, author = tidy_trip_persons(trippeople, triptitle, expedition, tu, tid)
tripcave = tidy_trip_cave(place)
tripcontent = tidy_trip_image_urls(tripcontent, ldate)
tid = tidy_tid(tid, triptitle)
entrytuple = (ldate, place, tripcave, triptitle, tripcontent, trippersons, author, expedition, tu, tid)
logentries.append(entrytuple)
return logentries
def parser_blog(year, expedition, txt, sq=""):
"""Parses the format of web pages collected as 'Save As HTML" from the UK Caving blog website.
Note that the entries have dates and authors, but no titles.
See detailed explanation of the complete process:
https://expo.survex.com/handbook/computing/logbooks-parsing.html
https://expo.survex.com/handbook/computing/log-blog-parsing.html
This uses some of the more obscure capabilities of regular expressions,
see https://docs.python.org/3/library/re.html
BLOG entries have this structure:
<article ... data-author="Tinywoman" data-content="post-298780" id="js-post-298780">
<article class="message-body js-selectToQuote">
</article>
</article>
So the content is nested inside the header. Attachments (images) come after the content.
It's a bugger, but it's out of our control.
"""
logentries = []
tripheads = re.findall(
r"<article class=\"message message--post js-post js-inlineModContainer\s*\"\s*([\s\S]*?)(?=</article)", txt
)
    if not tripheads:
message = f" ! - Skipping on failure to parse article header: {txt[:500]}"
print(message)
    # (?=...) is a non-consuming lookahead, see https://docs.python.org/3/library/re.html
tripparas = re.findall(
r"<article class=\"message-body js-selectToQuote\"\>\s*([\s\S]*?)(</article[^>]*>)([\s\S]*?)(?=</article)", txt
)
    if not tripparas:
message = f" ! - Skipping on failure to parse article content: {txt[:500]}"
print(message)
    if len(tripheads) != len(tripparas):
        print(f" ! - Mismatch: {len(tripheads)} article headers but {len(tripparas)} article bodies")
location = "Plateau" # best guess, fix manually later
tu = 0 # no logged time underground in a blog entry
logbook_entry_count = 0
for i in range(0, len(tripparas)):
tripstuff = tripparas[i]
attach = tripstuff[2]
        # note the use of the non-greedy *? regex idiom here
attach = re.sub(r"<div class=\"file-content\">[\s\S]*?(?=</li>)", "", attach)
attach = re.sub(r"<footer[\s\S]*(</footer>)", "", attach)
tripcontent = tripstuff[0] + attach
# print(f"{i} - {len(tripstuff)} - {tripstuff[1]}")
triphead = tripheads[i]
logbook_entry_count += 1
tid = set_trip_id(year, logbook_entry_count) + "_blog" + sq
# print(f" - tid: {tid}")
# data-author="tcacrossley"
match_author = re.search(r".*data-author=\"([^\"]*)\" data-content=.*", triphead)
        if not match_author:
message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse data-author {tid} {triphead[:400]}..."
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
break
trippeople = match_author.group(1)
# print(f" - tid: {tid} {trippeople}")
# datetime="2019-07-11T13:16:18+0100"
match_datetime = re.search(r".*datetime=\"([^\"]*)\" data-time=.*", triphead)
        if not match_datetime:
message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse datetime {tid} {triphead[:400]}..."
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
break
datestamp = match_datetime.group(1)
try:
tripdate = datetime.fromisoformat(datestamp)
except:
message = f" ! - FROMISOFORMAT fail logentry {year}:{logbook_entry_count} {tid} '{datestamp}'"
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
# fallback, ignore the timestamp bits:
tripdate = datetime.fromisoformat(datestamp[0:10])
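            # Note: before Python 3.11, fromisoformat() rejects UTC offsets written
            # without a colon (e.g. "+0100"), so truncating to the "YYYY-MM-DD"
            # date part is a deliberately tolerant fallback.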
# print(f" - tid: {tid} '{trippeople}' '{tripdate}'")
        # The export function expects triptitle to begin with "<location> - ", but we
        # can't know the location from a blog post, so use 'Expo' / 'Unknown'.
place = "Unknown"
# triptitle must be unique for a given date. We can enforce this here.
triptitle = f"Expo - UK Caving Blog{sq} post {logbook_entry_count}"
tripcontent = re.sub(r"(width=\"\d+\")", "", tripcontent)
tripcontent = re.sub(r"height=\"\d+\"", "", tripcontent)
tripcontent = re.sub(r"width: \d+px", "", tripcontent)
tripcontent = re.sub(r"\n\n+", "\n\n", tripcontent)
tripcontent = re.sub(r"<hr\s*>", "", tripcontent)
tripcontent = f"\n\n<!-- Content parsed from UK Caving Blog -->\nBlog Author: {trippeople}" + tripcontent
        trippersons, author = tidy_trip_persons(trippeople, triptitle, expedition, tu, tid)
        tripcave = tidy_trip_cave(place)
        tripcontent = tidy_trip_image_urls(tripcontent, tripdate)
tid = tidy_tid(tid, triptitle)
entrytuple = (tripdate, place, tripcave, triptitle, tripcontent, trippersons, author, expedition, tu, tid)
logentries.append(entrytuple)
return logentries
def clean_all_logbooks():
DataIssue.objects.filter(parser="logbooks").delete()
LogbookEntry.objects.all().delete()
def clean_logbook_for_expedition(expedition):
"""Only used when loading a single logbook. Deletes database LogBookEntries and
DataIssues for this expedition year.
"""
    LogbookEntry.objects.filter(expedition=expedition).delete()
dataissues = DataIssue.objects.filter(parser="logbooks")
for di in dataissues:
ph = expedition.year
if re.search(ph, di.message) is not None: # SLOW just to delete issues for one year
# print(f' - CLEANING dataissue {di.message}')
di.delete()
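    # A faster alternative would be one SQL round-trip (a sketch, untested here):
    #   DataIssue.objects.filter(parser="logbooks", message__contains=expedition.year).delete()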
def parse_logbook_for_expedition(expedition, blog=False):
"""Parses all logbook entries for one expedition
"""
global ENTRIES
logentries = []
logbook_parseable = False
expologbase = Path(settings.EXPOWEB, LOGBOOKS_DIR)
year = expedition.year
expect = ENTRIES[year]
# print(" - Logbook for: " + year)
if year in LOGBOOK_PARSER_SETTINGS:
yearfile, parsefunc = LOGBOOK_PARSER_SETTINGS[year]
expedition.logbookfile = yearfile # don't change this if a blog
else:
yearfile = DEFAULT_LOGBOOK_FILE
expedition.logbookfile = DEFAULT_LOGBOOK_FILE # don't change this if a blog
parsefunc = DEFAULT_LOGBOOK_PARSER
if blog:
print(f" - BLOG file {yearfile} using parser {parsefunc}")
if year not in BLOG_PARSER_SETTINGS:
message = f" ! - Expecting blog parser buut none specified for {year}"
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
else:
yearfile, parsefunc = BLOG_PARSER_SETTINGS[year]
logbookpath = Path(yearfile)
# print(f" - Logbook file {yearfile} using parser {parsefunc}")
# expedition.save()
for sq in ["", "2", "3", "4"]: # cope with blog saved as many separate files
lb = Path(expologbase, year, logbookpath.stem + sq + logbookpath.suffix)
        if not lb.is_file():
# print(f" ! End of blog. Next blog file in sequence not there:{lb}")
break
try:
with open(lb, "rb") as file_in:
txt = file_in.read().decode("utf-8")
logbook_parseable = True
        except OSError:
            logbook_parseable = False
            print(f" ! Couldn't open logbook {lb}")
        except UnicodeDecodeError:
            logbook_parseable = False
            print(f" ! Logbook file is not valid UTF-8: {lb}")
        except Exception:
            logbook_parseable = False
            print(f" ! Very Bad Error opening {lb}")
if logbook_parseable:
# --------------------
parser = globals()[parsefunc]
print(f" - {year} parsing with {parsefunc} - {lb}")
            logentries += parser(year, expedition, txt, sq)  # this launches the right parser; accumulate over the file sequence
# --------------------
if len(logentries) == expect:
# print(f"OK {year} {len(logentries):5d} is {expect}\n")
pass
else:
print(f"Mismatch in number of log entries: {year} {len(logentries):5d} is not {expect}\n")
return logentries
def LoadLogbook(year):
"""One off logbook for testing purposes, and also reloadable on '/expedition/2022?reload'
This is inside an atomic transaction"""
expo = Expedition.objects.get(year=year)
    year = expo.year  # normalise to the canonical string form
clean_logbook_for_expedition(expo)
    logentries = parse_logbook_for_expedition(expo)  # this actually loads the logbook for one expo
if year in BLOG_PARSER_SETTINGS:
logentries += parse_logbook_for_expedition(expo, blog=True) # this loads the blog logbook
else:
print(
f" - Not a year with extant blog entries to import: '{year}' not in BLOG_PARSER_SETTINGS {BLOG_PARSER_SETTINGS}"
)
for entrytuple in logentries:
date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid = entrytuple
if expo == expedition: # unneeded check, we zeroed it before filling it
#print(f" - {triptitle}")
store_entry_into_database(date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid)
else:
print(f" ! unexpected log entry labelled as '{expedition}' {tid}" )
expo.save() # to save logbook name property
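# Illustrative only (not executed): LoadLogbook("2022") wipes and re-imports just
# the 2022 logbook; blog posts are only re-imported for years in BLOG_PARSER_SETTINGS.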
def LoadLogbooks():
"""This is the master function for parsing all logbooks into the Troggle database.
This should be rewritten to use coroutines to load all logbooks from disc in parallel,
but must be serialised to write to database as sqlite is single-user.
This is inside an atomic transaction. Maybe it shouldn't be..
"""
global ENTRIES
global logentries
allentries = []
mem1 = get_process_memory()
print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)
start = time.time()
clean_all_logbooks()
expos = Expedition.objects.all()
if len(expos) <= 1:
message = " ! - No expeditions found. Attempting to 'people' first"
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
load_people_expos()
expos = Expedition.objects.all()
if len(expos) <= 1:
message = " ! - No expeditions found, even after attempting to load 'people'. Abort."
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
return
noexpo = [
"1986",
"2020",
"2021",
] # no expo
lostlogbook = ["1976", "1977", "1979", "1980", "1981"]
sqlfail = [""] # breaks mysql with db constraint fail - all now fixed.]
nologbook = noexpo + lostlogbook + sqlfail
nlbe = {}
loglist = []
bloglist = []
for expo in expos:
year = expo.year
if year in sqlfail:
print(" - Logbook for: " + year + " NO parsing attempted - known sql failures")
message = f" ! - Not even attempting to parse logbook for {year} until code fixed"
DataIssue.objects.create(parser="logbooks", message=message)
print(message)
if year not in nologbook:
if year in ENTRIES:
loglist.append(expo)
else:
print(" - No Logbook yet for: " + year) # catch case when preparing for next expo
if year in BLOG_PARSER_SETTINGS:
bloglist.append(expo)
for ex in loglist:
logentries = parse_logbook_for_expedition(ex) # this loads the logbook for one expo
allentries += logentries
for b in bloglist:
print(f" - BLOG: {b}")
logentries = parse_logbook_for_expedition(b, blog=True) # loads the blog logbook for one expo
allentries += logentries
print(f"total {len(allentries):,} log entries parsed in all expeditions")
mem = get_process_memory()
print(f" - MEM: {mem:7.2f} MB in use, {mem-mem1:7.2f} MB more", file=sys.stderr)
duration = time.time() - start
print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
# Now we serially store the parsed data in the database, updating 3 types of object:
# - Expedition (the 'logbook.html' value)
# - LogBookEntry (text, who when etc.)
    # - PersonLogEntry (who was on that specific trip mentioned in the logbook entry)
for entrytuple in allentries:
date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid = entrytuple
store_entry_into_database(date, place, tripcave, triptitle, text, trippersons, author, expedition, tu, tid)
for expo in expos:
expo.save() # to save logbook name property
mem = get_process_memory()
print(f" - MEM: {mem:7.2f} MB in use, {mem-mem1:7.2f} MB more", file=sys.stderr)
duration = time.time() - start
print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
# dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
# expeditionYearRegex = re.compile(r'<span\s+class="expeditionyear">(.*?)</span>', re.S)
# titleRegex = re.compile(r'<H1>(.*?)</H1>', re.S)
# reportRegex = re.compile(r'<div\s+class="report">(.*)</div>\s*</body>', re.S)
# personRegex = re.compile(r'<div\s+class="person">(.*?)</div>', re.S)
# nameAuthorRegex = re.compile(r'<span\s+class="name(,author|)">(.*?)</span>', re.S)
# TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
# locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)
# caveRegex = re.compile(r'<span\s+class="cave">(.*?)</span>', re.S)