2011-07-11 02:10:22 +01:00
|
|
|
import csv
|
|
|
|
import os
|
2020-05-28 02:20:50 +01:00
|
|
|
import re
|
2021-11-11 20:57:49 +00:00
|
|
|
import time
|
|
|
|
from random import randint
|
|
|
|
from datetime import datetime, date
|
|
|
|
from pathlib import Path
|
2011-07-11 02:10:22 +01:00
|
|
|
|
2020-05-28 02:20:50 +01:00
|
|
|
from django.conf import settings
|
|
|
|
from django.template.defaultfilters import slugify
|
|
|
|
from django.utils.timezone import get_current_timezone, make_aware
|
|
|
|
|
2021-04-13 00:43:57 +01:00
|
|
|
from troggle.core.models.troggle import DataIssue, Expedition
|
2021-04-13 00:11:08 +01:00
|
|
|
from troggle.core.utils import TROG, save_carefully
|
2021-04-13 00:47:17 +01:00
|
|
|
from troggle.core.models.caves import Cave, LogbookEntry, PersonTrip, GetCaveLookup
|
2020-05-28 02:20:50 +01:00
|
|
|
from parsers.people import GetPersonExpeditionNameLookup
|
2011-07-11 02:10:22 +01:00
|
|
|
|
2021-04-13 01:37:42 +01:00
|
|
|
'''
|
|
|
|
Parses and imports logbooks in all their wonderful confusion
|
2022-12-19 11:38:34 +00:00
|
|
|
See detailed explanation of the complete process:
|
|
|
|
https://expo.survex.com/handbook/computing/logbooks-parsing.html
|
2021-04-13 01:37:42 +01:00
|
|
|
'''
|
2021-04-23 03:07:21 +01:00
|
|
|
todo='''
|
|
|
|
- refactor everything with some urgency, esp. LoadLogbookForExpedition()
|
|
|
|
|
2023-01-16 19:52:05 +00:00
|
|
|
- remove the TROG things since we need the database for multiuser access? Or not?
|
2022-12-09 23:45:07 +00:00
|
|
|
|
2022-08-30 15:58:49 +01:00
|
|
|
- profile the code to find bad repetitive things, of which there are many.
|
|
|
|
|
|
|
|
- far too many uses of Django field dereferencing to get values, which is SLOW
|
2022-03-24 01:05:50 +00:00
|
|
|
|
2022-08-30 15:58:49 +01:00
|
|
|
- Logbooks 1987, 1988, 1989 all crash on MySql - but not sqlite - with db constraint fail. Edit logbook to fix.
|
2021-11-05 21:01:10 +00:00
|
|
|
|
2021-04-23 03:07:21 +01:00
|
|
|
- import/parse/re-export-as-html the 'artisanal-format' old logbooks so that
|
2022-08-30 15:58:49 +01:00
|
|
|
we keep only a modern HTML05 format. Then we can retire the old parsers and reduce the
|
2021-04-23 03:07:21 +01:00
|
|
|
volume of code here substantially.
|
|
|
|
|
|
|
|
- rewrite to use generators rather than storing everything intermediate in lists - to reduce memory impact.
|
|
|
|
|
|
|
|
- We should ensure logbook.html is utf-8 and stop this crap:
|
|
|
|
file_in = open(logbookfile,'rb')
|
|
|
|
txt = file_in.read().decode("latin1")
|
2021-04-23 16:11:50 +01:00
|
|
|
|
|
|
|
- this is a slow and uncertain function: cave = getCaveByReference(caveRef)
|
2022-08-30 15:58:49 +01:00
|
|
|
|
2022-12-07 18:22:09 +00:00
|
|
|
- use Fixtures https://docs.djangoproject.com/en/4.1/ref/django-admin/#django-admin-loaddata to cache
|
2023-01-16 19:52:05 +00:00
|
|
|
data for old logbooks? Not worth it..
|
2022-12-07 18:22:09 +00:00
|
|
|
|
2021-04-23 03:07:21 +01:00
|
|
|
'''
|
2022-11-21 16:41:52 +00:00
|
|
|
# Hard limit; must match the width of the LogbookEntry title field in the database.
MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200

# Per-year (filename, parser-function-name) for logbooks scraped from the UK Caving blog.
# All entries are commented out now: every blog year has been folded into logbooks.html.
BLOG_PARSER_SETTINGS = {
    # "2022": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
    # "2019": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
    # "2018": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
    # "2017": ("ukcavingblog.html", "parser_blog"), # now folded in to logbooks.html
    }

DEFAULT_LOGBOOK_FILE = "logbook.html"
DEFAULT_LOGBOOK_PARSER = "parser_html"
# All years since 2002 use the default value for Logbook parser
# don't forget to update expoweb/pubs.htm to match.
# Per-year (filename, parser-function-name) overrides for pre-2002 logbooks.
LOGBOOK_PARSER_SETTINGS = {
    "2002": ("logbook.html", "parser_html"),
    "2001": ("log.htm", "parser_html_01"),
    "2000": ("log.htm", "parser_html_01"),
    "1999": ("log.htm", "parser_html_01"),
    "1998": ("log.htm", "parser_html_01"),
    "1997": ("log.htm", "parser_html_01"),
    "1996": ("log.htm", "parser_html_01"),
    "1995": ("log.htm", "parser_html_01"),
    "1994": ("logbook.html", "parser_html"),
    "1993": ("logbook.html", "parser_html"),
    "1992": ("logbook.html", "parser_html"),
    "1991": ("logbook.html", "parser_html"),
    "1990": ("logbook.html", "parser_html"),
    "1989": ("logbook.html", "parser_html"),
    "1988": ("logbook.html", "parser_html"),
    "1987": ("logbook.html", "parser_html"),
    "1985": ("logbook.html", "parser_html"),
    "1984": ("logbook.html", "parser_html"),
    "1983": ("logbook.html", "parser_html"),
    "1982": ("logbook.html", "parser_html"),
    }

# Expected number of logbook entries per year — presumably used elsewhere as a
# sanity check after parsing (TODO confirm against caller).
entries = { "2022": 89, "2019": 55, "2018": 95, "2017": 74, "2016": 86, "2015": 80,
    "2014": 65, "2013": 52, "2012": 75, "2011": 71, "2010": 22, "2009": 53,
    "2008": 49, "2007": 113, "2006": 60, "2005": 55, "2004": 76, "2003": 42, "2002": 31,
    "2001": 49, "2000": 54, "1999": 79, "1998": 43, "1997": 53, "1996": 95, "1995": 42,
    "1994": 32, "1993": 41, "1992": 62, "1991": 39, "1990": 87, "1989": 63,"1988": 61,"1987": 34,
    "1985": 24, "1984": 32, "1983": 52, "1982": 42,}

logentries = [] # the entire logbook for one year is a single object: a list of entries

# Entry-title place names that are not caves, so the cave lookup is skipped for them.
noncaveplaces = [ "Journey", "Loser Plateau", "UNKNOWN", 'plateau',
    'base camp', 'basecamp', 'top camp', 'topcamp' ]
# Accumulated parsing problems, shared via the TROG in-memory cache.
logdataissues = TROG['issues']['logdataissues']
trips ={}
|
2011-07-11 02:10:22 +01:00
|
|
|
|
|
|
|
#
|
|
|
|
# the logbook loading section
|
|
|
|
#
|
2021-04-23 16:11:50 +01:00
|
|
|
def set_trip_id(year, seq):
    """Build the canonical trip identifier for entry number *seq* of *year*,
    e.g. ("2019", 3) -> "2019_s03"."""
    return f"{year}_s{seq:02d}"
|
|
|
|
|
2022-11-18 20:42:03 +00:00
|
|
|
# Matches a name wrapped in <u>..</u>; the underlined person is the entry's author.
rx_tripperson = re.compile(r'(?i)<u>(.*?)</u>$')
# Matches a (...) or [...] aside, stripped out of people names before lookup.
rx_round_bracket = re.compile(r"[\(\[].*?[\)\]]")
|
|
|
|
|
2021-04-23 16:11:50 +01:00
|
|
|
|
|
|
|
def GetTripPersons(trippeople, expedition, logtime_underground, tid=None):
    """Parse the free-text people list of a logbook entry.

    Returns (res, author) where res is a list of (personexpedition, logtime_underground)
    tuples, and author is the PersonExpedition marked up as the author with <u>..</u>
    (falling back to the last valid person in the list). Returns ("", 0) when no
    people could be parsed at all. Unmatched names are recorded as DataIssues.
    """
    res = [ ]
    author = None
    # print(f'# {tid}')
    # print(f" - {tid} '{trippeople}' ")

    # Nicknames used in logbooks, mapped to the real names the lookup knows.
    # these aliases should be moved to people.py GetPersonExpeditionNameLookup(expedition)
    aliases = {
        "Wiggy": "Phil Wigglesworth",
        "Animal": "Mike Richardson",
        "MikeTA": "Mike Richardson",
        "CavingPig": "Elaine Oliver",
        "nobrotson": "Rob Watson",
        "Tinywoman": "Nadia",
        "tcacrossley": "Tom Crossley",
        "Samouse1": "Todd Rye",
    }

    for tripperson in re.split(r",|\+|&|&(?!\w+;)| and ", trippeople):
        tripperson = tripperson.strip()
        # An underlined name marks the author of the entry.
        # mul = re.match(r"(?i)<u>(.*?)</u>$", tripperson)
        mul = rx_tripperson.match(tripperson)
        if mul:
            tripperson = mul.group(1).strip()
        # A leading '*' marks a non-member; skip those and empty fragments.
        if tripperson and tripperson[0] != '*':
            # strip parenthetical/bracketed asides from the name
            tripperson = re.sub(rx_round_bracket, "", tripperson).strip()

            # data-driven replacement for the old chain of if-statements
            tripperson = aliases.get(tripperson, tripperson)

            personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
            if not personyear:
                message = f" ! - {expedition.year} No name match for: '{tripperson}' in entry {tid=} for this expedition year."
                print(message)
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid]=message
            res.append((personyear, logtime_underground))
            if mul:
                author = personyear
    if not author:
        if not res:
            return "", 0
        author = res[-1][0] # the previous valid person and a time of 0 hours

    #print(f" - {tid} [{author.person}] '{res[0][0].person}'...")
    return res, author
|
|
|
|
|
2022-08-30 15:58:49 +01:00
|
|
|
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, tid=None):
    """ saves a logbook entry and related persontrips
    Does NOT save the expeditionday_id - all NULLs. why? Because we are deprecating expeditionday !

    troggle.log shows that we are creating lots of duplicates, which is not a problem with SQL as they just overwrite but we are saving the same thing too many times..

    Until 18 Dec.2022, this was overwriting logbook entries for the same date with the same title, because
    lookupAttribs={'date':date, 'title':title}
    """

    # Nasty hack, must tidy this up..
    # Coerce logtime_underground (a float, a numeric string, or a "T/U: 3.5 hrs"-style
    # string) into a float number of hours; anything unparseable becomes 0.
    if logtime_underground:
        try:
            logtime_underground = float(logtime_underground)
        except:
            # print(f"logtime_underground = {logtime_underground}")
            tu_match = re.match(r"(T/U:\s*)?(\d+[.]?\d*).*", logtime_underground)
            if tu_match:
                # print(f"logtime_underground = {tu_match.group(2)}")
                logtime_underground = float(tu_match.group(2))
            else:
                logtime_underground = 0
    else:
        logtime_underground = 0

    try:
        trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground, tid=tid)
        # print(f" - {author} - {logtime_underground}")
    except:
        message = f" ! - {expedition.year} Skipping logentry: {title} - GetTripPersons FAIL"
        DataIssue.objects.create(parser='logbooks', message=message)
        logdataissues["title"]=message
        print(message)
        raise
        return  # NOTE(review): unreachable - the raise above propagates to the caller

    if not author:
        # Warning only: the entry is still saved, just with no author flagged.
        message = f" ! - {expedition.year} Warning: logentry: {title} - no expo member author for entry '{tid}'"
        DataIssue.objects.create(parser='logbooks', message=message)
        logdataissues["title"]=message
        print(message)
        #return

    # This needs attention. The slug field is derived from 'title'
    # both GetCaveLookup() and GetTripCave() need to work together better. None of this data is *used* though?
    #tripCave = GetTripCave(place):

    lplace = place.lower()
    cave=None
    if lplace not in noncaveplaces:
        cave = GetCaveLookup().get(lplace)

    # Year string for rewriting relative image paths, e.g. "2019" from a date object.
    y = str(date)[:4]

    # Make <img src=...> references absolute by pointing them into /years/<year>/
    text = text.replace(' src="', f' src="/years/{y}/' )
    text = text.replace(" src='", f" src='/years/{y}/" )

    # Undo the doubling-up for text that already had absolute /years/<year>/ paths.
    text = text.replace(f' src="/years/{y}//years/{y}/', f' src="/years/{y}/' )
    text = text.replace(f" src='/years/{y}//years/{y}/", f" src='/years/{y}/" )

    # Whitespace tidy-up of the entry body.
    text = text.replace('\t', '' )
    text = text.replace('\n\n\n', '\n\n' )

    #Check for an existing copy of the current entry, and save
    expeditionday = expedition.get_expedition_day(date)
    lookupAttribs={'date':date, 'title':title}
    # 'cave' is converted to a string doing this, which renders as the cave slug.
    # but it is a db query which we should try to avoid - rewrite this

    #NEW slug for a logbook entry here! Unique id + slugified title fragment
    if tid is not None:
        slug = tid
        # slug = tid + "_" + slugify(title)[:10].replace('-','_')
    else:
        # No tid supplied: random prefix plus a title fragment (not guaranteed unique).
        slug = str(randint(1000,9999)) + "_" + slugify(title)[:10].replace('-','_')
    nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition,
                      'time_underground':logtime_underground, 'cave_slug':str(cave), 'slug': slug}

    # This creates the lbo instance of LogbookEntry
    lbo, created=save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)

    # for PersonTrip time_underground is float (decimal hours)
    for tripperson, time_underground in trippersons:
        # print(f" - {tid} '{tripperson}' author:{tripperson == author}")
        lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo}
        nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)}
        # this creates the PersonTrip instance.
        save_carefully(PersonTrip, lookupAttribs, nonLookupAttribs)
|
2011-07-11 02:10:22 +01:00
|
|
|
|
|
|
|
def ParseDate(tripdate, year):
    """ Interprets dates in the expo logbooks and returns a correct datetime.date object

    Accepts ISO "yyyy-mm-dd" dates and the sloppy "d/m/yy" / "d/m/yyyy" forms found
    in older logbooks. On any parse failure a DataIssue is recorded and the
    placeholder date 1970-01-01 is returned.
    """
    dummydate = date(1970, 1, 1)  # placeholder returned for unparseable dates
    month = 1
    day = 1
    # message = f" ! - Trying to parse date in logbook: {tripdate} - {year}"
    # print(message)
    try:
        mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
        mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
        if mdatestandard:
            # ISO form: the year in the date must match the expedition year string.
            if not (mdatestandard.group(1) == year):
                message = f" ! - Bad date (year) in logbook: {tripdate} - {year}"
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues["tripdate"]=message
                return dummydate
            else:
                year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
        elif mdategoof:
            # group(3) is the optional century ("19"/"20"); if present it must match
            # the century of the expedition year.
            if not (not mdategoof.group(3) or mdategoof.group(3) == year[:2]):
                message = " ! - Bad date mdategoof.group(3) in logbook: " + tripdate + " - " + mdategoof.group(3)
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues["tripdate"]=message
                return dummydate
            else:
                # Two-digit year: prepend the expedition year's century.
                yadd = int(year[:2]) * 100
                day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd
        else:
            year = 1970
            message = f" ! - Bad date in logbook: {tripdate} - {year}"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues["tripdate"]=message

        return date(year, month, day)
    except:
        message = f" ! - Failed to parse date in logbook: {tripdate} - {year}"
        DataIssue.objects.create(parser='logbooks', message=message)
        logdataissues["tripdate"]=message
        # BUGFIX: previously returned datetime.date(1970, 1, 1) - but 'datetime'
        # is the class here (from 'from datetime import datetime, date'), so that
        # was an unbound-method call raising TypeError inside the handler.
        return dummydate
|
2022-11-21 00:04:33 +00:00
|
|
|
|
2022-12-21 02:05:26 +00:00
|
|
|
# 2002 - now
|
2022-12-16 19:57:56 +00:00
|
|
|
def parser_html(year, expedition, txt, seq=""):
    '''Parser for 2002-onwards logbook.html files.

    This uses some of the more obscure capabilities of regular expressions,
    see https://docs.python.org/3/library/re.html

    You can't see it here, but a round-trip export-then-import will move
    the endmatter up to the frontmatter. This makes sense when moving
    from parser_html_01 format logfiles, believe me.

    Appends one entrytuple per parsed trip to the global logentries list.
    '''
    global logentries
    global logdataissues

    # extract front material and stash for later use when rebuilding from list of entries
    headmatch = re.match(r"(?i)(?s).*<body[^>]*>(.*?)<hr.*", txt)
    if headmatch:
        headpara = headmatch.groups()[0].strip()
    else:
        # FIX: guard against a malformed file with no <body>/<hr> - previously
        # this crashed with AttributeError. Same guard style as parser_html_01.
        print(f" ! - {year} NO headmatch")
        headpara = ""

    # print(f" - headpara:\n'{headpara}'")
    if(len(headpara)>0):
        frontpath = Path(settings.EXPOWEB, "years", year, "frontmatter.html")
        with open(frontpath,"w") as front:
            front.write(headpara+"\n")

    # extract END material and stash for later use when rebuilding from list of entries
    endmatch = re.match(r"(?i)(?s).*<hr\s*/>([\s\S]*?)(?=</body)", txt)
    if endmatch:
        endpara = endmatch.groups()[0].strip()
    else:
        # FIX: same guard as in parser_html_01
        print(f" ! - {year} NO endmatch")
        endpara = ""

    # print(f" - endpara:\n'{endpara}'")
    if(len(endpara)>0):
        endpath = Path(settings.EXPOWEB, "years", year, "endmatter.html")
        with open(endpath,"w") as end:
            end.write(endpara+"\n")

    # entries are separated by <hr /> tags; (?= is a non-consuming match
    tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
    logbook_entry_count = 0
    for trippara in tripparas:
        logbook_entry_count += 1
        tid = set_trip_id(year,logbook_entry_count)
        # print(f' - new tid:{tid} lbe count: {logbook_entry_count}')

        s = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
            \s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
            \s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
            \s*<div\s+class="trippeople">\s*(.*?)</div>
            \s*<div\s+class="triptitle">\s*(.*?)</div>
            ([\s\S]*?)
            \s*(?:<div\s+class="timeug">\s*(.*?)</div>)?
            \s*$
            ''', trippara)
        if s:
            tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
        else: # allow title and people to be swapped in order
            msg = f" !- {year} Can't parse:{logbook_entry_count} '{trippara[:50]}'..."
            print(msg)
            DataIssue.objects.create(parser='logbooks', message=msg)
            logdataissues[tid]=msg

            s2 = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
                \s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
                \s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
                \s*<div\s+class="triptitle">\s*(.*?)</div>
                \s*<div\s+class="trippeople">\s*(.*?)</div>
                ([\s\S]*?)
                \s*(?:<div\s+class="timeug">\s*(.*?)</div>)?
                \s*$
                ''', trippara)
            if s2:
                tripid, tripid1, tripdate, triptitle, trippeople, triptext, tu = s2.groups()
            else:
                # if not re.search(r"Rigging Guide", trippara):
                msg = f" !- Logbook. Can't parse entry on 2nd pass:{logbook_entry_count} '{trippara[:50]}'..."
                print(msg)
                DataIssue.objects.create(parser='logbooks', message=msg)
                logdataissues[tid]=msg
                continue

        ldate = ParseDate(tripdate.strip(), year)
        # a "Cave - description" title yields the cave name; otherwise UNKNOWN
        triptitles = triptitle.split(" - ")
        if len(triptitles) >= 2:
            tripcave = triptitles[0]
        else:
            tripcave = "UNKNOWN"
        ltriptext = re.sub(r"</p>", "", triptext)
        #ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
        ltriptext = re.sub(r"<p>", "<br /><br />", ltriptext).strip()

        triptitle = triptitle.strip()
        entrytuple = (ldate, tripcave, triptitle, ltriptext,
            trippeople, expedition, tu, tripid1)
        logentries.append(entrytuple)
|
|
|
|
|
2019-03-30 13:58:38 +00:00
|
|
|
# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
|
2022-12-16 19:57:56 +00:00
|
|
|
def parser_html_01(year, expedition, txt, seq=""):
    '''Main parser for the 1991 - 2001 logbooks, which were hand-hacked into a
    simpler HTML format: entries separated by <hr> with a "date|title|people"
    header paragraph. Appends one entrytuple per parsed trip to the global
    logentries list, and stashes front/end matter for round-trip rebuilding.
    Aborts the whole year after more than 5 per-entry errors.
    '''
    global logentries
    global logdataissues
    errorcount = 0

    # extract front material and stash for later use when rebuilding from list of entries
    headmatch = re.match(r"(?i)(?s).*<body[^>]*>(.*?)<hr.*", txt)
    if headmatch:
        headpara = headmatch.groups()[0].strip()
    else:
        # FIX: guard added for consistency with the endmatch guard below;
        # previously a file with no <body>/<hr> crashed with AttributeError.
        print(f" ! - {year} NO headmatch")
        headpara = ""

    # print(f" - headpara:\n'{headpara}'")
    if(len(headpara)>0):
        frontpath = Path(settings.EXPOWEB, "years", year, "frontmatter.html")
        with open(frontpath,"w") as front:
            front.write(headpara+"\n")

    # extract END material and stash for later use when rebuilding from list of entries
    endmatch = re.match(r"(?i)(?s).*<hr\s*/>([\s\S]*?)(?=</body)", txt)
    if endmatch:
        endpara = endmatch.groups()[0].strip()
    else:
        print(f" ! - {year} NO endmatch")
        endpara = ""

    # print(f" - endpara:\n'{endpara}'")
    if(len(endpara)>0):
        endpath = Path(settings.EXPOWEB, "years", year, "endmatter.html")
        with open(endpath,"w") as end:
            end.write(endpara+"\n")

    tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
    logbook_entry_count = 0
    for trippara in tripparas:
        logbook_entry_count += 1
        tid = set_trip_id(year,logbook_entry_count)
        # print(f" #0 - tid: {tid}")
        try:
            #print(f" #1 - tid: {tid}")
            s = re.match(r"(?i)(?s)\s*(?:<p>)?(.*?)</?p>(.*)$", trippara)
            if not s:
                # BUGFIX: this message was missing its f-prefix, so "{year}" was
                # emitted literally instead of the year value.
                message = f" ! - Skipping logentry {year} failure to parse header: " + tid + trippara[:300] + "..."
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid]=message
                print(message)
                break
            try:
                tripheader, triptext = s.group(1), s.group(2)
            except:
                # NOTE(review): execution continues with tripheader/triptext possibly
                # unbound; the resulting NameError is caught by the outer except below.
                message = f" ! - Fail to set tripheader, triptext. trip:<{tid}> s:'{s}'"
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid]=message
                print(message)

            # mtripid = re.search(r'<a id="(.*?)"', tripheader)
            # if not mtripid:
            #     message = f" ! - A tag id not found. Never mind. Not needed. trip:<{tid}> header:'{tripheader}'"
            #     DataIssue.objects.create(parser='logbooks', message=message)
            #     logdataissues[tid]=message
            #     print(message)

            # tripid = mtripid and mtripid.group(1) or ""
            # print(f" # - mtripid: {mtripid}")
            # strip <a>, <b> and <span> markup out of the header before splitting
            tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)
            #print(f" #2 - tid: {tid}")
            try:
                tripdate, triptitle, trippeople = tripheader.split("|")
            except:
                message = f" ! - Fail 3 to split out date|title|people. trip:<{tid}> '{tripheader.split('|')}'"
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid]=message
                print(message)
                try:
                    # fall back to a two-field "date|title" header with no people
                    tripdate, triptitle = tripheader.split("|")
                    trippeople = "GUESS ANON"
                except:
                    message = f" ! - Skipping logentry {year} Fail 2 to split out date|title (anon). trip:<{tid}> '{tripheader.split('|')}' CRASHES MySQL !"
                    DataIssue.objects.create(parser='logbooks', message=message)
                    logdataissues[tid]=message
                    print(message)
                    break
            #print(f" #3 - tid: {tid}")
            ldate = ParseDate(tripdate.strip(), year)
            #print(f" # - tid: {tid} <{tripdate}> <{triptitle}> <{trippeople}>")
            #print(f" #4 - tid: {tid}")

            # pull the "T/U ..." (time underground) paragraph out of the body text
            mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
            if mtu:
                tu = mtu.group(1)
                triptext = triptext[:mtu.start(0)] + triptext[mtu.end():]
            else:
                tu = ""

            triptitles = triptitle.split(" - ")
            tripcave = triptitles[0].strip()

            ltriptext = triptext

            # chop trailing link/punctuation/whitespace debris off the body
            mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&|</?p>|\((?:same day|\d+)\))*$', ltriptext)
            if mtail:
                ltriptext = ltriptext[:mtail.start(0)]
            ltriptext = re.sub(r"</p>", "", ltriptext)
            ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
            ltriptext = re.sub(r"</?u>", "_", ltriptext)
            ltriptext = re.sub(r"</?i>", "''", ltriptext)
            ltriptext = re.sub(r"</?b>", "'''", ltriptext)
            ltriptext = re.sub(r"<p>", "<br /><br />", ltriptext).strip()

            if ltriptext == "":
                message = " ! - Zero content for logbook entry!: " + tid
                DataIssue.objects.create(parser='logbooks', message=message)
                logdataissues[tid]=message
                print(message)


            entrytuple = (ldate, tripcave, triptitle, ltriptext,
                trippeople, expedition, tu, tid)
            logentries.append(entrytuple)

        except:
            message = f" ! - Skipping logentry {year} due to exception in: {tid}"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues[tid]=message
            print(message)
            errorcount += 1
            raise
        if errorcount >5 :
            message = f" !!- TOO MANY ERRORS - aborting at '{tid}' logbook: {year}"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues[tid]=message
            print(message)
            return
|
|
|
|
|
2022-12-16 19:57:56 +00:00
|
|
|
def parser_blog(year, expedition, txt, sq=""):
|
|
|
|
'''Parses the format of web pages collected as 'Save As HTML" from the UK Caving blog website.
|
2022-12-14 23:46:14 +00:00
|
|
|
Note that the entries have dates and authors, but no titles.
|
2022-12-19 11:38:34 +00:00
|
|
|
See detailed explanation of the complete process:
|
|
|
|
https://expo.survex.com/handbook/computing/logbooks-parsing.html
|
|
|
|
https://expo.survex.com/handbook/computing/log-blog-parsing.html
|
|
|
|
|
|
|
|
This uses some of the more obscure capabilities of regular expressions,
|
|
|
|
see https://docs.python.org/3/library/re.html
|
2022-12-19 20:13:26 +00:00
|
|
|
|
|
|
|
BLOG entries have this structure:
|
|
|
|
<article ... data-author="Tinywoman" data-content="post-298780" id="js-post-298780">
|
|
|
|
<article class="message-body js-selectToQuote">
|
|
|
|
</article>
|
|
|
|
</article>
|
|
|
|
So the content is nested inside the header. Attachments (images) come after the content.
|
2022-12-14 23:46:14 +00:00
|
|
|
'''
|
|
|
|
global logentries
|
|
|
|
global logdataissues
|
|
|
|
errorcount = 0
|
2020-06-08 21:33:32 +01:00
|
|
|
|
2022-12-14 23:46:14 +00:00
|
|
|
tripheads = re.findall(r"<article class=\"message message--post js-post js-inlineModContainer\s*\"\s*([\s\S]*?)(?=</article)", txt)
|
|
|
|
if not ( tripheads ) :
|
|
|
|
message = f" ! - Skipping on failure to parse article header: {txt[:500]}"
|
|
|
|
print(message)
|
2020-06-08 21:33:32 +01:00
|
|
|
|
2022-12-19 11:38:34 +00:00
|
|
|
# (?= is a non-consuming match, see https://docs.python.org/3/library/re.html
|
2022-12-19 20:13:26 +00:00
|
|
|
tripparas = re.findall(r"<article class=\"message-body js-selectToQuote\"\>\s*([\s\S]*?)(</article[^>]*>)([\s\S]*?)(?=</article)", txt)
|
2022-12-14 23:46:14 +00:00
|
|
|
if not ( tripparas ) :
|
|
|
|
message = f" ! - Skipping on failure to parse article content: {txt[:500]}"
|
|
|
|
print(message)
|
|
|
|
|
|
|
|
if (len(tripheads) !=len(tripparas)):
|
|
|
|
print(f"{len(tripheads)} != {len(tripparas)}")
|
2022-12-19 20:13:26 +00:00
|
|
|
print(f"{len(tripheads)} - {len(tripparas)}")
|
2022-12-14 23:46:14 +00:00
|
|
|
|
2022-12-17 17:05:55 +00:00
|
|
|
location = "Plateau" # best guess, fix manually later
|
2022-12-14 23:46:14 +00:00
|
|
|
tu = 0
|
|
|
|
logbook_entry_count = 0
|
|
|
|
for i in range(0, len(tripparas)):
|
2022-12-19 20:13:26 +00:00
|
|
|
tripstuff = tripparas[i]
|
|
|
|
attach = tripstuff[2]
|
|
|
|
# note use on non-greedy *? regex idiom here
|
|
|
|
attach = re.sub(r"<div class=\"file-content\">[\s\S]*?(?=</li>)","",attach)
|
|
|
|
attach = re.sub(r"<footer[\s\S]*(</footer>)","",attach)
|
|
|
|
tripcontent = tripstuff[0] + attach
|
|
|
|
#print(f"{i} - {len(tripstuff)} - {tripstuff[1]}")
|
2022-12-14 23:46:14 +00:00
|
|
|
triphead = tripheads[i]
|
|
|
|
logbook_entry_count += 1
|
2022-12-16 19:57:56 +00:00
|
|
|
tid = set_trip_id(year,logbook_entry_count) +"_blog" + sq
|
2022-12-14 23:46:14 +00:00
|
|
|
# print(f" - tid: {tid}")
|
|
|
|
|
|
|
|
# data-author="tcacrossley"
|
|
|
|
match_author = re.search(r".*data-author=\"([^\"]*)\" data-content=.*", triphead)
|
|
|
|
if not ( match_author ) :
|
|
|
|
message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse data-author {tid} {triphead[:400]}..."
|
|
|
|
DataIssue.objects.create(parser='logbooks', message=message)
|
|
|
|
logdataissues[tid]=message
|
|
|
|
print(message)
|
|
|
|
break
|
|
|
|
trippeople = match_author.group(1)
|
|
|
|
# print(f" - tid: {tid} {trippeople}")
|
2022-12-16 19:57:56 +00:00
|
|
|
# datetime="2019-07-11T13:16:18+0100"
|
2022-12-14 23:46:14 +00:00
|
|
|
match_datetime = re.search(r".*datetime=\"([^\"]*)\" data-time=.*", triphead)
|
|
|
|
if not ( match_datetime ) :
|
|
|
|
message = f" ! - Skipping logentry {year}:{logbook_entry_count} on failure to parse datetime {tid} {triphead[:400]}..."
|
|
|
|
DataIssue.objects.create(parser='logbooks', message=message)
|
|
|
|
logdataissues[tid]=message
|
|
|
|
print(message)
|
|
|
|
break
|
|
|
|
datestamp = match_datetime.group(1)
|
|
|
|
|
2022-12-15 01:06:54 +00:00
|
|
|
try:
|
|
|
|
tripdate = datetime.fromisoformat(datestamp)
|
|
|
|
except:
|
2022-12-16 19:57:56 +00:00
|
|
|
message = f" ! - FROMISOFORMAT fail logentry {year}:{logbook_entry_count} {tid} '{datestamp}'"
|
|
|
|
DataIssue.objects.create(parser='logbooks', message=message)
|
|
|
|
logdataissues[tid]=message
|
|
|
|
print(message)
|
|
|
|
# fallback, ignore the timestamp bits:
|
2022-12-15 01:06:54 +00:00
|
|
|
tripdate = datetime.fromisoformat(datestamp[0:10])
|
2022-12-17 03:02:08 +00:00
|
|
|
# print(f" - tid: {tid} '{trippeople}' '{tripdate}'")
|
2022-12-14 23:46:14 +00:00
|
|
|
|
2022-12-17 03:02:08 +00:00
|
|
|
# tripname must have the location then a hyphen at the beginning as it is ignored by export function
|
|
|
|
location = "Unknown"
|
|
|
|
tripname = f"Expo - UK Caving Blog{sq} post {logbook_entry_count}" # must be unique for a given date
|
2022-12-19 00:33:32 +00:00
|
|
|
tripcontent = re.sub(r"(width=\"\d+\")","",tripcontent)
|
|
|
|
tripcontent = re.sub(r"height=\"\d+\"","",tripcontent)
|
|
|
|
tripcontent = re.sub(r"width: \d+px","",tripcontent)
|
|
|
|
tripcontent = re.sub(r"\n\n+","\n\n",tripcontent)
|
2022-12-19 11:38:34 +00:00
|
|
|
tripcontent = re.sub(r"<hr\s*>","",tripcontent)
|
2022-12-20 15:18:07 +00:00
|
|
|
tripcontent = f"\n\n<!-- Content parsed from UK Caving Blog -->\nBlog Author: {trippeople}" + tripcontent
|
2020-06-08 21:33:32 +01:00
|
|
|
|
2022-12-16 19:57:56 +00:00
|
|
|
entrytuple = (tripdate, location, tripname, tripcontent,
|
2022-12-14 23:46:14 +00:00
|
|
|
trippeople, expedition, tu, tid)
|
|
|
|
logentries.append(entrytuple)
|
2019-03-06 23:20:34 +00:00
|
|
|
|
2022-12-14 23:46:14 +00:00
|
|
|
|
2022-12-15 00:35:48 +00:00
|
|
|
def LoadLogbookForExpedition(expedition, clean=True):
    """ Parses all logbook entries for one expedition.

    Picks the logbook file and parser function for this year from
    LOGBOOK_PARSER_SETTINGS (falling back to the defaults), reads the file(s),
    runs the parser (which appends tuples to the module-global 'logentries'),
    de-duplicates entries sharing the same (date, title), and writes each one
    to the database via EnterLogIntoDbase.

    expedition -- an Expedition model instance; its .logbookfile field is
                  updated and saved as a side effect.
    clean      -- if True, deletes all existing LogbookEntry rows and
                  logbook DataIssues for this year before re-parsing.

    Returns the number of log entries parsed for this expedition.
    """
    global logentries
    # absolutely horrid. REFACTOR THIS (all my fault..)
    global logdataissues
    global entries

    logbook_parseable = False
    yearlinks = LOGBOOK_PARSER_SETTINGS
    expologbase = os.path.join(settings.EXPOWEB, "years")
    logentries=[]

    year = expedition.year
    # expected number of entries for this year, used as a sanity check below
    expect = entries[year]
    # print(" - Logbook for: " + year)

    def cleanerrors(year):
        # Remove stale logbook DataIssues and logdataissues keys for this year
        # so a re-parse starts from a clean slate.
        global logdataissues
        dataissues = DataIssue.objects.filter(parser='logbooks')
        for di in dataissues:
            ph = year
            # NOTE: year is used as a regex pattern against the message text
            if re.search(ph, di.message) is not None:
                #print(f' - CLEANING dataissue {di.message}')
                di.delete()

        #print(f' - CLEAN {year} {len(logdataissues)} {type(logdataissues)} data issues for this year')
        # Collect keys first: cannot delete from a dict while iterating it.
        dellist = []
        for key, value in logdataissues.items():
            #print(f' - CLEANING logdataissues [{key}]: {value}')
            if key.startswith(year):
                #print(f' - CLEANING logdataissues [{key:12}]: {value} ')
                dellist.append(key)
        for i in dellist:
            del logdataissues[i]

    if (clean):
        cleanerrors(year)

    # Choose the per-year file/parser pair, or fall back to the default.
    if year in yearlinks:
        yearfile, yearparser = yearlinks[year]
        logbookpath = Path(yearfile)
        expedition.logbookfile = yearfile
        parsefunc = yearparser
        # print(f" - Logbook file {yearfile} using parser {yearparser}")

    else:
        logbookpath = Path(DEFAULT_LOGBOOK_FILE)
        expedition.logbookfile = DEFAULT_LOGBOOK_FILE
        parsefunc = DEFAULT_LOGBOOK_PARSER

    expedition.save()

    lbes = LogbookEntry.objects.filter(expedition=expedition)
    if (clean):
        for lbe in lbes:
            lbe.delete()

    for sq in ["", "2", "3", "4"]: # cope with blog saved as many separate files
        lb = Path(expologbase, year, logbookpath.stem + sq + logbookpath.suffix)
        if not (lb.is_file()):
            # print(f" ! End of blog. Next blog file in sequence not there:{lb}")
            break
        try:
            # Read as bytes then decode, so a bad byte raises here rather
            # than midway through parsing.
            with open(lb,'rb') as file_in:
                txt = file_in.read().decode("utf-8")
                logbook_parseable = True
        except (IOError):
            logbook_parseable = False
            print(f" ! Couldn't open logbook as UTF-8 {lb}")
        except:
            logbook_parseable = False
            print(f" ! Very Bad Error opening {lb}")

        if logbook_parseable:

            # --------------------
            # Dynamic dispatch: parsefunc is the *name* of a module-level
            # parser function; it appends to the global 'logentries'.
            parser = globals()[parsefunc]
            print(f' - {year} parsing with {parsefunc} - {lb}')
            parser(year, expedition, txt, sq) # this launches the right parser for this year
            # --------------------

            # Disambiguate entries with identical (date, title) by appending
            # a " #n" suffix to the title of the second and later duplicates.
            dupl = {}
            for entrytuple in logentries:
                date, tripcave, triptitle, text, trippeople, expedition, logtime_underground, tripid1 = entrytuple
                check = (date, triptitle)
                if check in dupl:
                    dupl[check] += 1
                    triptitle = f"{triptitle} #{dupl[check]}"
                    print(f' - {triptitle} -- {date}')
                else:
                    dupl[check] = 1
                EnterLogIntoDbase(date, tripcave, triptitle, text, trippeople, expedition, logtime_underground,
                    tripid1)

            # Sanity check against the hard-coded expected entry count.
            if len(logentries) == expect:
                # print(f"OK {year} {len(logentries):5d} is {expect}\n")
                pass
            else:
                print(f"Mismatch in number of log entries: {year} {len(logentries):5d} is not {expect}\n")

    return len(logentries)
|
2020-05-28 04:54:53 +01:00
|
|
|
|
2022-12-18 19:33:56 +00:00
|
|
|
def LoadLogbook(year):
    '''One off logbook for testing purposes
    '''
    global LOGBOOK_PARSER_SETTINGS

    nlbe = {}
    # Drop any cached expedition page so the re-import is visible immediately.
    TROG['pagecache']['expedition'][year] = None # clear cache

    expo = Expedition.objects.get(year=year)
    year = expo.year # some type funny

    # First pass: the ordinary logbook for this single expedition.
    nlbe[expo] = LoadLogbookForExpedition(expo) # this actually loads the logbook for one expo

    if year not in BLOG_PARSER_SETTINGS:
        print(f"Not a year with extant blog entries to import: '{year}' not in BLOG_PARSER_SETTINGS {BLOG_PARSER_SETTINGS}")
    else:
        print("BLOG parsing")
        # Temporarily point the year's parser settings at the blog parser,
        # then run a second, non-destructive pass over the same expedition.
        LOGBOOK_PARSER_SETTINGS[year] = BLOG_PARSER_SETTINGS[year]
        nlbe[expo] = LoadLogbookForExpedition(expo, clean=False) # this loads the blog logbook for one expo
|
2022-12-14 23:46:14 +00:00
|
|
|
|
2011-07-11 02:10:22 +01:00
|
|
|
def LoadLogbooks():
    """ This is the master function for parsing all logbooks into the Troggle database.

    Clears all previous logbook DataIssues, then for every Expedition decides
    whether its logbook (and optionally its UK Caving blog) should be parsed,
    and runs LoadLogbookForExpedition on each. Finally prints the grand total
    of entries parsed.

    This should be rewritten to use coroutines to load all logbooks from disc in parallel,
    but must be serialised to write to database as sqlite is single-user.
    """
    global logdataissues
    global entries

    logdataissues = {}
    DataIssue.objects.filter(parser='logbooks').delete()
    expos = Expedition.objects.all()
    if len(expos) <= 1:
        # Expeditions are created by the 'people' import; without them there
        # is nothing to attach logbook entries to.
        message = " ! - No expeditions found. Load 'people' first"
        DataIssue.objects.create(parser='logbooks', message=message)
        logdataissues["sqlfail 0000"] = message
        print(message)
        return

    noexpo = ["1986", "2020", "2021",] #no expo
    lostlogbook = ["1976", "1977", "1978", "1979", "1980", "1981"]
    sqlfail = [""] # breaks mysql with db constraint fail - all now fixed.]
    nologbook = noexpo + lostlogbook + sqlfail

    nlbe = {}       # per-expedition count of entries parsed
    loglist = []    # expeditions whose ordinary logbook will be parsed
    bloglist = []   # expeditions which also have a UK Caving blog to parse

    for expo in expos: # pointless as we explicitly know the years in this code.
        year = expo.year
        TROG['pagecache']['expedition'][year] = None # clear cache
        if year in sqlfail:
            print(" - Logbook for: " + year + " NO parsing attempted - known sql failures")
            message = f" ! - Not even attempting to parse logbook for {year} until code fixed"
            DataIssue.objects.create(parser='logbooks', message=message)
            logdataissues[f"sqlfail {year}"] = message
            print(message)

        if year not in nologbook:
            if year in entries:
                loglist.append(expo)
            else:
                print(" - No Logbook yet for: " + year) # catch case when preparing for next expo

            if year in BLOG_PARSER_SETTINGS:
                bloglist.append(expo)

    for ex in loglist:
        nlbe[ex] = LoadLogbookForExpedition(ex) # this loads the logbook for one expo

    for b in bloglist:
        # Remember the original settings for this year so they can be
        # restored after the blog pass.
        if str(b) in LOGBOOK_PARSER_SETTINGS:
            orig = LOGBOOK_PARSER_SETTINGS[str(b)]
        else:
            orig = (DEFAULT_LOGBOOK_FILE, DEFAULT_LOGBOOK_PARSER)
        LOGBOOK_PARSER_SETTINGS[str(b)] = BLOG_PARSER_SETTINGS[str(b)]
        print(f" - BLOG: {b}")
        # clean=False: keep the entries already loaded from the real logbook.
        nlbe[b] = LoadLogbookForExpedition(b, clean=False) # this loads the blog logbook for one expo
        LOGBOOK_PARSER_SETTINGS[str(b)] = orig

    # tried to use map with concurrent threads - but sqlite database is not concurrent, so failed with database lock

    yt = sum(nlbe.values())
    print(f"total {yt:,} log entries parsed in all expeditions")
|
|
|
|
|
2022-03-02 21:15:24 +00:00
|
|
|
|
2019-03-06 23:20:34 +00:00
|
|
|
|
2021-04-23 16:11:50 +01:00
|
|
|
# dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
|
|
|
|
# expeditionYearRegex = re.compile(r'<span\s+class="expeditionyear">(.*?)</span>', re.S)
|
|
|
|
# titleRegex = re.compile(r'<H1>(.*?)</H1>', re.S)
|
|
|
|
# reportRegex = re.compile(r'<div\s+class="report">(.*)</div>\s*</body>', re.S)
|
|
|
|
# personRegex = re.compile(r'<div\s+class="person">(.*?)</div>', re.S)
|
|
|
|
# nameAuthorRegex = re.compile(r'<span\s+class="name(,author|)">(.*?)</span>', re.S)
|
|
|
|
# TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
|
|
|
|
# locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)
|
|
|
|
# caveRegex = re.compile(r'<span\s+class="cave">(.*?)</span>', re.S)
|
2011-07-11 02:10:22 +01:00
|
|
|
|