import csv, re, datetime, os, shutil

from html import unescape
from unidecode import unidecode
from pathlib import Path

from django.conf import settings

from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition
from troggle.core.utils import save_carefully, TROG

'''These functions do not match how the stand-alone folk script works, so that script
produces an HTML file containing href links to troggle pages which troggle itself
does not think are right.

The standalone script needs to be rendered defunct, and all the parsing needs to be done
in troggle; either that, or both should use the same code by importing a shared module.
'''


def parse_blurb(personline, header, person):
    """Set the person's mug_shot image path and blurb HTML fragment from the
    Mugshot field of their folk.csv row."""
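    # The Mugshot field has two forms (handled in the branches below):
    #   'i/adama2018.jpg'  - a bare image file, stored directly as the mug_shot
    #   'l/ollybetts.htm'  - a blurb page whose <body> content becomes the blurb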
    ms_filename = personline[header["Mugshot"]]
    ms_path = Path(settings.EXPOWEB, "folk", ms_filename)

    if ms_filename:
        if not ms_path.is_file():
            message = f"! INVALID mug_shot field '{ms_filename}' for {person.fullname}"
            print(message)
            DataIssue.objects.create(parser='people', message=message, url=f"/person/{person.fullname}")
            return

        if ms_filename.startswith('i/'):
            # The person just has an image, e.g. 'i/adama2018.jpg': add it directly.
            person.mug_shot = str(Path("/folk", ms_filename))
            person.blurb = None

        elif ms_filename.startswith('l/'):
            # A blurb page, e.g. 'l/ollybetts.htm'; it may contain <img src="../i/mymug.jpg"> images.
            with open(ms_path, 'r') as blurbfile:
                blrb = blurbfile.read()
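                # Extract everything between <body> and the first <hr>, rewrite
                # relative image links (src="../i/) to absolute /folk/i/ URLs, and
                # strip heading tags before storing the fragment as the blurb.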
                pblurb = re.search(r'<body>(.*)<hr', blrb, re.DOTALL)
                if pblurb:
                    person.mug_shot = None
                    fragment = pblurb.group(1)
                    fragment = fragment.replace('src="../i/', 'src="/folk/i/')
                    fragment = fragment.replace("src='../i/", "src='/folk/i/")
                    fragment = re.sub(r'<h\d[^>]*>[^<]*</h\d>', '', fragment)  # strip <h1>..</h1> etc.
                    person.blurb = fragment
                else:
                    message = f"! Blurb parse error in {ms_filename}"
                    print(message)
                    DataIssue.objects.create(parser='people', message=message, url="/folk/")

        else:
            message = f"! Unrecognised type of file at mug_shot field '{ms_filename}' for {person.fullname}"
            print(message)
            DataIssue.objects.create(parser='people', message=message, url="/folk/")

    person.save()


def load_people_expos():
    '''Parses the folk.csv file to read people's names and create the Person,
    Expedition and PersonExpedition objects.

    It gets names wrong for people like Lydia-Clare Leather, for various 'von' and
    'de' middle 'names', and for McLean vs Mclean vs McAdam - this interacts with
    the url parser in urls.py too.
    '''
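    # folk.csv layout, inferred from the lookups in this function: fields such as
    # "Name", "Lastname", "Guest", "VfHO member" and "Mugshot" are found by header
    # name (their order is immaterial); every column from index 5 onwards is an
    # expedition year whose cell holds "1" or "-1" when the person attended.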
    DataIssue.objects.filter(parser='people').delete()

    persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))  # should really be EXPOFOLK I guess
    personreader = csv.reader(persontab)  # this is an iterator
    headers = next(personreader)
    header = {name: i for i, name in enumerate(headers)}
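    # header maps each column name to its index, e.g. header["Name"] -> 0
    # (index shown is hypothetical; all lookups below go by column name).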

    # make expeditions
    print(" - Loading expeditions")
    years = headers[5:]

    for year in years:
        lookupAttribs = {'year': year}
        nonLookupAttribs = {'name': f"CUCC expo {year}"}
        save_carefully(Expedition, lookupAttribs, nonLookupAttribs)

    # make persons
    print(" - Loading personexpeditions")

    for personline in personreader:
        name = personline[header["Name"]]
        name = re.sub(r"<.*?>", "", name)

        firstname = ""
        nickname = ""
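        # Both the "Name" and "Lastname" columns may carry a nickname in
        # parentheses, e.g. "Fred Smith (Smiffy)" (hypothetical entry): regex
        # group 1 is the name proper, group 2 the nickname.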

        rawlastname = personline[header["Lastname"]].strip()
        matchlastname = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", rawlastname)
        lastname = matchlastname.group(1).strip() if matchlastname else ""

        splitnick = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", name)
        fullname = splitnick.group(1)
        nickname = splitnick.group(2) or ""

        fullname = fullname.strip()
        names = fullname.split(' ')
        firstname = names[0]
        if len(names) == 1:
            lastname = ""

        vfho = personline[header["VfHO member"]] != ''

        lookupAttribs = {'first_name': firstname, 'last_name': (lastname or "")}
        nonLookupAttribs = {'is_vfho': vfho, 'fullname': fullname}
        person, created = save_carefully(Person, lookupAttribs, nonLookupAttribs)

        parse_blurb(personline=personline, header=header, person=person)

        # make person expedition from table
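        # Columns from index 5 pair each year header with this person's
        # attendance cell; both "1" and "-1" are treated as attendance here.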
        for year, attended in list(zip(headers, personline))[5:]:
            expedition = Expedition.objects.get(year=year)
            if attended == "1" or attended == "-1":
                lookupAttribs = {'person': person, 'expedition': expedition}
                nonLookupAttribs = {'nickname': nickname, 'is_guest': (personline[header["Guest"]] == "1")}
                save_carefully(PersonExpedition, lookupAttribs, nonLookupAttribs)
    persontab.close()
    print("", flush=True)


# Used in other referencing parser functions.
# The expedition name lookup is cached for speed (it's a very big list).
# Should have a LIST of nicknames; currently only the first entry from folk.csv is used.

# Refactor: the dict returned by GetPersonExpeditionNameLookup(expo) is indexed by name
# and has personexpedition values. This is convoluted; the whole personexpedition
# concept is arguably unnecessary.

Gpersonexpeditionnamelookup = {}
def GetPersonExpeditionNameLookup(expedition):
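    """Return a dict mapping every name variant that might appear in a logbook
    (e.g. "fred smith", "fred s", "fs", nicknames and their combinations) to the
    corresponding PersonExpedition; variants matching more than one person are
    dropped. The result is cached per expedition name."""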
    global Gpersonexpeditionnamelookup
    res = Gpersonexpeditionnamelookup.get(expedition.name)
    if res:
        return res

    res = {}
    duplicates = set()

    # print("Calculating GetPersonExpeditionNameLookup for " + expedition.year)
    personexpeditions = PersonExpedition.objects.filter(expedition=expedition)
    for personexpedition in personexpeditions:
        possnames = []
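        # Collect every plausible way this person might be written in a logbook:
        # first/last combinations, abbreviated surnames, bare initials, and the
        # nickname forms appended further below.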
        f = unidecode(unescape(personexpedition.person.first_name.lower()))
        l = unidecode(unescape(personexpedition.person.last_name.lower()))
        full = unidecode(unescape(personexpedition.person.fullname.lower()))
        if l:
            possnames.append(f + " " + l)
            possnames.append(f + " " + l[0])
            possnames.append(f + l[0])
            possnames.append(f[0] + " " + l)
            possnames.append(f[0] + l[0])  # initials, e.g. gb or bl
        possnames.append(f)
        if full not in possnames:
            possnames.append(full)
        if personexpedition.nickname.lower() not in possnames:
            possnames.append(personexpedition.nickname.lower())
        if l:
            # Allow the nickname in combination with the last name too,
            # e.g. nickname "Phil" + "Sargent" adds "phil sargent", "phil s", "phils".
            nick = personexpedition.nickname.lower()
            if nick + " " + l not in possnames:
                possnames.append(nick + " " + l)
            if nick + " " + l[0] not in possnames:
                possnames.append(nick + " " + l[0])
            if nick + l[0] not in possnames:
                possnames.append(nick + l[0])

        for possname in possnames:
            if possname in res:
                duplicates.add(possname)
            else:
                res[possname] = personexpedition

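    # A variant that matched more than one person is ambiguous, so drop it
    # entirely rather than let it silently resolve to whoever was seen first.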
    for possname in duplicates:
        del res[possname]

    Gpersonexpeditionnamelookup[expedition.name] = res
    return res
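

# A minimal usage sketch (hypothetical caller; the logbook-referencing parsers
# mentioned above do something along these lines):
#     expo = Expedition.objects.get(year="2018")
#     names = GetPersonExpeditionNameLookup(expo)
#     pe = names.get("phil s")   # a PersonExpedition, or None if unknown/ambiguous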