mirror of https://expo.survex.com/repositories/troggle/.git synced 2024-11-22 07:11:52 +00:00

remove cache and setdatesfromlogbook

Philip Sargent 2022-11-21 16:26:30 +00:00
parent 4260b0f092
commit bcb61f9cd9
2 changed files with 40 additions and 23 deletions


@@ -214,6 +214,7 @@ def logbookentry(request, date, slug):
         svxothers = None
         this_logbookentry=this_logbookentry[0]
+        # This is the only page that uses presontrip_next and persontrip_prev
         return render(request, 'logbookentry.html',
                         {'logbookentry': this_logbookentry, 'trips': trips, 'svxothers': svxothers, 'wallets': wallets})
     else:


@@ -24,8 +24,6 @@ Parses and imports logbooks in all their wonderful confusion
 # it can be checked up later from the hard-copy if necessary; or it's not possible to determin (name, trip place, etc)
 '''
 todo='''
-- Use the .shelve.db cache for all logbooks, not just individually
 - refactor everything with some urgency, esp. LoadLogbookForExpedition()
 - profile the code to find bad repetitive things, of which there are many.
@@ -46,9 +44,6 @@ todo='''
 - this is a slow and uncertain function: cave = getCaveByReference(caveRef)
-- DB lock currently prevents multiple threads for loading logbooks. But asyncio might work..?
 '''

 logentries = [] # the entire logbook for one year is a single object: a list of entries
@@ -120,7 +115,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
     Does NOT save the expeditionday_id - all NULLs. why? Because we are deprecating expeditionday !
     troggle.log shows that we are creating lots of duplicates, which is no no problem with SQL as they just overwrite
-    but we are saving the same thing too many times.. Also seen in the ObjStore mimic
+    but we are saving the same thing too many times..
     """
     try:
         trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground, tid=tid)
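The docstring above notes that re-importing a logbook re-saves identical entries and just relies on SQL overwriting them. A minimal sketch of one way to make that explicit, assuming a hypothetical LogbookEntry model keyed by the per-trip id; the import path and field names are illustrative, not troggle's actual schema:

from django.db import transaction

def enter_log_once(date, place, title, text, expedition, tid):
    # Hypothetical import path; the real model lives elsewhere in troggle.
    from troggle.core.models.logbooks import LogbookEntry

    with transaction.atomic():
        # update_or_create() keyed on the trip id writes one row per trip:
        # a second import updates the row instead of saving a duplicate.
        entry, created = LogbookEntry.objects.update_or_create(
            slug=tid,
            defaults={"date": date, "place": place, "title": title,
                      "text": text, "expedition": expedition},
        )
    return entry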
@@ -471,7 +466,15 @@ def Parseloghtml03(year, expedition, txt):
         logentries.append(entrytuple)

 def SetDatesFromLogbookEntries(expedition):
-    """
+    """Sets the next and previous entry for a persontrip by setting
+    persontrip_prev
+    persontrip_next
+    for each persontrip instance.
+    This is ONLY needed when a logbook entry is displayed. So could be called lazily
+    only when one of these entries is requested.
+    It does NOT do what the docstring says here:
     Sets the date_from and date_to field for an expedition based on persontrips.
     Then sets the expedition date_from and date_to based on the personexpeditions.
     """
@@ -561,7 +564,7 @@ def LoadLogbookForExpedition(expedition):
     expedition.save()
     logbook_cached = False
-    if True: # enable cache system
+    if False: # enable cache system
         now = time.time()
         bad_cache = False # temporarily disable reading the cache - buggy
         try:
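The if False: above switches off the cache whose write side is commented out later in this diff: a pickle of (expedition, logbook file size, entry count, entries). For context, a sketch of what the matching read side with its validity checks might look like; this is not the real troggle code, though cache_filename and logbookpath are the names already used in this function:

import os
import pickle

def load_cached_logbook(cache_filename, logbookpath):
    try:
        # Distrust the cache if the logbook file has been edited since it was written.
        if os.path.getmtime(cache_filename) < os.path.getmtime(logbookpath):
            return None
        with open(cache_filename, "rb") as fc:
            expedition, lbsize, n, logentries = pickle.load(fc)
        # Distrust it if the recorded size or entry count no longer match.
        if lbsize != os.path.getsize(logbookpath) or n != len(logentries):
            return None
        return logentries
    except (OSError, pickle.UnpicklingError, ValueError):
        return None  # missing or corrupt cache: caller falls back to parsing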
@@ -603,46 +606,59 @@
         except (IOError):
             logbook_parseable = False
             print(" ! Couldn't open logbook as UTF-8 " + logbookpath)
+    else:
+        try:
+            file_in = open(logbookpath,'rb')
+            txt = file_in.read().decode("utf-8")
+            file_in.close()
+            logbook_parseable = True
+        except (IOError):
+            logbook_parseable = False
+            print(" ! Couldn't open logbook as UTF-8 " + logbookpath)
+        except:
+            logbook_parseable = False
+            print(" ! Very Bad Error opening " + logbookpath)

     if logbook_parseable:
         parser = globals()[parsefunc]
-        print(f' - Using parser {parsefunc}')
+        print(f' - {year} parsing with {parsefunc}')
         parser(year, expedition, txt) # this launches the right parser for this year
-        print(" - Setting dates from logbook entries")
-        SetDatesFromLogbookEntries(expedition)
-        if len(logentries) >0:
-            print(" - Cacheing " , len(logentries), " log entries")
-            lbsize = logbookpath.stat().st_size
-            with open(cache_filename, "wb") as fc: # we much check that permission are g+w ! or expo can't delete the cache
-                logbk=(expedition,lbsize,len(logentries),logentries)
-                pickle.dump(logbk, fc, protocol=4)
-        else:
-            print(" ! NO TRIP entries found in logbook, check the syntax.")
+        # SetDatesFromLogbookEntries(expedition)
+        # if len(logentries) >0:
+        #     print(" - Cacheing " , len(logentries), " log entries")
+        #     lbsize = logbookpath.stat().st_size
+        #     with open(cache_filename, "wb") as fc: # we much check that permission are g+w ! or expo can't delete the cache
+        #         logbk=(expedition,lbsize,len(logentries),logentries)
+        #         pickle.dump(logbk, fc, protocol=4)
+        # else:
+        #     print(" ! NO TRIP entries found in logbook, check the syntax.")
     i=0
     for entrytuple in logentries:
+        # date, tripcave, triptitle, text, trippeople, expedition, logtime_underground, tripid1 = entrytuple
         try:
             date, tripcave, triptitle, text, trippeople, expedition, logtime_underground, tripid1 = entrytuple
-        except ValueError: # cope with removal of entry_type but still in cache files. Remove in Sept. 2022.
+        except ValueError: # cope with removal of entry_type but still in cache files. Remove in Dec. 2022.
             date, tripcave, triptitle, text, trippeople, expedition, logtime_underground, entry_type, tripid1 = entrytuple
         EnterLogIntoDbase(date, tripcave, triptitle, text, trippeople, expedition, 0,
                           tripid1)
         i +=1
-    SetDatesFromLogbookEntries(expedition)
+    # SetDatesFromLogbookEntries(expedition)

     if len(logentries) == expect:
         # print(f"OK {year} {len(logentries):5d} is {expect}\n")
         pass
     else:
-        print(f"BAD {year} {len(logentries):5d} is not {expect}\n")
+        print(f"Mismatch {year} {len(logentries):5d} is not {expect}\n")
     return len(logentries)
 def LoadLogbooks():
     """ This is the master function for parsing all logbooks into the Troggle database.
     Parser settings appropriate for each year are set in settings.py LOGBOOK_PARSER_SETTINGS.
-    This should be rewritten to use coroutines to load all logbooks from disc in parallel.
+    This should be rewritten to use coroutines to load all logbooks from disc in parallel,
+    but must be serialised to write to database as sqlite is single-user.
     """
     global logdataissues
     global entries
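The amended LoadLogbooks() docstring spells out the constraint: logbooks could be parsed from disc in parallel, but database writes must stay serialised because sqlite accepts a single writer. A sketch of that split using asyncio; parse_one() and store_entries() are placeholder stand-ins for the real parser and database code:

import asyncio

def parse_one(logbookpath):
    # Placeholder: read (and in real life parse) one logbook file.
    with open(logbookpath, "rb") as f:
        return f.read().decode("utf-8")

def store_entries(logbookpath, text):
    # Placeholder for the serialised database write.
    print(f"would store {len(text)} parsed characters from {logbookpath}")

async def load_logbooks_sketch(logbookpaths):
    # Disc reads and parsing can run concurrently in worker threads...
    parsed = await asyncio.gather(
        *(asyncio.to_thread(parse_one, p) for p in logbookpaths)
    )
    # ...but the sqlite writes happen one at a time, in one thread.
    for path, text in zip(logbookpaths, parsed):
        store_entries(path, text)

# usage: asyncio.run(load_logbooks_sketch(list_of_logbook_paths))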