diff --git a/core/admin.py b/core/admin.py
index 0b7cf13a6..c5a1484e4 100644
--- a/core/admin.py
+++ b/core/admin.py
@@ -9,12 +9,12 @@ from troggle.core.views_other import downloadLogbook

 class TroggleModelAdmin(admin.ModelAdmin):
-
+
     def save_model(self, request, obj, form, change):
         """overriding admin save to fill the new_since parsing_field"""
         obj.new_since_parsing=True
         obj.save()
-
+
     class Media:
         js = ('jquery/jquery.min.js','js/QM_helper.js')

@@ -44,7 +44,7 @@ class OtherCaveInline(admin.TabularInline):

 class SurveyAdmin(TroggleModelAdmin):
     inlines = (ScannedImageInline,)
-    search_fields = ('expedition__year','wallet_number')
+    search_fields = ('expedition__year','wallet_number')


 class QMsFoundInline(admin.TabularInline):
@@ -52,7 +52,7 @@ class QMsFoundInline(admin.TabularInline):
     fk_name='found_by'
     fields=('number','grade','location_description','comment')#need to add foreignkey to cave part
     extra=1
-
+

 class PhotoInline(admin.TabularInline):
     model = DPhoto
@@ -68,7 +68,7 @@ class PersonTripInline(admin.TabularInline):

 #class LogbookEntryAdmin(VersionAdmin):
 class LogbookEntryAdmin(TroggleModelAdmin):
-    prepopulated_fields = {'slug':("title",)}
+    prepopulated_fields = {'slug':("title",)}
     search_fields = ('title','expedition__year')
     date_heirarchy = ('date')
     inlines = (PersonTripInline, PhotoInline, QMsFoundInline)
@@ -77,11 +77,11 @@ class LogbookEntryAdmin(TroggleModelAdmin):
             "all": ("css/troggleadmin.css",)
             }
     actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt')
-
+
     def export_logbook_entries_as_html(self, modeladmin, request, queryset):
         response=downloadLogbook(request=request, queryset=queryset, extension='html')
         return response
-
+
     def export_logbook_entries_as_txt(self, modeladmin, request, queryset):
         response=downloadLogbook(request=request, queryset=queryset, extension='txt')
         return response
diff --git a/core/fileAbstraction.py b/core/fileAbstraction.py
index 86191b76d..141ae0e30 100644
--- a/core/fileAbstraction.py
+++ b/core/fileAbstraction.py
@@ -15,7 +15,7 @@ def listdir(*path):
         for p in os.listdir(root):
             if os.path.isdir(os.path.join(root, p)):
                 l += p + "/\n"
-
+
             elif os.path.isfile(os.path.join(root, p)):
                 l += p + "\n"
             #Ignore non-files and non-directories
@@ -28,7 +28,7 @@ def listdir(*path):
         c = c.replace("#", "%23")
         print("FILE: ", settings.FILES + "listdir/" + c)
         return urllib.urlopen(settings.FILES + "listdir/" + c).read()
-
+
 def dirsAsList(*path):
     return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"]

diff --git a/core/forms.py b/core/forms.py
index 826517894..15e86d8a0 100644
--- a/core/forms.py
+++ b/core/forms.py
@@ -16,7 +16,7 @@ class CaveForm(ModelForm):
     underground_centre_line = forms.CharField(required = False, widget=forms.Textarea())
     notes = forms.CharField(required = False, widget=forms.Textarea())
     references = forms.CharField(required = False, widget=forms.Textarea())
-    url = forms.CharField(required = True)
+    url = forms.CharField(required = True)
     class Meta:
         model = Cave
         exclude = ("filename",)
@@ -24,9 +24,9 @@ class CaveForm(ModelForm):

     def clean(self):
         if self.cleaned_data.get("kataster_number") == "" and self.cleaned_data.get("unofficial_number") == "":
-            self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
+            self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
         if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "":
-            self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
+            self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
         if self.cleaned_data.get("area") == []:
             self._errors["area"] = self.error_class(["This field is required."])
         if self.cleaned_data.get("url") and self.cleaned_data.get("url").startswith("/"):
@@ -82,11 +82,11 @@ class EntranceLetterForm(ModelForm):
 #    This function returns html-formatted paragraphs for each of the
 #    wikilink types that are related to this logbookentry. Each paragraph
 #    contains a list of all of the related wikilinks.
-#
+#
 #    Perhaps an admin javascript solution would be better.
 #    """
 #    res = ["Please use the following wikilinks, which are related to this logbook entry:"]
-#
+#
 #    res.append(r'<br />QMs found:')
 #    for QM in LogbookEntry.instance.QMs_found.all():
 #        res.append(QM.wiki_link())
@@ -94,12 +94,12 @@ class EntranceLetterForm(ModelForm):
 #    res.append(r'<br />QMs ticked off:')
 #    for QM in LogbookEntry.instance.QMs_ticked_off.all():
 #        res.append(QM.wiki_link())
-
+
 #    res.append(r'<br />People')
 #    for persontrip in LogbookEntry.instance.persontrip_set.all():
 #        res.append(persontrip.wiki_link())
 #    res.append(r'<br />')
-
+
 #    return string.join(res, r'<br />')
diff --git a/parsers/logbooks.py b/parsers/logbooks.py
--- a/parsers/logbooks.py
+++ b/parsers/logbooks.py
@@ ... @@ def Parseloghtml01(year, expedition, txt):
         mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
         if mtu:
             tu = mtu.group(1)
@@ -228,7 +228,7 @@ def Parseloghtml01(year, expedition, txt):
tripcave = triptitles[0].strip()
ltriptext = triptext
-
+
mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
if mtail:
#print mtail.group(0)
@@ -240,7 +240,6 @@ def Parseloghtml01(year, expedition, txt):
ltriptext = re.sub(r"?u>", "_", ltriptext)
ltriptext = re.sub(r"?i>", "''", ltriptext)
ltriptext = re.sub(r"?b>", "'''", ltriptext)
-
#print ldate, trippeople.strip()
# could includ the tripid (url link for cross referencing)
@@ -301,7 +300,7 @@ def SetDatesFromLogbookEntries(expedition):
def LoadLogbookForExpedition(expedition):
""" Parses all logbook entries for one expedition """
-
+
expowebbase = os.path.join(settings.EXPOWEB, "years")
yearlinks = settings.LOGBOOK_PARSER_SETTINGS
@@ -344,7 +343,7 @@ def LoadLogbooks():
expos = models.Expedition.objects.all()
for expo in expos:
print("\nLoading Logbook for: " + expo.year)
-
+
# Load logbook for expo
LoadLogbookForExpedition(expo)
@@ -378,17 +377,17 @@ def parseAutoLogBookEntry(filename):
expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
except models.Expedition.DoesNotExist:
- errors.append("Expedition not in database")
+ errors.append("Expedition not in database")
else:
- errors.append("Expediton Year could not be parsed")
+ errors.append("Expediton Year could not be parsed")
titleMatch = titleRegex.search(contents)
if titleMatch:
title, = titleMatch.groups()
if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
- errors.append("Title too long")
+ errors.append("Title too long")
else:
- errors.append("Title could not be found")
+ errors.append("Title could not be found")
caveMatch = caveRegex.search(contents)
if caveMatch:
@@ -397,24 +396,24 @@ def parseAutoLogBookEntry(filename):
cave = models.getCaveByReference(caveRef)
except AssertionError:
cave = None
- errors.append("Cave not found in database")
+ errors.append("Cave not found in database")
else:
cave = None
locationMatch = locationRegex.search(contents)
if locationMatch:
- location, = locationMatch.groups()
+ location, = locationMatch.groups()
else:
location = None
-
+
if cave is None and location is None:
- errors.append("Location nor cave could not be found")
+ errors.append("Location nor cave could not be found")
reportMatch = reportRegex.search(contents)
if reportMatch:
report, = reportMatch.groups()
else:
- errors.append("Contents could not be found")
+ errors.append("Contents could not be found")
if errors:
return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
people = []
@@ -429,7 +428,7 @@ def parseAutoLogBookEntry(filename):
author = bool(author)
else:
errors.append("Persons name could not be found")
-
+
TUMatch = TURegex.search(contents)
if TUMatch:
TU, = TUMatch.groups()
@@ -439,15 +438,15 @@ def parseAutoLogBookEntry(filename):
people.append((name, author, TU))
if errors:
return errors # Bail out before commiting to the database
- logbookEntry = models.LogbookEntry(date = date,
+ logbookEntry = models.LogbookEntry(date = date,
expedition = expedition,
- title = title, cave = cave, place = location,
+ title = title, cave = cave, place = location,
text = report, slug = slugify(title)[:50],
filename = filename)
logbookEntry.save()
for name, author, TU in people:
- models.PersonTrip(personexpedition = personExpo,
- time_underground = TU,
- logbook_entry = logbookEntry,
+ models.PersonTrip(personexpedition = personExpo,
+ time_underground = TU,
+ logbook_entry = logbookEntry,
is_logbook_entry_author = author).save()
print(logbookEntry)
diff --git a/parsers/people.py b/parsers/people.py
index 28a036a74..0d253c96a 100644
--- a/parsers/people.py
+++ b/parsers/people.py
@@ -12,22 +12,22 @@ def saveMugShot(mugShotPath, mugShotFilename, person):
mugShotFilename=mugShotFilename[2:]
else:
mugShotFilename=mugShotFilename # just in case one doesn't
-
+
dummyObj=models.DPhoto(file=mugShotFilename)
-
+
#Put a copy of the file in the right place. mugShotObj.file.path is determined by the django filesystemstorage specified in models.py
if not os.path.exists(dummyObj.file.path):
shutil.copy(mugShotPath, dummyObj.file.path)
-
+
mugShotObj, created = save_carefully(
models.DPhoto,
lookupAttribs={'is_mugshot':True, 'file':mugShotFilename},
nonLookupAttribs={'caption':"Mugshot for "+person.first_name+" "+person.last_name}
)
-
+
if created:
mugShotObj.contains_person.add(person)
- mugShotObj.save()
+ mugShotObj.save()
def parseMugShotAndBlurb(personline, header, person):
"""create mugshot Photo instance"""
@@ -45,20 +45,20 @@ def parseMugShotAndBlurb(personline, header, person):
person.save()
def LoadPersonsExpos():
-
+
persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
personreader = csv.reader(persontab)
headers = personreader.next()
header = dict(zip(headers, range(len(headers))))
-
+
# make expeditions
print("Loading expeditions")
years = headers[5:]
-
+
for year in years:
lookupAttribs = {'year':year}
nonLookupAttribs = {'name':"CUCC expo %s" % year}
-
+
save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs)
# make persons
@@ -91,7 +91,7 @@ def LoadPersonsExpos():
person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs)
parseMugShotAndBlurb(personline=personline, header=header, person=person)
-
+
# make person expedition from table
for year, attended in zip(headers, personline)[5:]:
expedition = models.Expedition.objects.get(year=year)
@@ -108,10 +108,10 @@ def GetPersonExpeditionNameLookup(expedition):
res = Gpersonexpeditionnamelookup.get(expedition.name)
if res:
return res
-
+
res = { }
duplicates = set()
-
+
print("Calculating GetPersonExpeditionNameLookup for " + expedition.year)
personexpeditions = models.PersonExpedition.objects.filter(expedition=expedition)
htmlparser = HTMLParser()
@@ -139,16 +139,16 @@ def GetPersonExpeditionNameLookup(expedition):
possnames.append(personexpedition.nickname.lower() + " " + l[0])
if str(personexpedition.nickname.lower() + l[0]) not in possnames:
possnames.append(personexpedition.nickname.lower() + l[0])
-
+
for possname in possnames:
if possname in res:
duplicates.add(possname)
else:
res[possname] = personexpedition
-
+
for possname in duplicates:
del res[possname]
-
+
Gpersonexpeditionnamelookup[expedition.name] = res
return res
diff --git a/parsers/subcaves.py b/parsers/subcaves.py
index 739af4441..364da0dc2 100644
--- a/parsers/subcaves.py
+++ b/parsers/subcaves.py
@@ -1,5 +1,7 @@
'''
-This module is the part of troggle that parses descriptions of cave parts (subcaves) from the legacy html files and saves them in the troggle database as instances of the model Subcave. Unfortunately, this parser can not be very flexible because the legacy format is poorly structured.
+This module is the part of troggle that parses descriptions of cave parts (subcaves) from the legacy html
+files and saves them in the troggle database as instances of the model Subcave.
+Unfortunately, this parser can not be very flexible because the legacy format is poorly structured.
'''
import sys, os
@@ -29,12 +31,12 @@ def importSubcaves(cave):
link[0])
subcaveFile=open(subcaveFilePath,'r')
description=subcaveFile.read().decode('iso-8859-1').encode('utf-8')
-
+
lookupAttribs={'title':link[1], 'cave':cave}
nonLookupAttribs={'description':description}
newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)
- logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
+ logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
except IOError:
logging.info("Subcave import couldn't open "+subcaveFilePath)
diff --git a/parsers/survex.py b/parsers/survex.py
index 01f6d2102..c70a80b4b 100644
--- a/parsers/survex.py
+++ b/parsers/survex.py
@@ -19,12 +19,12 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
ls = sline.lower().split()
ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
ssto = survexblock.MakeSurvexStation(ls[stardata["to"]])
-
+
survexleg = models.SurvexLeg(block=survexblock, stationfrom=ssfrom, stationto=ssto)
if stardata["type"] == "normal":
try:
survexleg.tape = float(ls[stardata["tape"]])
- except ValueError:
+ except ValueError:
print("Tape misread in", survexblock.survexfile.path)
print("Stardata:", stardata)
print("Line:", ls)
@@ -69,7 +69,7 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
# only save proper legs
survexleg.save()
-
+
itape = stardata.get("tape")
if itape:
try:
@@ -106,7 +106,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
stardata = stardatadefault
teammembers = [ ]
- # uncomment to print out all files during parsing
+ # uncomment to print out all files during parsing
print(" - Reading file: " + survexblock.survexfile.path)
stamp = datetime.now()
lineno = 0
@@ -198,7 +198,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
#print('Cave -sline ' + str(cave))
if not sline:
continue
-
+
# detect the star command
mstar = regex_star.match(sline)
if not mstar:
@@ -214,7 +214,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(' - Passage: ')
#Missing "station" in stardata.
continue
-
+
# detect the star command
cmd, line = mstar.groups()
cmd = cmd.lower()
@@ -238,7 +238,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
survexblock.save()
fininclude = includesurvexfile.OpenFile()
RecursiveLoad(survexblock, includesurvexfile, fininclude, textlines)
-
+
elif re.match("begin$(?i)", cmd):
if line:
newsvxpath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
@@ -265,7 +265,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
RecursiveLoad(survexblockdown, survexfile, fin, textlinesdown)
else:
iblankbegins += 1
-
+
elif re.match("end$(?i)", cmd):
if iblankbegins:
iblankbegins -= 1
@@ -277,7 +277,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
timetaken = endstamp - stamp
# print(' - Time to process: ' + str(timetaken))
return
-
+
elif re.match("date$(?i)", cmd):
if len(line) == 10:
#print(' - Date found: ' + line)
@@ -288,7 +288,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
survexblock.expedition = expeditions[0]
survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
survexblock.save()
-
+
elif re.match("team$(?i)", cmd):
pass
# print(' - Team found: ')
@@ -304,13 +304,13 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if personexpedition:
personrole.person=personexpedition.person
personrole.save()
-
+
elif cmd == "title":
#print(' - Title found: ')
survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexfile.cave)
survextitle.save()
pass
-
+
elif cmd == "require":
# should we check survex version available for processing?
pass
@@ -329,7 +329,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
stardata = stardatadefault
else:
assert ls[0] == "passage", line
-
+
elif cmd == "equate":
#print(' - Equate found: ')
LoadSurvexEquate(survexblock, line)
diff --git a/parsers/surveys.py b/parsers/surveys.py
index 0eed6f070..d5dc128b1 100644
--- a/parsers/surveys.py
+++ b/parsers/surveys.py
@@ -24,7 +24,7 @@ def readSurveysFromCSV():
try: # could probably combine these two
surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
except IOError:
- import cStringIO, urllib
+ import cStringIO, urllib
surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
dialect=csv.Sniffer().sniff(surveytab.read())
surveytab.seek(0,0)
@@ -37,7 +37,7 @@ def readSurveysFromCSV():
print("There are no expeditions in the database. Please run the logbook parser.")
sys.exit()
-
+
logging.info("Deleting all scanned images")
ScannedImage.objects.all().delete()
@@ -48,7 +48,7 @@ def readSurveysFromCSV():
for survey in surveyreader:
# I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
- walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
+ walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
- {{ pic.caption }} edit {{pic}}
+ {{ pic.caption }}
-