forked from expo/troggle

Support HTML and wiki logbook entries.
Move nearest_station to nearest_station_name and make nearest_station a foreign key to SurvexStation. Lots of tidying.

This commit is contained in:
parent c8551991b2
commit 9cd8734947
@@ -104,21 +104,19 @@ class Expedition(TroggleModel):
def day_max(self):
res = self.expeditionday_set.all()
return res and res[len(res) - 1] or None

class ExpeditionDay(TroggleModel):
expedition = models.ForeignKey("Expedition")
date = models.DateField()

class Meta:
ordering = ('date',)
ordering = ('date',)

def GetPersonTrip(self, personexpedition):
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None

#
# single Person, can go on many years
#

@@ -247,18 +245,22 @@ class PersonExpedition(TroggleModel):
# Single parsed entry from Logbook
#
class LogbookEntry(TroggleModel):
date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.

LOGBOOK_ENTRY_TYPES = (
("wiki", "Wiki style logbook"),
("html", "Html style logbook")
)

date = models.DateTimeField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.ld()
expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
#author = models.ForeignKey(PersonExpedition,blank=True,null=True) # the person who writes it up doesn't have to have been on the trip.
# Re: the above- so this field should be "typist" or something, not "author". - AC 15 jun 09
#MJG wants to KILL THIS, as it is typically redundant with PersonTrip.is_logbook_entry_author, in the rare it was not redundanty and of actually interest it could be added to the text.
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
entry_type = models.CharField(default="wiki",null=True,choices=LOGBOOK_ENTRY_TYPES,max_length=50)

class Meta:
verbose_name_plural = "Logbook Entries"
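The new entry_type field records which logbook format an entry was parsed from, with "wiki" as the default so existing data keeps its old behaviour. A minimal sketch of how the field can be used, assuming a configured troggle/Django environment; the helper, the sample values and the unsaved entry below are illustrative only and not part of this commit:

from troggle.core.models import LogbookEntry

def html_entries_for(year):
    # entries for one expedition year that were parsed from an HTML logbook
    return LogbookEntry.objects.filter(expedition__year=year, entry_type="html")

entry = LogbookEntry(title="Surface prospecting", text="<p>Walked the plateau.</p>", entry_type="html")
print(entry.get_entry_type_display())  # Django's standard choices accessor -> "Html style logbook"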
@@ -297,7 +299,7 @@ class LogbookEntry(TroggleModel):
if self.cave:
nextQMnumber=self.cave.new_QM_number(self.date.year)
else:
return none
return None
return nextQMnumber

def new_QM_found_link(self):

@@ -307,6 +309,7 @@ class LogbookEntry(TroggleModel):
def DayIndex(self):
return list(self.expeditionday.logbookentry_set.all()).index(self)

#
# Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
#

@@ -452,7 +455,7 @@ class Cave(TroggleModel):
elif self.unofficial_number:
href = self.unofficial_number
else:
href = official_name.lower()
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
@@ -744,17 +747,17 @@ class QM(TroggleModel):

number = models.IntegerField(help_text="this is the sequential number in the year", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
)
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
#should be a foreignkey to surveystation
nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
nearest_station = models.CharField(max_length=200,blank=True,null=True)
nearest_station_name = models.CharField(max_length=200,blank=True,null=True)
nearest_station = models.ForeignKey(SurvexStation,null=True,blank=True)
area = models.CharField(max_length=100,blank=True,null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
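nearest_station is now a ForeignKey to SurvexStation and the old free-text value lives in nearest_station_name, but no data migration is included in this diff. A hypothetical one-off script for linking existing QMs up afterwards, assuming SurvexStation has a name field to match on and lives in troggle.core.models_survex (neither is shown in this hunk):

from troggle.core.models import QM
from troggle.core.models_survex import SurvexStation  # assumed module path

def link_qms_to_stations():
    for qm in QM.objects.exclude(nearest_station_name=None).exclude(nearest_station_name=""):
        try:
            qm.nearest_station = SurvexStation.objects.get(name=qm.nearest_station_name)
            qm.save()
        except SurvexStation.DoesNotExist:
            pass  # no matching survex station yet; keep the name-only record as it is
        except SurvexStation.MultipleObjectsReturned:
            pass  # ambiguous station name; leave it for manual fix-up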
@@ -834,7 +837,7 @@ class ScannedImage(TroggleImageModel):
#This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
def correctURL(self):
return string.replace(self.file.url,r'#',r'%23')

def __unicode__(self):
return get_scan_path(self,'')

@@ -225,4 +225,4 @@ class TunnelFile(models.Model):

class Meta:
ordering = ('tunnelpath',)

@@ -7,7 +7,6 @@ from troggle.core.models import QM, DPhoto, LogbookEntry, Cave
import re, urlparse

register = template.Library()

@register.filter()
def plusone(n):

@@ -77,7 +76,7 @@ def wiki_to_html_short(value, autoescape=None):
if number>1:
return '<h'+num+'>'+matchobj.groups()[1]+'</h'+num+'>'
else:
print 'morethanone'
print('morethanone')
return matchobj.group()
value = re.sub(r"(?m)^(=+)([^=]+)(=+)$",headerrepl,value)

@@ -143,13 +142,13 @@ def wiki_to_html_short(value, autoescape=None):
value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL)

#make cave links
value = re.sub("\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
#make people links
value = re.sub("\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub(r"\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make subcave links
value = re.sub("\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub(r"\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make cavedescription links
value = re.sub("\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub(r"\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
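The only change in the hunk above is the r"" prefix on the [[...]] patterns, so escapes like \[ and \s are handled by the re module rather than by Python's string parser. A standalone check of the cave-link pattern, with an invented URL_ROOT and sample text standing in for settings and real logbook content:

import re

URL_ROOT = "/"  # stand-in for settings.URL_ROOT
wikitext = "Rigged the entrance series of [[cave:204]] before lunch."
html = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]",
              r'<a href="%scave/\1/">\1</a>' % URL_ROOT, wikitext)
print(html)  # Rigged the entrance series of <a href="/cave/204/">204</a> before lunch.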
@@ -17,19 +17,19 @@ def parseCaveQMs(cave,inputFile):
try:
steinBr=Cave.objects.get(official_name="Steinbrückenhöhle")
except Cave.DoesNotExist:
print "Steinbruckenhoehle is not in the database. Please run parsers.cavetab first."
print("Steinbruckenhoehle is not in the database. Please run parsers.cavetab first.")
return
elif cave=='hauch':
try:
hauchHl=Cave.objects.get(official_name="Hauchhöhle")
except Cave.DoesNotExist:
print "Hauchhoele is not in the database. Please run parsers.cavetab first."
print("Hauchhoele is not in the database. Please run parsers.cavetab first.")
return
elif cave =='kh':
try:
kh=Cave.objects.get(official_name="Kaninchenhöhle")
except Cave.DoesNotExist:
print "KH is not in the database. Please run parsers.cavetab first."
print("KH is not in the database. Please run parsers.cavetab first.")
parse_KH_QMs(kh, inputFile=inputFile)
return

@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
print cave+" placeholder logbook entry for " + str(year) + " added to database"
print(cave + " placeholder logbook entry for " + str(year) + " added to database")
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder

@@ -71,19 +71,18 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
print "overwriting " + str(preexistingQM) +"\r",
print("overwriting " + str(preexistingQM) +"\r")
else: # otherwise, print that it was ignored
print "preserving "+ str(preexistingQM) + ", which was edited in admin \r",
print("preserving " + str(preexistingQM) + ", which was edited in admin \r")

except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()
print "QM "+str(newQM) + ' added to database\r',
print("QM "+str(newQM) + ' added to database\r')

except KeyError: #check on this one
continue
except IndexError:
print "Index error in " + str(line)
print("Index error in " + str(line))
continue

def parse_KH_QMs(kh, inputFile):

@@ -104,7 +103,7 @@ def parse_KH_QMs(kh, inputFile):
}
nonLookupArgs={
'grade':res['grade'],
'nearest_station':res['nearest_station'],
'nearest_station_name':res['nearest_station'],
'location_description':res['description']
}
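parse_KH_QMs now puts the parsed station name into nearest_station_name, matching the model change above. The lookupArgs/nonLookupArgs split feeds save_carefully, which is defined elsewhere in troggle and not shown in this diff; as far as these hunks show, it follows roughly the shape of Django's built-in get_or_create, sketched below with assumed lookup keys (the actual contents of lookupArgs sit outside this hunk):

from troggle.core.models import QM

def store_kh_qm(found_by, number, grade, station_name, description):
    lookupArgs = {'found_by': found_by, 'number': number}    # assumed identity fields for a QM
    nonLookupArgs = {'grade': grade,
                     'nearest_station_name': station_name,   # renamed field from this commit
                     'location_description': description}
    # rough stand-in for save_carefully(QM, lookupArgs, nonLookupArgs); the real helper may differ in detail
    qm, created = QM.objects.get_or_create(defaults=nonLookupArgs, **lookupArgs)
    return qm, created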
@@ -115,3 +114,4 @@ parseCaveQMs(cave='stein',inputFile=r"1623/204/qm.csv")
parseCaveQMs(cave='hauch',inputFile=r"1623/234/qm.csv")
parseCaveQMs(cave='kh', inputFile="1623/161/qmtodo.htm")
#parseCaveQMs(cave='balkonhoehle',inputFile=r"1623/264/qm.csv")

@@ -45,7 +45,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
author = res[-1][0]
return res, author

def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
# print "Getting cave for " , place
try:
katastNumRes=[]

@@ -74,23 +74,23 @@ def GetTripCave(place): #need to be fuzzier about matching h

noncaveplaces = [ "Journey", "Loser Plateau" ]
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground):
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, entry_type="wiki"):
""" saves a logbook entry and related persontrips """
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author:
print(" - skipping logentry" + title + " no author for entry")
print(" - Skipping logentry: " + title + " no author for entry")
return

# tripCave = GetTripCave(place)
#

#tripCave = GetTripCave(place)

lplace = place.lower()
if lplace not in noncaveplaces:
cave=GetCaveLookup().get(lplace)

#Check for an existing copy of the current entry, and save
expeditionday = expedition.get_expedition_day(date)
lookupAttribs={'date':date, 'title':title}
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50]}
lookupAttribs={'date':date, 'title':title}
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50], 'entry_type':entry_type}
lbo, created=save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs)

for tripperson, time_underground in trippersons:
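EnterLogIntoDbase gains an entry_type keyword that defaults to "wiki", so the existing wiki parsers keep working unchanged while the HTML parsers further down pass entry_type="html" explicitly. An illustrative pair of calls with made-up values; the import path and the expedition argument are assumptions, not part of this commit:

from datetime import date
from troggle.parsers.logbooks import EnterLogIntoDbase  # assumed module path for the parser above

def store_example_trips(expedition):
    # 'expedition' is assumed to be a troggle Expedition instance loaded elsewhere
    # stored as a wiki entry via the default entry_type
    EnterLogIntoDbase(date=date(2009, 7, 21), place="204", title="Rigging trip",
                      text="Rigged the entrance pitch.", trippeople="Becka Lawson",
                      expedition=expedition, logtime_underground=0)
    # stored as an html entry, as the Parseloghtml* parsers below do
    EnterLogIntoDbase(date=date(2009, 7, 22), place="204", title="Surveying",
                      text="<p>Surveyed the new passage.</p>", trippeople="Becka Lawson",
                      expedition=expedition, logtime_underground=0,
                      entry_type="html")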
@@ -177,7 +177,9 @@ def Parseloghtmltxt(year, expedition, txt):
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
if logbook_entry_count == 0:
print(" - No trip entrys found in logbook, check the syntax matches htmltxt format")

@@ -226,7 +228,9 @@ def Parseloghtml01(year, expedition, txt):

#print ldate, trippeople.strip()
# could includ the tripid (url link for cross referencing)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")

# parser for 2003
def Parseloghtml03(year, expedition, txt):

@@ -255,7 +259,9 @@ def Parseloghtml03(year, expedition, txt):
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
ltriptext = re.sub(r"[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle,
text = ltriptext, trippeople=trippeople, expedition=expedition,
logtime_underground=0, entry_type="html")

def SetDatesFromLogbookEntries(expedition):

@@ -59,22 +59,19 @@ def LoadPersonsExpos():

save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs)

# make persons
print("Loading personexpeditions")
#expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
#expomissing = set(expoers2008)

for personline in personreader:
name = personline[header["Name"]]
name = re.sub(r"<.*?>", "", name)
mname = re.match(r"(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?", name)
nickname = mname.group(3) or ""

lookupAttribs={'first_name':mname.group(1), 'last_name':(mname.group(2) or "")}
nonLookupAttribs={'is_vfho':personline[header["VfHO member"]],}
person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs)

parseMugShotAndBlurb(personline=personline, header=header, person=person)

# make person expedition from table
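The name-parsing regex in the hunk above splits a folk-table cell of the form "First Last (Nickname)", tolerating the "van "/"ten " surname prefixes; both the surname and the nickname are optional. A standalone check with invented sample names:

import re

pattern = r"(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?"
for name in ["Becka Lawson", "Anna van Houten (Anna)", "Wookey"]:
    m = re.match(pattern, name)
    print("%s | %s | %s" % (m.group(1), m.group(2) or "", m.group(3) or ""))
# Becka | Lawson |
# Anna | van Houten | Anna
# Wookey |  |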
@@ -88,6 +85,8 @@ def LoadPersonsExpos():

# this fills in those people for whom 2008 was their first expo
#print "Loading personexpeditions 2008"
#expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
#expomissing = set(expoers2008)
#for name in expomissing:
# firstname, lastname = name.split()
# is_guest = name in ["Eeva Makiranta", "Keith Curtis"]

@@ -103,18 +102,6 @@ def LoadPersonsExpos():
# personexpedition = models.PersonExpedition(person=person, expedition=expedition, nickname="", is_guest=is_guest)
# personexpedition.save()

#Notability is now a method of person. Makes no sense to store it in the database; it would need to be recalculated every time something changes. - AC 16 Feb 09
# could rank according to surveying as well
#print "Setting person notability"
#for person in models.Person.objects.all():
#person.notability = 0.0
#for personexpedition in person.personexpedition_set.all():
#if not personexpedition.is_guest:
#person.notability += 1.0 / (2012 - int(personexpedition.expedition.year))
#person.bisnotable = person.notability > 0.3 # I don't know how to filter by this
#person.save()

# used in other referencing parser functions
# expedition name lookup cached for speed (it's a very big list)
Gpersonexpeditionnamelookup = { }

@@ -92,13 +92,13 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
teammembers = [ ]

# uncomment to print out all files during parsing
print("Reading file:", survexblock.survexfile.path)
print("Reading file: " + survexblock.survexfile.path)
while True:
svxline = fin.readline().decode("latin1")
if not svxline:
return
textlines.append(svxline)

# break the line at the comment
sline, comment = re.match(r"([^;]*?)\s*(?:;\s*(.*))?\n?$", svxline.strip()).groups()
@@ -67,7 +67,12 @@
<div id="col1">
<div class="logbookentry">
<b>{{logbookentry.date}}</b>
{{logbookentry.text}}</div>
{% if logbookentry.entry_type == "html" %}
<p>{{logbookentry.text|safe}}</p>
{% else %}
{{logbookentry.text|wiki_to_html}}
{% endif %}
</div>
</div>
</div>
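The template now trusts HTML-typed entries as-is and runs everything else through the wiki markup filter. A hypothetical helper that mirrors that branch in Python, assuming wiki_to_html is the template filter living alongside wiki_to_html_short in troggle's templatetags (module path assumed, not shown in this diff):

from django.utils.safestring import mark_safe
from troggle.core.templatetags.wiki_markup import wiki_to_html  # assumed filter location

def rendered_text(logbookentry):
    if logbookentry.entry_type == "html":
        return mark_safe(logbookentry.text)   # equivalent of {{ logbookentry.text|safe }}
    return wiki_to_html(logbookentry.text)    # equivalent of {{ logbookentry.text|wiki_to_html }}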