
Convert codebase for python3 usage

Philip Sargent 2020-05-24 01:57:06 +01:00 committed by Wookey
parent 35f85c55f1
commit 50d753a87b
38 changed files with 288 additions and 261 deletions
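Before the file-by-file diff, here is a minimal, hypothetical sketch (not taken from the commit) that collects the recurring Python 2 → 3 substitutions it applies: the `urlparse`/`urllib` split into `urllib.parse` and `urllib.request`, `sorted()` replacing build-then-`.sort()`, `dict.items()` replacing `iteritems()`, `print` as a function, and plain `str` in place of `unicode`. `URL_ROOT` and the sample data below are illustrative stand-ins, not troggle code.

```python
# Illustrative only: the Python 2 -> 3 patterns applied throughout this commit,
# condensed into one runnable snippet. URL_ROOT and the sample data are made up.
import urllib.parse            # Python 2: urlparse
import urllib.request          # Python 2: urllib.urlopen lived in urllib
from functools import reduce   # reduce() is no longer a builtin in Python 3

URL_ROOT = "https://expo.example.org/"   # hypothetical stand-in for settings.URL_ROOT

def get_admin_url(object_name, pk):
    # Python 2: urlparse.urljoin(...)  ->  Python 3: urllib.parse.urljoin(...)
    return urllib.parse.urljoin(URL_ROOT, "/admin/core/%s/%s" % (object_name.lower(), pk))

# Python 2 idiom: build the list, then names.sort()  ->  Python 3: sorted(...) in one step
names = ["-----"] + sorted(["Wookey", "Philip", "Aaron"])

# Python 2: dict.iteritems()  ->  Python 3: dict.items()
pathsdict = {"EXPOWEB": "/expoweb", "SURVEYS": "/surveys"}
bycodeslist = sorted(pathsdict.items())

# Python 2: print "text" and u"..." literals  ->  Python 3: print("text") and plain "..."
print("admin url:", get_admin_url("Cave", 42))
print("names:", names)
print("paths:", bycodeslist)
print("joined:", reduce(lambda x, y: x + "/" + y, ["a", "b", "c"]))
```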

View File

@@ -1,6 +1,7 @@
 import troggle.settings as settings
 import os
-import urllib
+import urllib.request, urllib.parse, urllib.error
+from functools import reduce
 def urljoin(x, y): return x + "/" + y
@@ -26,8 +27,8 @@ def listdir(*path):
 else:
 c = ""
 c = c.replace("#", "%23")
-print("FILE: ", settings.FILES + "listdir/" + c)
-return urllib.urlopen(settings.FILES + "listdir/" + c).read()
+print(("FILE: ", settings.FILES + "listdir/" + c))
+return urllib.request.urlopen(settings.FILES + "listdir/" + c).read()
 def dirsAsList(*path):
 return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"]
@@ -39,5 +40,5 @@ def readFile(*path):
 try:
 f = open(os.path.join(settings.FILES, *path))
 except:
-f = urllib.urlopen(settings.FILES + "download/" + reduce(urljoin, path))
+f = urllib.request.urlopen(settings.FILES + "download/" + reduce(urljoin, path))
 return f.read()

View File

@@ -1,5 +1,5 @@
 from django.forms import ModelForm
-from models import Cave, Person, PersonExpedition, LogbookEntry, QM, Expedition, Entrance, CaveAndEntrance
+from .models import Cave, Person, PersonExpedition, LogbookEntry, QM, Expedition, Entrance, CaveAndEntrance
 import django.forms as forms
 from django.forms.models import modelformset_factory
 from django.contrib.admin.widgets import AdminDateWidget
@@ -114,8 +114,7 @@ def getTripForm(expedition):
 class TripForm(forms.Form):
 date = forms.DateField()
 title = forms.CharField(max_length=200)
-caves = [cave.reference() for cave in Cave.objects.all()]
-caves.sort()
+caves = sorted([cave.reference() for cave in Cave.objects.all()])
 caves = ["-----"] + caves
 cave = forms.ChoiceField([(c, c) for c in caves], required=False)
 location = forms.CharField(max_length=200, required=False)
@@ -123,7 +122,7 @@ def getTripForm(expedition):
 html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
 def clean(self):
-print(dir(self))
+print((dir(self)))
 if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"):
 self._errors["cave"] = self.error_class(["This field is required"])
 if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"):
@@ -131,8 +130,7 @@ def getTripForm(expedition):
 return self.cleaned_data
 class PersonTripForm(forms.Form):
-names = [get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)]
-names.sort()
+names = sorted([get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)])
 names = ["-----"] + names
 name = forms.ChoiceField([(n, n) for n in names])
 TU = forms.FloatField(required=False)

View File

@@ -1,5 +1,18 @@
-import urllib, urlparse, string, os, datetime, logging, re
+import string
+import os
+import datetime
+import logging
+import re
 import subprocess
+from urllib.request import *
+from urllib.parse import *
+from urllib.error import *
+from decimal import Decimal, getcontext
+getcontext().prec=2 #use 2 significant figures for decimal calculations
+import settings
 from django.forms import ModelForm
 from django.db import models
 from django.contrib import admin
@@ -8,12 +21,8 @@ from django.contrib.auth.models import User
 from django.contrib.contenttypes.models import ContentType
 from django.db.models import Min, Max
 from django.conf import settings
-from decimal import Decimal, getcontext
 from django.core.urlresolvers import reverse
-from imagekit.models import ImageModel
 from django.template import Context, loader
-import settings
-getcontext().prec=2 #use 2 significant figures for decimal calculations
 from troggle.core.models_survex import *
@@ -30,7 +39,7 @@ def get_related_by_wikilinks(wiki_text):
 number = qmdict['number'])
 res.append(qm)
 except QM.DoesNotExist:
-print('fail on '+str(wikilink))
+print(('fail on '+str(wikilink)))
 return res
@@ -52,7 +61,7 @@ class TroggleModel(models.Model):
 return self._meta.object_name
 def get_admin_url(self):
-return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
+return urllib.parse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
 class Meta:
 abstract = True
@@ -64,7 +73,7 @@ class TroggleImageModel(models.Model):
 return self._meta.object_name
 def get_admin_url(self):
-return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
+return urllib.parse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
 class Meta:
@@ -85,7 +94,7 @@ class Expedition(TroggleModel):
 get_latest_by = 'year'
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
 # construction function. should be moved out
 def get_expedition_day(self, date):
@@ -117,10 +126,9 @@ class ExpeditionDay(TroggleModel):
 personexpeditions = self.persontrip_set.filter(expeditionday=self)
 return personexpeditions and personexpeditions[0] or None
-#
-# single Person, can go on many years
-#
 class Person(TroggleModel):
+"""single Person, can go on many years
+"""
 first_name = models.CharField(max_length=100)
 last_name = models.CharField(max_length=100)
 fullname = models.CharField(max_length=200)
@@ -132,7 +140,7 @@ class Person(TroggleModel):
 orderref = models.CharField(max_length=200) # for alphabetic
 user = models.OneToOneField(User, null=True, blank=True)
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
+return urllib.parse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
 class Meta:
 verbose_name_plural = "People"
@@ -153,7 +161,7 @@ class Person(TroggleModel):
 for personexpedition in self.personexpedition_set.all():
 if not personexpedition.is_guest:
-print(personexpedition.expedition.year)
+print((personexpedition.expedition.year))
 notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
 return notability
@@ -178,10 +186,9 @@ class Person(TroggleModel):
 #self.notability = 0.0 # set temporarily
-#
-# Person's attenance to one Expo
-#
 class PersonExpedition(TroggleModel):
+"""Person's attendance to one Expo
+"""
 expedition = models.ForeignKey(Expedition)
 person = models.ForeignKey(Person)
 slugfield = models.SlugField(max_length=50,blank=True,null=True)
@@ -213,7 +220,6 @@ class PersonExpedition(TroggleModel):
 def __unicode__(self):
 return "%s: (%s)" % (self.person, self.expedition)
 #why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
 def name(self):
 if self.nickname:
@@ -223,7 +229,7 @@ class PersonExpedition(TroggleModel):
 return self.person.first_name
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
 def surveyedleglength(self):
 survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
@@ -238,11 +244,9 @@ class PersonExpedition(TroggleModel):
 res = self.persontrip_set.all().aggregate(day_max=Max("expeditionday__date"))
 return res["day_max"]
-#
-# Single parsed entry from Logbook
-#
 class LogbookEntry(TroggleModel):
+"""Single parsed entry from Logbook
+"""
 LOGBOOK_ENTRY_TYPES = (
 ("wiki", "Wiki style logbook"),
 ("html", "Html style logbook")
@@ -265,22 +269,27 @@ class LogbookEntry(TroggleModel):
 ordering = ('-date',)
 def __getattribute__(self, item):
-if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
-return CaveSlug.objects.get(slug = self.cave_slug).cave
-return super(LogbookEntry, self).__getattribute__(item)
+if item == "cave":
+#Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
+return CaveSlug.objects.get(slug = self.cave_slug).cave
+# parse error in python3.8
+# https://stackoverflow.com/questions/41343263/provide-classcell-example-for-python-3-6-metaclass
+return super(LogbookEntry, self).__getattribute__(item)
 def __init__(self, *args, **kwargs):
-if "cave" in kwargs.keys():
+if "cave" in list(kwargs.keys()):
 if kwargs["cave"] is not None:
 kwargs["cave_slug"] = CaveSlug.objects.get(cave=kwargs["cave"], primary=True).slug
 kwargs.pop("cave")
+# parse error in python3.8
+# https://stackoverflow.com/questions/41343263/provide-classcell-example-for-python-3-6-metaclass
 return super(LogbookEntry, self).__init__(*args, **kwargs)
 def isLogbookEntry(self): # Function used in templates
 return True
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
 def __unicode__(self):
 return "%s: (%s)" % (self.date, self.title)
@@ -357,9 +366,9 @@ class Area(TroggleModel):
 parent = models.ForeignKey('Area', blank=True, null=True)
 def __unicode__(self):
 if self.parent:
-return unicode(self.parent) + u" - " + unicode(self.short_name)
+return str(self.parent) + " - " + str(self.short_name)
 else:
-return unicode(self.short_name)
+return str(self.short_name)
 def kat_area(self):
 if self.short_name in ["1623", "1626"]:
 return self.short_name
@@ -371,7 +380,7 @@ class CaveAndEntrance(models.Model):
 entrance = models.ForeignKey('Entrance')
 entrance_letter = models.CharField(max_length=20,blank=True,null=True)
 def __unicode__(self):
-return unicode(self.cave) + unicode(self.entrance_letter)
+return str(self.cave) + str(self.entrance_letter)
 class CaveSlug(models.Model):
 cave = models.ForeignKey('Cave')
@@ -454,10 +463,10 @@ class Cave(TroggleModel):
 else:
 href = self.official_name.lower()
 #return settings.URL_ROOT + '/cave/' + href + '/'
-return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
-def __unicode__(self, sep = u": "):
-return unicode("slug:"+self.slug())
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
+def __unicode__(self, sep = ": "):
+return str("slug:"+self.slug())
 def get_QMs(self):
 return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
@@ -539,7 +548,7 @@ def getCaveByReference(reference):
 area = Area.objects.get(short_name = areaname)
 #print(area)
 foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
-print(list(foundCaves))
+print((list(foundCaves)))
 if len(foundCaves) == 1:
 return foundCaves[0]
 else:
@@ -549,7 +558,7 @@ class OtherCaveName(TroggleModel):
 name = models.CharField(max_length=160)
 cave = models.ForeignKey(Cave)
 def __unicode__(self):
-return unicode(self.name)
+return str(self.name)
 class EntranceSlug(models.Model):
 entrance = models.ForeignKey('Entrance')
@@ -597,7 +606,7 @@ class Entrance(TroggleModel):
 cached_primary_slug = models.CharField(max_length=200,blank=True,null=True)
 def __unicode__(self):
-return unicode(self.slug())
+return str(self.slug())
 def exact_location(self):
 return SurvexStation.objects.lookup(self.exact_station)
@@ -714,12 +723,12 @@ class CaveDescription(TroggleModel):
 def __unicode__(self):
 if self.long_name:
-return unicode(self.long_name)
+return str(self.long_name)
 else:
-return unicode(self.short_name)
+return str(self.short_name)
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
 def save(self):
 """
@@ -734,7 +743,7 @@ class CaveDescription(TroggleModel):
 class NewSubCave(TroggleModel):
 name = models.CharField(max_length=200, unique = True)
 def __unicode__(self):
-return unicode(self.name)
+return str(self.name)
 class QM(TroggleModel):
 #based on qm.csv in trunk/expoweb/1623/204 which has the fields:
@@ -762,14 +771,14 @@ class QM(TroggleModel):
 comment=models.TextField(blank=True,null=True)
 def __unicode__(self):
-return u"%s %s" % (self.code(), self.grade)
+return "%s %s" % (self.code(), self.grade)
 def code(self):
-return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
+return "%s-%s-%s" % (str(self.found_by.cave)[6:], self.found_by.date.year, self.number)
 def get_absolute_url(self):
 #return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
-return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
 def get_next_by_id(self):
 return QM.objects.get(id=self.id+1)
@@ -778,7 +787,7 @@ class QM(TroggleModel):
 return QM.objects.get(id=self.id-1)
 def wiki_link(self):
-return u"%s%s%s" % ('[[QM:',self.code(),']]')
+return "%s%s%s" % ('[[QM:',self.code(),']]')
 #photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
 #class DPhoto(TroggleImageModel):
@@ -880,4 +889,4 @@ class DataIssue(TroggleModel):
 ordering = ['date']
 def __unicode__(self):
-return u"%s - %s" % (self.parser, self.message)
+return "%s - %s" % (self.parser, self.message)

View File

@@ -1,7 +1,7 @@
 from django.db import models
 from django.conf import settings
 import os
-import urlparse
+import urllib.parse
 import re
 from django.core.urlresolvers import reverse
@@ -128,8 +128,8 @@ class SurvexBlock(models.Model):
 def isSurvexBlock(self): # Function used in templates
 return True
-def __unicode__(self):
-return self.name and unicode(self.name) or 'no name'
+def __str__(self):
+return self.name and str(self.name) or 'no name'
 def GetPersonroles(self):
 res = [ ]
@@ -185,7 +185,7 @@ class SurvexPersonRole(models.Model):
 expeditionday = models.ForeignKey("ExpeditionDay", null=True)
 def __unicode__(self):
-return unicode(self.person) + " - " + unicode(self.survexblock) + " - " + unicode(self.nrole)
+return str(self.person) + " - " + str(self.survexblock) + " - " + str(self.nrole)
 class SurvexScansFolder(models.Model):
@@ -196,10 +196,10 @@ class SurvexScansFolder(models.Model):
 ordering = ('walletname',)
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
 def __unicode__(self):
-return unicode(self.walletname) + " (Survey Scans Folder)"
+return str(self.walletname) + " (Survey Scans Folder)"
 class SurvexScanSingle(models.Model):
 ffile = models.CharField(max_length=200)
@@ -210,10 +210,10 @@ class SurvexScanSingle(models.Model):
 ordering = ('name',)
 def get_absolute_url(self):
-return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
+return urllib.parse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
 def __unicode__(self):
-return "Survey Scan Image: " + unicode(self.name) + " in " + unicode(self.survexscansfolder)
+return "Survey Scan Image: " + str(self.name) + " in " + str(self.survexscansfolder)
 class TunnelFile(models.Model):

View File

@@ -5,5 +5,5 @@ register = template.Library()
 @register.filter()
 def link(value):
-return mark_safe("<a href=\'%s\'>"%value.get_absolute_url()+unicode(value)+"</a>")
+return mark_safe("<a href=\'%s\'>"%value.get_absolute_url()+str(value)+"</a>")

View File

@@ -4,7 +4,7 @@ from django.template.defaultfilters import stringfilter
 from django.utils.safestring import mark_safe
 from django.conf import settings
 from troggle.core.models import QM, LogbookEntry, Cave
-import re, urlparse
+import re, urllib.parse
 register = template.Library()
@@ -94,7 +94,7 @@ def wiki_to_html_short(value, autoescape=None):
 qm=QM.objects.get(found_by__cave__kataster_number = qmdict['cave'],
 found_by__date__year = qmdict['year'],
 number = qmdict['number'])
-return r'<a href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, unicode(qm))
+return r'<a href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
 except QM.DoesNotExist: #bother aaron to make him clean up the below code - AC
 try:
 placeholder=LogbookEntry.objects.get(date__year=qmdict['year'],cave__kataster_number=qmdict['cave'], title__icontains='placeholder')
@@ -105,7 +105,7 @@ def wiki_to_html_short(value, autoescape=None):
 title='placeholder'
 )
 qm=QM(found_by = placeholder, number = qmdict['number'])
-return r'<a class="redtext" href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, unicode(qm))
+return r'<a class="redtext" href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
 value = re.sub(qmMatchPattern,qmrepl, value, re.DOTALL)

View File

@@ -1,12 +1,12 @@
 from django.conf import settings
-import fileAbstraction
+from . import fileAbstraction
 from django.shortcuts import render_to_response
 from django.http import HttpResponse, Http404
 import os, stat
 import re
 from troggle.core.models import SurvexScansFolder, SurvexScanSingle, SurvexBlock, TunnelFile
 import parsers.surveys
-import urllib
+import urllib.request, urllib.parse, urllib.error
 # inline fileabstraction into here if it's not going to be useful anywhere else
 # keep things simple and ignore exceptions everywhere for now
@@ -113,19 +113,19 @@ def UniqueFile(fname):
 # join it all up and then split them off for the directories that don't exist
 # anyway, this mkdir doesn't work
 def SaveImageInDir(name, imgdir, project, fdata, bbinary):
-print ("hihihihi", fdata, settings.SURVEYS)
+print(("hihihihi", fdata, settings.SURVEYS))
 fimgdir = os.path.join(settings.SURVEYS, imgdir)
 if not os.path.isdir(fimgdir):
-print "*** Making directory", fimgdir
+print("*** Making directory", fimgdir)
 os.path.mkdir(fimgdir)
 fprojdir = os.path.join(fimgdir, project)
 if not os.path.isdir(fprojdir):
-print "*** Making directory", fprojdir
+print("*** Making directory", fprojdir)
 os.path.mkdir(fprojdir)
-print "hhh"
+print("hhh")
 fname = os.path.join(fprojdir, name)
-print fname, "fff"
+print(fname, "fff")
 fname = UniqueFile(fname)
 p2, p1 = os.path.split(fname)
@@ -133,7 +133,7 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
 p4, p3 = os.path.split(p3)
 res = os.path.join(p3, p2, p1)
-print "saving file", fname
+print("saving file", fname)
 fout = open(fname, (bbinary and "wb" or "w"))
 fout.write(fdata.read())
 fout.close()
@@ -145,33 +145,33 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
 def jgtuploadfile(request):
 filesuploaded = [ ]
 project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
-print (project, user, tunnelversion)
-for uploadedfile in request.FILES.values():
+print((project, user, tunnelversion))
+for uploadedfile in list(request.FILES.values()):
 if uploadedfile.field_name in ["tileimage", "backgroundimage"] and \
 uploadedfile.content_type in ["image/png", "image/jpeg"]:
 fname = user + "_" + re.sub("[\\\\/]", "-", uploadedfile.name) # very escaped \
-print fname
+print(fname)
 fileuploaded = SaveImageInDir(fname, uploadedfile.field_name, project, uploadedfile, True)
 filesuploaded.append(settings.URL_ROOT + "/jgtfile/" + fileuploaded)
 if uploadedfile.field_name in ["sketch"] and \
 uploadedfile.content_type in ["text/plain"]:
 fname = user + "_" + re.sub("[\\\\/]", "-", uploadedfile.name) # very escaped \
-print fname
+print(fname)
 fileuploaded = SaveImageInDir(fname, uploadedfile.field_name, project, uploadedfile, False)
 filesuploaded.append(settings.URL_ROOT + "/jgtfile/" + fileuploaded)
 #print "FF", request.FILES
 #print ("FFF", request.FILES.values())
 message = ""
-print "gothere"
+print("gothere")
 return render_to_response('fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})
 def surveyscansfolder(request, path):
 #print [ s.walletname for s in SurvexScansFolder.objects.all() ]
-survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
+survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.parse.unquote(path))
 return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
 def surveyscansingle(request, path, file):
-survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
+survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.parse.unquote(path))
 survexscansingle = SurvexScanSingle.objects.get(survexscansfolder=survexscansfolder, name=file)
 return HttpResponse(content=open(survexscansingle.ffile), content_type=getMimeType(path.split(".")[-1]))
 #return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
@@ -187,21 +187,21 @@ def tunneldata(request):
 def tunnelfile(request, path):
-tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
+tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.parse.unquote(path))
 tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
 return HttpResponse(content=open(tfile), content_type="text/plain")
 def tunnelfileupload(request, path):
-tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
+tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.parse.unquote(path))
 tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
 project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
-print (project, user, tunnelversion)
-assert len(request.FILES.values()) == 1, "only one file to upload"
-uploadedfile = request.FILES.values()[0]
+print((project, user, tunnelversion))
+assert len(list(request.FILES.values())) == 1, "only one file to upload"
+uploadedfile = list(request.FILES.values())[0]
 if uploadedfile.field_name != "sketch":
 return HttpResponse(content="Error: non-sketch file uploaded", content_type="text/plain")

View File

@@ -1,8 +1,8 @@
 # primary namespace
-import view_surveys
-import views_caves
-import views_survex
-import views_logbooks
-import views_other
+from . import view_surveys
+from . import views_caves
+from . import views_survex
+from . import views_logbooks
+from . import views_other

View File

@@ -14,9 +14,7 @@ from django import forms
 from django.core.urlresolvers import reverse
 from django.http import HttpResponse, HttpResponseRedirect
 from django.conf import settings
-import re
-import urlparse
-#import urllib.parse
+import re, urllib.parse
 from django.shortcuts import get_object_or_404, render
 import settings

View File

@@ -62,8 +62,7 @@ def expedition(request, expeditionname):
 expeditions = Expedition.objects.all()
 personexpeditiondays = [ ]
 dateditems = list(this_expedition.logbookentry_set.all()) + list(this_expedition.survexblock_set.all())
-dates = list(set([item.date for item in dateditems]))
-dates.sort()
+dates = sorted(set([item.date for item in dateditems]))
 for personexpedition in this_expedition.personexpedition_set.all():
 prow = [ ]
 for date in dates:
@@ -115,8 +114,7 @@ def GetPersonChronology(personexpedition):
 a.setdefault("personroles", [ ]).append(personrole.survexblock)
 # build up the tables
-rdates = res.keys()
-rdates.sort()
+rdates = sorted(list(res.keys()))
 res2 = [ ]
@@ -206,8 +204,8 @@ def pathsreport(request):
 ncodes = len(pathsdict)
-bycodeslist = sorted(pathsdict.iteritems())
-bypathslist = sorted(pathsdict.iteritems(), key=lambda x: x[1])
+bycodeslist = sorted(pathsdict.items())
+bypathslist = sorted(iter(pathsdict.items()), key=lambda x: x[1])
 return render(request, 'pathsreport.html', {
 "pathsdict":pathsdict,
@@ -264,7 +262,7 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
 'expeditionyear': expeditionyear})
 f.write(template.render(context))
 f.close()
-print(logbookparsers.parseAutoLogBookEntry(filename))
+print((logbookparsers.parseAutoLogBookEntry(filename)))
 return HttpResponseRedirect(reverse('expedition', args=[expedition.year])) # Redirect after POST
 else:
 if pslug and pdate:

View File

@@ -60,7 +60,7 @@ def controlPanel(request):
 databaseReset.dirsredirect()
 for item in importlist:
 if item in request.POST:
-print("running"+ " databaseReset."+item+"()")
+print(("running"+ " databaseReset."+item+"()"))
 exec("databaseReset."+item+"()")
 jobs_completed.append(item)
 else:
@@ -180,7 +180,7 @@ def logbook_entry_suggestions(request):
 try:
 lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in")
 except:
-print("failed to get placeholder for year "+str(qm['year']))
+print(("failed to get placeholder for year "+str(qm['year'])))
 temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade'])
 temp_QM.grade=qm['grade']

View File

@@ -266,8 +266,7 @@ def survexcaveslist(request):
 subdircaves = [ ]
 # first sort the file list
-fnumlist = [ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ]
-fnumlist.sort()
+fnumlist = sorted([ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ])
 print(fnumlist)

View File

@@ -100,7 +100,7 @@ def import_surveyimgs():
 for future re-working to manage progress against notes, plans and elevs.
 """
 #import troggle.parsers.surveys
-print("NOT Importing survey images")
+#print("Importing survey images")
 #troggle.parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
 def import_surveyscans():
@@ -258,7 +258,7 @@ class JobQueue():
 print("-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE'])
-#print("-- DATABASES.default", settings.DATABASES['default'])
+print("-- DATABASES.default", settings.DATABASES['default'])
 # but because the user may be expecting to add this to a db with lots of tables already there,
 # the jobqueue may not start from scratch so we need to initialise the db properly first
@@ -334,11 +334,7 @@ class JobQueue():
 print(" this", end=' ')
 else:
 # prints one place to the left of where you expect
-if r[len(r)-1]:
-s = r[i]-r[len(r)-1]
-else:
-s = 0
-days = (s)/(24*60*60)
+days = (r[i]-r[len(r)-1])/(24*60*60)
 print('%8.2f' % days, end=' ')
 elif r[i]:
 print('%8.1f' % r[i], end=' ')

View File

@@ -1 +0,0 @@
-requirements.txt.dj-1.7.11

docker/requirements.txt Normal file
View File

@@ -0,0 +1,9 @@
+Django==1.7.11
+django-registration==2.1.2
+mysql
+#imagekit
+django-imagekit
+Image
+django-tinymce==2.7.0
+smartencoding
+unidecode

View File

@@ -22,10 +22,10 @@ def qmRow(qm):
 }
 qmRow=['' for x in range(len(headers))]
-for column, modelField in columnsToModelFields.items():
+for column, modelField in list(columnsToModelFields.items()):
 if modelField:
 # Very sorry about the atrocious replace below. I will fix this soon if noone beats me to it. - AC
-qmRow[headers.index(column)]=modelField.replace(u'\xd7','x').replace(u'\u201c','').replace(u'\u2013','').replace(u'\xbd','')
+qmRow[headers.index(column)]=modelField.replace('\xd7','x').replace('\u201c','').replace('\u2013','').replace('\xbd','')
 return qmRow
 def writeQmTable(outfile,cave):

View File

@@ -12,7 +12,7 @@ class SimpleTest(TestCase):
 """
 Tests that 1 + 1 always equals 2.
 """
-self.failUnlessEqual(1 + 1, 2)
+self.assertEqual(1 + 1, 2)
 __test__ = {"doctest": """
 Another way to test that 1 + 1 is equal to 2.

View File

@@ -1,18 +1,19 @@
-import troggle.settings as settings
-from troggle.helper import login_required_if_public
+import os
+import re
-from django.shortcuts import render
+from django.shortcuts import render
 from django.http import HttpResponse, HttpResponseRedirect, Http404
 from django.core.urlresolvers import reverse
 from django.template import Context, loader
 import django.forms as forms
 from tinymce.widgets import TinyMCE
+from troggle.helper import login_required_if_public
 from troggle.flatpages.models import Redirect, EntranceRedirect
 from troggle.core.models import Cave
 import troggle.core.views_caves
+import troggle.settings as settings
-import os
-import re
 def flatpage(request, path):
 try:
@@ -35,7 +36,7 @@ def flatpage(request, path):
 if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated():
-print("flat path noinfo", path)
+print(("flat path noinfo", path))
 return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path)
 if path.endswith("/") or path == "":
@@ -57,32 +58,32 @@ def flatpage(request, path):
 if path.endswith(".htm") or path.endswith(".html"):
 html = o.read()
-m = re.search(r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)", html, re.DOTALL + re.IGNORECASE)
+m = re.search(rb'(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)', html, re.DOTALL + re.IGNORECASE)
 if m:
 preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups()
 else:
 return HttpResponse(html + "Page could not be split into header and body")
-m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
+m = re.search(rb"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
 if m:
 title, = m.groups()
 else:
 title = ""
-m = re.search(r"<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
+m = re.search(rb"<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
 if m:
 editable = False
 else:
 editable = True
 has_menu = False
-menumatch = re.match('(.*)<div id="menu">', body, re.DOTALL + re.IGNORECASE)
+menumatch = re.match(rb'(.*)<div id="menu">', body, re.DOTALL + re.IGNORECASE)
 if menumatch:
 has_menu = True
-menumatch = re.match('(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
+menumatch = re.match(rb'(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
 if menumatch:
 has_menu = True
 #body, = menumatch.groups()
-if re.search(r"iso-8859-1", html):
-body = unicode(body, "iso-8859-1")
+if re.search(rb"iso-8859-1", html):
+body = str(body, "iso-8859-1")
 body.strip
 return render(request, 'flatpage.html', {'editable': editable, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
 else:
@@ -129,7 +130,7 @@ def editflatpage(request, path):
 if linksmatch:
 body, links = linksmatch.groups()
 if re.search(r"iso-8859-1", html):
-body = unicode(body, "iso-8859-1")
+body = str(body, "iso-8859-1")
 else:
 return HttpResponse("Page could not be split into header and body")
 except IOError:
@@ -154,7 +155,7 @@ def editflatpage(request, path):
 postbody = "</html>"
 body = flatpageForm.cleaned_data["html"]
 body = body.replace("\r", "")
-result = u"%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
+result = "%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
 f = open(filepath, "w")
 f.write(result)
 f.close()

View File

@@ -20,9 +20,9 @@ def flush_cache(apps, options):
 """
 apps = [a.strip(',') for a in apps]
 if apps:
-print 'Flushing cache for %s...' % ', '.join(apps)
+print('Flushing cache for %s...' % ', '.join(apps))
 else:
-print 'Flushing caches...'
+print('Flushing caches...')
 for app_label in apps:
 app = cache.get_app(app_label)

View File

@@ -47,7 +47,7 @@ class ImageModelBase(ModelBase):
 except ImportError:
 raise ImportError('Unable to load imagekit config module: %s' % \
 opts.spec_module)
-for spec in [spec for spec in module.__dict__.values() \
+for spec in [spec for spec in list(module.__dict__.values()) \
 if isinstance(spec, type) \
 and issubclass(spec, specs.ImageSpec) \
 and spec != specs.ImageSpec]:
@@ -56,7 +56,7 @@ class ImageModelBase(ModelBase):
 setattr(cls, '_ik', opts)
-class ImageModel(models.Model):
+class ImageModel(models.Model, metaclass=ImageModelBase):
 """ Abstract base class implementing all core ImageKit functionality
 Subclasses of ImageModel are augmented with accessors for each defined
@@ -64,7 +64,6 @@ class ImageModel(models.Model):
 storage locations and other options.
 """
-__metaclass__ = ImageModelBase
 class Meta:
 abstract = True
@@ -81,10 +80,10 @@ class ImageModel(models.Model):
 self._ik.admin_thumbnail_spec
 else:
 if hasattr(self, 'get_absolute_url'):
-return u'<a href="%s"><img src="%s"></a>' % \
+return '<a href="%s"><img src="%s"></a>' % \
 (self.get_absolute_url(), prop.url)
 else:
-return u'<a href="%s"><img src="%s"></a>' % \
+return '<a href="%s"><img src="%s"></a>' % \
 (self._imgfield.url, prop.url)
 admin_thumbnail_view.short_description = _('Thumbnail')
 admin_thumbnail_view.allow_tags = True
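The ImageModel hunk above swaps the Python 2 `__metaclass__` class attribute for the Python 3 `metaclass=` keyword in the class header. Here is a minimal, standalone sketch of that syntax change under invented names (`RegisteringMeta`, `Widget` are hypothetical, not imagekit classes); only the pattern matches the hunk.

```python
# Hypothetical illustration of the metaclass syntax change in the hunk above.

class RegisteringMeta(type):
    registry = []
    def __new__(mcs, name, bases, attrs):
        cls = super().__new__(mcs, name, bases, attrs)
        RegisteringMeta.registry.append(name)   # side effect at class-creation time
        return cls

# Python 2 spelling (silently ignored by Python 3):
#   class Widget(object):
#       __metaclass__ = RegisteringMeta

# Python 3 spelling, the same shape as ImageModel(models.Model, metaclass=ImageModelBase):
class Widget(metaclass=RegisteringMeta):
    pass

print(RegisteringMeta.registry)   # ['Widget']
```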

View File

@@ -18,6 +18,6 @@ class Options(object):
 spec_module = 'imagekit.defaults'
 def __init__(self, opts):
-for key, value in opts.__dict__.iteritems():
+for key, value in opts.__dict__.items():
 setattr(self, key, value)
 self.specs = []

View File

@@ -6,7 +6,7 @@ spec found.
 """
 import os
-from StringIO import StringIO
+from io import StringIO
 from imagekit.lib import *
 from imagekit.utils import img_to_fobj
 from django.core.files.base import ContentFile

View File

@@ -83,4 +83,4 @@ class IKTest(TestCase):
 # make sure image file is deleted
 path = self.p.image.path
 self.p.delete()
-self.failIf(os.path.isfile(path))
+self.assertFalse(os.path.isfile(path))

View File

@@ -1,21 +1,25 @@
 import os
 import time
 import timeit
 import settings
 os.environ['PYTHONPATH'] = settings.PYTHON_PATH
 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
 from django.core import management
 from django.db import connection, close_old_connections
 from django.contrib.auth.models import User
 from django.http import HttpResponse
 from django.core.urlresolvers import reverse
 from troggle.core.models import Cave, Entrance
 import troggle.flatpages.models
 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 def import_auto_logbooks():
-import parsers.logbooks
 import os
+import troggle.parsers.logbooks
 for pt in troggle.core.models.PersonTrip.objects.all():
 pt.delete()
 for lbe in troggle.core.models.LogbookEntry.objects.all():
@@ -27,7 +31,7 @@ def import_auto_logbooks():
 "autologbook")
 for root, dirs, filenames in os.walk(directory):
 for filename in filenames:
-print(os.path.join(root, filename))
+print((os.path.join(root, filename)))
 parsers.logbooks.parseAutoLogBookEntry(os.path.join(root, filename))
 #Temporary function until definitive source of data transfered.
@@ -50,7 +54,7 @@ def dumplogbooks():
 filename = os.path.join(directory,
 dateStr + "." + slugify(lbe.title)[:50] + ".html")
 if lbe.cave:
-print(lbe.cave.reference())
+print((lbe.cave.reference()))
 trip = {"title": lbe.title, "html":lbe.text, "cave": lbe.cave.reference(), "caveOrLocation": "cave"}
 else:
 trip = {"title": lbe.title, "html":lbe.text, "location":lbe.place, "caveOrLocation": "location"}
@@ -63,6 +67,6 @@ def dumplogbooks():
 'date': dateStr,
 'expeditionyear': lbe.expedition.year})
 output = template.render(context)
-f.write(unicode(output).encode( "utf-8" ))
+f.write(str(output).encode( "utf-8" ))
 f.close()
 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

View File

@@ -27,7 +27,7 @@ class SmartAppendSlashMiddleware(object):
 if settings.SMART_APPEND_SLASH and (not old_url[1].endswith('/')) and not _resolves(old_url[1]) and _resolves(old_url[1] + '/'):
 new_url[1] = new_url[1] + '/'
 if settings.DEBUG and request.method == 'POST':
-raise RuntimeError, "You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to %s%s (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings." % (new_url[0], new_url[1])
+raise RuntimeError("You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to %s%s (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings." % (new_url[0], new_url[1]))
 if new_url != old_url:
 # Redirect
 if new_url[0]:

View File

@@ -33,7 +33,7 @@ import getopt, sys
 from django.core.management import setup_environ
 try:
-import settings
+from . import settings
 except ImportError:
 pass
 else:
@@ -180,22 +180,22 @@ def main():
 try:
 opts, args = getopt.getopt(sys.argv[1:], "hd",
 ["help", "disable_fields"])
-except getopt.GetoptError, error:
-print __doc__
+except getopt.GetoptError as error:
+print(__doc__)
 sys.exit(error)
 else:
 if not args:
-print __doc__
+print(__doc__)
 sys.exit()
 kwargs = {}
 for opt, arg in opts:
 if opt in ("-h", "--help"):
-print __doc__
+print(__doc__)
 sys.exit()
 if opt in ("-d", "--disable_fields"):
 kwargs['disable_fields'] = True
-print generate_dot(args, **kwargs)
+print(generate_dot(args, **kwargs))
 if __name__ == "__main__":
 main()

View File

@ -38,7 +38,7 @@ def parseCaveQMs(cave,inputFile):
dialect=csv.Sniffer().sniff(qmCSVContents.read()) dialect=csv.Sniffer().sniff(qmCSVContents.read())
qmCSVContents.seek(0,0) qmCSVContents.seek(0,0)
qmReader = csv.reader(qmCSVContents,dialect=dialect) qmReader = csv.reader(qmCSVContents,dialect=dialect)
qmReader.next() # Skip header row next(qmReader) # Skip header row
for line in qmReader: for line in qmReader:
try: try:
year=int(line[0][1:5]) year=int(line[0][1:5])
@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
elif cave=='hauch': elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl}) placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate: if hadToCreate:
print(cave + " placeholder logbook entry for " + str(year) + " added to database") print((cave + " placeholder logbook entry for " + str(year) + " added to database"))
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb") QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM() newQM = QM()
newQM.found_by=placeholder newQM.found_by=placeholder
@ -71,9 +71,9 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete() preexistingQM.delete()
newQM.save() newQM.save()
print("overwriting " + str(preexistingQM) +"\r") print(("overwriting " + str(preexistingQM) +"\r"))
else: # otherwise, print that it was ignored else: # otherwise, print that it was ignored
print("preserving " + str(preexistingQM) + ", which was edited in admin \r") print(("preserving " + str(preexistingQM) + ", which was edited in admin \r"))
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save() newQM.save()
@ -82,7 +82,7 @@ def parseCaveQMs(cave,inputFile):
except KeyError: #check on this one except KeyError: #check on this one
continue continue
except IndexError: except IndexError:
print("Index error in " + str(line)) print(("Index error in " + str(line)))
continue continue
def parse_KH_QMs(kh, inputFile): def parse_KH_QMs(kh, inputFile):
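
The csv reader change follows from Python 3 renaming the iterator method to __next__: you call the builtin next() on the reader instead of reader.next(). A small sketch with made-up QM data:

    import csv, io

    qmCSVContents = io.StringIO("Number,Grade\nC2004-204-01,B\n")
    qmReader = csv.reader(qmCSVContents)
    next(qmReader)                 # skip header row, as in the hunk above
    for line in qmReader:
        print(line)                # ['C2004-204-01', 'B']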

View File

@ -1,9 +1,10 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import troggle.core.models as models
from django.conf import settings
import os import os
import re import re
from django.conf import settings
import troggle.core.models as models
def readcaves(): def readcaves():
@ -14,11 +15,11 @@ def readcaves():
area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None) area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
print(" - Reading Entrances") print(" - Reading Entrances")
#print "list of <Slug> <Filename>" #print "list of <Slug> <Filename>"
for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files for filename in next(os.walk(settings.ENTRANCEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'): if filename.endswith('.html'):
readentrance(filename) readentrance(filename)
print (" - Reading Caves") print (" - Reading Caves")
for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files for filename in next(os.walk(settings.CAVEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'): if filename.endswith('.html'):
readcave(filename) readcave(filename)
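
os.walk() returns a generator, and generators in Python 3 only expose __next__, so the .next() call becomes the builtin next(). Sketch, listing HTML files in the current directory rather than CAVEDESCRIPTIONS:

    import os

    dirpath, dirnames, filenames = next(os.walk("."))   # first (top-level) triple only
    html_files = [f for f in filenames if f.endswith('.html')]
    print(html_files)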

View File

@ -58,7 +58,7 @@ def LoadCaveTab():
cavetab = open(os.path.join(settings.EXPOWEB, "noinfo", "CAVETAB2.CSV"),'rU') cavetab = open(os.path.join(settings.EXPOWEB, "noinfo", "CAVETAB2.CSV"),'rU')
caveReader = csv.reader(cavetab) caveReader = csv.reader(cavetab)
caveReader.next() # Strip out column headers next(caveReader) # Strip out column headers
logging.info("Beginning to import caves from "+str(cavetab)+"\n"+"-"*60+"\n") logging.info("Beginning to import caves from "+str(cavetab)+"\n"+"-"*60+"\n")

View File

@ -40,7 +40,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
tripperson = re.sub(round_bracket_regex, "", tripperson).strip() tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower()) personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear: if not personyear:
print(" - No name match for: '%s'" % tripperson) print((" - No name match for: '%s'" % tripperson))
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year) message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message) models.DataIssue.objects.create(parser='logbooks', message=message)
res.append((personyear, logtime_underground)) res.append((personyear, logtime_underground))
@ -72,11 +72,11 @@ def GetTripCave(place): #need to be fuzzier about matching here. Already a very
return tripCaveRes return tripCaveRes
elif len(tripCaveRes)>1: elif len(tripCaveRes)>1:
print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)) print(("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)))
correctIndex=input("type list index of correct cave") correctIndex=eval(input("type list index of correct cave"))
return tripCaveRes[correctIndex] return tripCaveRes[correctIndex]
else: else:
print("No cave found for place " , place) print(("No cave found for place " , place))
return return
logentries = [] # the entire logbook is a single object: a list of entries logentries = [] # the entire logbook is a single object: a list of entries
@ -92,7 +92,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground) trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author: if not author:
print(" - Skipping logentry: " + title + " - no author for entry") print((" - Skipping logentry: " + title + " - no author for entry"))
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year) message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message) models.DataIssue.objects.create(parser='logbooks', message=message)
return return
@ -175,7 +175,7 @@ def Parseloghtmltxt(year, expedition, txt):
''', trippara) ''', trippara)
if not s: if not s:
if not re.search(r"Rigging Guide", trippara): if not re.search(r"Rigging Guide", trippara):
print("can't parse: ", trippara) # this is 2007 which needs editing print(("can't parse: ", trippara)) # this is 2007 which needs editing
#assert s, trippara #assert s, trippara
continue continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups() tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
@ -203,7 +203,7 @@ def Parseloghtmltxt(year, expedition, txt):
def Parseloghtml01(year, expedition, txt): def Parseloghtml01(year, expedition, txt):
tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt) tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas: for trippara in tripparas:
s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara) s = re.match("(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
assert s, trippara[:300] assert s, trippara[:300]
tripheader, triptext = s.group(1), s.group(2) tripheader, triptext = s.group(1), s.group(2)
mtripid = re.search(r'<a id="(.*?)"', tripheader) mtripid = re.search(r'<a id="(.*?)"', tripheader)
@ -251,7 +251,7 @@ def Parseloghtml01(year, expedition, txt):
def Parseloghtml03(year, expedition, txt): def Parseloghtml03(year, expedition, txt):
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt) tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas: for trippara in tripparas:
s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara) s = re.match("(?s)\s*<p>(.*?)</p>(.*)$", trippara)
assert s, trippara assert s, trippara
tripheader, triptext = s.group(1), s.group(2) tripheader, triptext = s.group(1), s.group(2)
tripheader = re.sub(r"&nbsp;", " ", tripheader) tripheader = re.sub(r"&nbsp;", " ", tripheader)
@ -261,7 +261,7 @@ def Parseloghtml03(year, expedition, txt):
if re.match("T/U|Time underwater", sheader[-1]): if re.match("T/U|Time underwater", sheader[-1]):
tu = sheader.pop() tu = sheader.pop()
if len(sheader) != 3: if len(sheader) != 3:
print("header not three pieces", sheader) print(("header not three pieces", sheader))
tripdate, triptitle, trippeople = sheader tripdate, triptitle, trippeople = sheader
ldate = ParseDate(tripdate.strip(), year) ldate = ParseDate(tripdate.strip(), year)
triptitles = triptitle.split(" , ") triptitles = triptitle.split(" , ")
@ -325,35 +325,36 @@ def LoadLogbookForExpedition(expedition):
#print " - Cache is more than 30 days old." #print " - Cache is more than 30 days old."
bad_cache= True bad_cache= True
if bad_cache: if bad_cache:
print " - Cache is either stale or more than 30 days old. Deleting it." print(" - Cache is either stale or more than 30 days old. Deleting it.")
os.remove(cache_filename) os.remove(cache_filename)
logentries=[] logentries=[]
raise raise
print(" - Reading cache: " + cache_filename ) print((" - Reading cache: " + cache_filename ))
try: try:
with open(cache_filename, "rb") as f: with open(cache_filename, "rb") as f:
logentries = pickle.load(f) logentries = pickle.load(f)
print " - Loaded ", len(logentries), " objects" print(" - Loaded ", len(logentries), " objects")
logbook_cached = True logbook_cached = True
except: except:
print " - Failed to load corrupt cache. Deleting it.\n" print(" - Failed to load corrupt cache. Deleting it.\n")
os.remove(cache_filename) os.remove(cache_filename)
logentries=[] logentries=[]
raise
except: except:
print(" - Opening logbook: ") print(" - Opening logbook: ")
file_in = open(os.path.join(expowebbase, year_settings[0])) file_in = open(os.path.join(expowebbase, year_settings[0]),'rb')
txt = file_in.read().decode("latin1") txt = file_in.read().decode("latin1")
file_in.close() file_in.close()
parsefunc = year_settings[1] parsefunc = year_settings[1]
logbook_parseable = True logbook_parseable = True
print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1]) print((" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1]))
if logbook_parseable: if logbook_parseable:
parser = globals()[parsefunc] parser = globals()[parsefunc]
parser(expedition.year, expedition, txt) parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition) SetDatesFromLogbookEntries(expedition)
# and this has also stored all the objects in logentries[] # and this has also stored all the objects in logentries[]
print " - Storing " , len(logentries), " log entries" print(" - Storing " , len(logentries), " log entries")
cache_filename = os.path.join(expowebbase, year_settings[0])+".cache" cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
with open(cache_filename, "wb") as f: with open(cache_filename, "wb") as f:
pickle.dump(logentries, f, 2) pickle.dump(logentries, f, 2)
@ -370,7 +371,7 @@ def LoadLogbookForExpedition(expedition):
i +=1 i +=1
else: else:
try: try:
file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE)) file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE),'rb')
txt = file_in.read().decode("latin1") txt = file_in.read().decode("latin1")
file_in.close() file_in.close()
logbook_parseable = True logbook_parseable = True
@ -378,7 +379,7 @@ def LoadLogbookForExpedition(expedition):
parsefunc = settings.DEFAULT_LOGBOOK_PARSER parsefunc = settings.DEFAULT_LOGBOOK_PARSER
except (IOError): except (IOError):
logbook_parseable = False logbook_parseable = False
print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year) print(("Couldn't open default logbook file and nothing in settings for expo " + expedition.year))
#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count()) #return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
@ -391,7 +392,7 @@ def LoadLogbooks():
# Fetch all expos # Fetch all expos
expos = models.Expedition.objects.all() expos = models.Expedition.objects.all()
for expo in expos: for expo in expos:
print("\nLoading Logbook for: " + expo.year) print(("\nLoading Logbook for: " + expo.year))
# Load logbook for expo # Load logbook for expo
LoadLogbookForExpedition(expo) LoadLogbookForExpedition(expo)
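
The logbook files are now opened in binary mode and decoded explicitly, because Python 3 keeps bytes and str apart instead of converting implicitly. A self-contained sketch of that read path (file name and contents are placeholders):

    import os, tempfile

    path = os.path.join(tempfile.gettempdir(), "logbook-sketch.html")
    with open(path, "wb") as f:
        f.write("<p>Trip to Steinbr\xfcckenh\xf6hle</p>".encode("latin1"))

    with open(path, "rb") as file_in:            # 'rb' gives bytes, as in the hunk
        txt = file_in.read().decode("latin1")    # one explicit decode, then plain str
    print(txt)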

View File

@ -4,7 +4,7 @@ from django.conf import settings
import troggle.core.models as models import troggle.core.models as models
import csv, re, datetime, os, shutil import csv, re, datetime, os, shutil
from utils import save_carefully from utils import save_carefully
from HTMLParser import HTMLParser from html.parser import HTMLParser
from unidecode import unidecode from unidecode import unidecode
# def saveMugShot(mugShotPath, mugShotFilename, person): # def saveMugShot(mugShotPath, mugShotFilename, person):
@ -45,7 +45,7 @@ def parseMugShotAndBlurb(personline, header, person):
#Only finds the first image, not all of them #Only finds the first image, not all of them
person.blurb=re.search('<body>.*<hr',personPageOld,re.DOTALL).group() person.blurb=re.search('<body>.*<hr',personPageOld,re.DOTALL).group()
else: else:
print "ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename print("ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename)
#for mugShotFilename in re.findall('i/.*?jpg',personPageOld,re.DOTALL): #for mugShotFilename in re.findall('i/.*?jpg',personPageOld,re.DOTALL):
# mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename) # mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
# saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person) # saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
@ -55,8 +55,8 @@ def LoadPersonsExpos():
persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv")) persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
personreader = csv.reader(persontab) personreader = csv.reader(persontab)
headers = personreader.next() headers = next(personreader)
header = dict(zip(headers, range(len(headers)))) header = dict(list(zip(headers, list(range(len(headers))))))
# make expeditions # make expeditions
print(" - Loading expeditions") print(" - Loading expeditions")
@ -100,7 +100,7 @@ def LoadPersonsExpos():
parseMugShotAndBlurb(personline=personline, header=header, person=person) parseMugShotAndBlurb(personline=personline, header=header, person=person)
# make person expedition from table # make person expedition from table
for year, attended in zip(headers, personline)[5:]: for year, attended in list(zip(headers, personline))[5:]:
expedition = models.Expedition.objects.get(year=year) expedition = models.Expedition.objects.get(year=year)
if attended == "1" or attended == "-1": if attended == "1" or attended == "-1":
lookupAttribs = {'person':person, 'expedition':expedition} lookupAttribs = {'person':person, 'expedition':expedition}
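
folk.csv parsing picks up two related changes: next(personreader) replaces .next(), and zip() is now lazy, so slicing the year columns needs the list(...) wrapper that 2to3 inserts. Sketch with invented column names and values:

    headers = ["First", "Last", "Nickname", "Guest", "VfHO", "2017", "2018"]
    personline = ["Fred", "Bloggs", "", "", "", "1", "-1"]

    header = dict(zip(headers, range(len(headers))))            # dict() consumes the iterator itself
    for year, attended in list(zip(headers, personline))[5:]:   # slicing needs a real list
        print(year, attended)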

View File

@ -34,7 +34,7 @@ def importSubcaves(cave):
nonLookupAttribs={'description':description} nonLookupAttribs={'description':description}
newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs) newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)
logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave)) logging.info("Added " + str(newSubcave) + " to " + str(cave))
except IOError: except IOError:
logging.info("Subcave import couldn't open "+subcaveFilePath) logging.info("Subcave import couldn't open "+subcaveFilePath)

View File

@ -33,27 +33,27 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try: try:
survexleg.tape = float(ls[stardata["tape"]]) survexleg.tape = float(ls[stardata["tape"]])
except ValueError: except ValueError:
print("! Tape misread in", survexblock.survexfile.path) print(("! Tape misread in", survexblock.survexfile.path))
print(" Stardata:", stardata) print((" Stardata:", stardata))
print(" Line:", ls) print((" Line:", ls))
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path) message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
survexleg.tape = 1000 survexleg.tape = 1000
try: try:
lclino = ls[stardata["clino"]] lclino = ls[stardata["clino"]]
except: except:
print("! Clino misread in", survexblock.survexfile.path) print(("! Clino misread in", survexblock.survexfile.path))
print(" Stardata:", stardata) print((" Stardata:", stardata))
print(" Line:", ls) print((" Line:", ls))
message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path) message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
lclino = error lclino = error
try: try:
lcompass = ls[stardata["compass"]] lcompass = ls[stardata["compass"]]
except: except:
print("! Compass misread in", survexblock.survexfile.path) print(("! Compass misread in", survexblock.survexfile.path))
print(" Stardata:", stardata) print((" Stardata:", stardata))
print(" Line:", ls) print((" Line:", ls))
message = ' ! Value Error: Compass misread in line %s in %s' % (ls, survexblock.survexfile.path) message = ' ! Value Error: Compass misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
lcompass = error lcompass = error
@ -67,9 +67,9 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try: try:
survexleg.compass = float(lcompass) survexleg.compass = float(lcompass)
except ValueError: except ValueError:
print("! Compass misread in", survexblock.survexfile.path) print(("! Compass misread in", survexblock.survexfile.path))
print(" Stardata:", stardata) print((" Stardata:", stardata))
print(" Line:", ls) print((" Line:", ls))
message = ' ! Value Error: line %s in %s' % (ls, survexblock.survexfile.path) message = ' ! Value Error: line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
survexleg.compass = 1000 survexleg.compass = 1000
@ -143,7 +143,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
global insp global insp
# uncomment to print out all files during parsing # uncomment to print out all files during parsing
print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path) print((insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path))
stamp = datetime.now() stamp = datetime.now()
lineno = 0 lineno = 0
@ -196,7 +196,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet ; ref - %s - found in survexscansfolders' % refscan) # print(insp+' - Wallet ; ref - %s - found in survexscansfolders' % refscan)
else: else:
message = ' ! Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet) message = ' ! Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
print(insp+message) print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
# This whole section should be moved if we can have *QM become a proper survex command # This whole section should be moved if we can have *QM become a proper survex command
@ -268,7 +268,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet *REF - %s - found in survexscansfolders' % refscan) # print(insp+' - Wallet *REF - %s - found in survexscansfolders' % refscan)
else: else:
message = ' ! Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet) message = ' ! Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
print(insp+message) print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
continue continue
@ -293,7 +293,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
cmd = cmd.lower() cmd = cmd.lower()
if re.match("include$(?i)", cmd): if re.match("include$(?i)", cmd):
includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line)) includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
print(insp+' - Include path found including - ' + includepath) print((insp+' - Include path found including - ' + includepath))
# Try to find the cave in the DB if not use the string as before # Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath) path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
if path_match: if path_match:
@ -303,7 +303,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave: if cave:
survexfile.cave = cave survexfile.cave = cave
else: else:
print(insp+' - No match in DB (i) for %s, so loading..' % includepath) print((insp+' - No match in DB (i) for %s, so loading..' % includepath))
includesurvexfile = models.SurvexFile(path=includepath) includesurvexfile = models.SurvexFile(path=includepath)
includesurvexfile.save() includesurvexfile.save()
includesurvexfile.SetDirectory() includesurvexfile.SetDirectory()
@ -326,10 +326,10 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave: if cave:
survexfile.cave = cave survexfile.cave = cave
else: else:
print(insp+' - No match (b) for %s' % newsvxpath) print((insp+' - No match (b) for %s' % newsvxpath))
name = line.lower() name = line.lower()
print(insp+' - Begin found for: ' + name) print((insp+' - Begin found for: ' + name))
# print(insp+'Block cave: ' + str(survexfile.cave)) # print(insp+'Block cave: ' + str(survexfile.cave))
survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0) survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
survexblockdown.save() survexblockdown.save()
@ -420,7 +420,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument", if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
"calibrate", "set", "infer", "alias", "cs", "declination", "case"]: "calibrate", "set", "infer", "alias", "cs", "declination", "case"]:
message = "! Bad svx command in line:%s %s %s %s" % (cmd, line, survexblock, survexblock.survexfile.path) message = "! Bad svx command in line:%s %s %s %s" % (cmd, line, survexblock, survexblock.survexfile.path)
print(insp+message) print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message) models.DataIssue.objects.create(parser='survex', message=message)
endstamp = datetime.now() endstamp = datetime.now()
@ -482,7 +482,7 @@ def LoadPos():
If we don't have it in the database, print an error message and discard it. If we don't have it in the database, print an error message and discard it.
""" """
topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME
print(' - Generating a list of Pos from %s.svx and then loading...' % (topdata)) print((' - Generating a list of Pos from %s.svx and then loading...' % (topdata)))
# Be careful with the cache file. # Be careful with the cache file.
# If LoadPos has been run before, # If LoadPos has been run before,
@ -498,39 +498,39 @@ def LoadPos():
updtsvx = os.path.getmtime(topdata + ".svx") updtsvx = os.path.getmtime(topdata + ".svx")
updtcache = os.path.getmtime(cachefile) updtcache = os.path.getmtime(cachefile)
age = updtcache - updtsvx age = updtcache - updtsvx
print(' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) ))) print((' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) ))))
now = time.time() now = time.time()
if now - updtcache > 3*24*60*60: if now - updtcache > 3*24*60*60:
print " cache is more than 3 days old. Deleting." print(" cache is more than 3 days old. Deleting.")
os.remove(cachefile) os.remove(cachefile)
elif age < 0 : elif age < 0 :
print " cache is stale. Deleting." print(" cache is stale. Deleting.")
os.remove(cachefile) os.remove(cachefile)
else: else:
print " cache is fresh. Reading..." print(" cache is fresh. Reading...")
try: try:
with open(cachefile, "r") as f: with open(cachefile, "r") as f:
for line in f: for line in f:
l = line.rstrip() l = line.rstrip()
if l in notfoundbefore: if l in notfoundbefore:
notfoundbefore[l] +=1 # should not be duplicates notfoundbefore[l] +=1 # should not be duplicates
print " DUPLICATE ", line, notfoundbefore[l] print(" DUPLICATE ", line, notfoundbefore[l])
else: else:
notfoundbefore[l] =1 notfoundbefore[l] =1
except: except:
print " FAILURE READ opening cache file %s" % (cachefile) print(" FAILURE READ opening cache file %s" % (cachefile))
raise raise
notfoundnow =[] notfoundnow =[]
found = 0 found = 0
skip = {} skip = {}
print "\n" # extra line because cavern overwrites the text buffer somehow print("\n") # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file # cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)]) call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA) call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA)
print " - This next bit takes a while. Matching ~32,000 survey positions. Be patient..." print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")
posfile = open("%s.pos" % (topdata)) posfile = open("%s.pos" % (topdata))
posfile.readline() #Drop header posfile.readline() #Drop header
@ -550,7 +550,7 @@ def LoadPos():
found += 1 found += 1
except: except:
notfoundnow.append(name) notfoundnow.append(name)
print " - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)) print(" - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)))
if found > 10: # i.e. a previous cave import has been done if found > 10: # i.e. a previous cave import has been done
try: try:
@ -560,8 +560,8 @@ def LoadPos():
f.write("%s\n" % i) f.write("%s\n" % i)
for j in skip: for j in skip:
f.write("%s\n" % j) # NB skip not notfoundbefore f.write("%s\n" % j) # NB skip not notfoundbefore
print(' Not-found cache file written: %s entries' % c) print((' Not-found cache file written: %s entries' % c))
except: except:
print " FAILURE WRITE opening cache file %s" % (cachefile) print(" FAILURE WRITE opening cache file %s" % (cachefile))
raise raise
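
Most print calls in this parser end up with doubled parentheses because the Python 2 statements were already printing a parenthesised expression, i.e. a tuple; 2to3 keeps that tuple so the output stays identical. The difference is visible with an illustrative path:

    path = "caves-1623/204/204.svx"          # made-up example path

    print(("! Tape misread in", path))       # converted form: prints the tuple repr
    print("! Tape misread in", path)         # plain Python 3 form: space-separated text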

View File

@ -10,7 +10,7 @@ import csv
import re import re
import datetime import datetime
#from PIL import Image from PIL import Image
from utils import save_carefully from utils import save_carefully
from functools import reduce from functools import reduce
@ -82,14 +82,14 @@ def get_or_create_placeholder(year):
# logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r") # logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
# dead # dead
# def listdir(*directories): def listdir(*directories):
# try: try:
# return os.listdir(os.path.join(settings.SURVEYS, *directories)) return os.listdir(os.path.join(settings.SURVEYS, *directories))
# except: except:
# import urllib.request, urllib.parse, urllib.error import urllib.request, urllib.parse, urllib.error
# url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories)) url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
# folders = urllib.request.urlopen(url.replace("#", "%23")).readlines() folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
# return [folder.rstrip(r"/") for folder in folders] return [folder.rstrip(r"/") for folder in folders]
# add survey scans # add survey scans
# def parseSurveyScans(expedition, logfile=None): # def parseSurveyScans(expedition, logfile=None):
@ -252,7 +252,7 @@ def LoadListScans():
def FindTunnelScan(tunnelfile, path): def FindTunnelScan(tunnelfile, path):
scansfolder, scansfile = None, None scansfolder, scansfile = None, None
mscansdir = re.search(r"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path) mscansdir = re.search(rb"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
if mscansdir: if mscansdir:
scansfolderl = SurvexScansFolder.objects.filter(walletname=mscansdir.group(1)) scansfolderl = SurvexScansFolder.objects.filter(walletname=mscansdir.group(1))
if len(scansfolderl): if len(scansfolderl):
@ -273,7 +273,7 @@ def FindTunnelScan(tunnelfile, path):
if scansfile: if scansfile:
tunnelfile.survexscans.add(scansfile) tunnelfile.survexscans.add(scansfile)
elif path and not re.search(r"\.(?:png|jpg|pdf|jpeg)$(?i)", path): elif path and not re.search(rb"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
name = os.path.split(path)[1] name = os.path.split(path)[1]
#print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name) #print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name)
rtunnelfilel = TunnelFile.objects.filter(tunnelname=name) rtunnelfilel = TunnelFile.objects.filter(tunnelname=name)
@ -295,16 +295,16 @@ def SetTunnelfileInfo(tunnelfile):
if tunnelfile.filesize <= 0: if tunnelfile.filesize <= 0:
print("DEBUG - zero length xml file", ff) print("DEBUG - zero length xml file", ff)
return return
mtype = re.search(r"<(fontcolours|sketch)", ttext) mtype = re.search(rb"<(fontcolours|sketch)", ttext)
assert mtype, ff assert mtype, ff
tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours") tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
tunnelfile.npaths = len(re.findall(r"<skpath", ttext)) tunnelfile.npaths = len(re.findall(rb"<skpath", ttext))
tunnelfile.save() tunnelfile.save()
# <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17"> # <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
# <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0"> # <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
for path, style in re.findall(r'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext): for path, style in re.findall(rb'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
FindTunnelScan(tunnelfile, path) FindTunnelScan(tunnelfile, path)
# should also scan and look for survex blocks that might have been included # should also scan and look for survex blocks that might have been included
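
Tunnel XML is now read as bytes, so the regular expressions switch to rb"..." byte patterns. Captured groups come back as bytes too, which means the unchanged comparison against the str "fontcolours" a few lines up can never be True under Python 3, since bytes and str never compare equal. Illustration with a made-up snippet:

    import re

    ttext = b'<tunnelxml version="2009"><sketch>'   # file contents read in binary mode

    mtype = re.search(rb"<(fontcolours|sketch)", ttext)
    print(mtype.group(1))               # b'sketch' -- a bytes object
    print(mtype.group(1) == "sketch")   # False: bytes never equal str in Python 3
    print(mtype.group(1) == b"sketch")  # True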

View File

@ -1,10 +1,10 @@
#!/usr/bin/python #!/usr/bin/python
from settings import * from .settings import *
import sys import sys
import os import os
import string import string
import re import re
import urlparse import urllib.parse
import django import django
pathsdict={ pathsdict={
@ -52,9 +52,9 @@ sep2="\r\t\t\t\t\t\t\t" # ugh nasty - terminal output only
bycodes = sorted(pathsdict) bycodes = sorted(pathsdict)
for p in bycodes: for p in bycodes:
print p, sep , pathsdict[p] print(p, sep , pathsdict[p])
byvals = sorted(pathsdict, key=pathsdict.__getitem__) byvals = sorted(pathsdict, key=pathsdict.__getitem__)
for p in byvals: for p in byvals:
print pathsdict[p] , sep2, p print(pathsdict[p] , sep2, p)
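
This path-listing script hits two renames: the relative import of settings gains its leading dot, and the urlparse module now lives inside the urllib package. The module move in isolation:

    # Python 2:  import urlparse; urlparse.urljoin(base, path)
    # Python 3:  the same functions sit in urllib.parse
    import urllib.parse

    print(urllib.parse.urljoin("http://expo.survex.com/", "/survey_scans/"))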

View File

@ -135,7 +135,7 @@ def create_profile(request, form_class=None, success_url=None,
if extra_context is None: if extra_context is None:
extra_context = {} extra_context = {}
context = RequestContext(request) context = RequestContext(request)
for key, value in extra_context.items(): for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value context[key] = callable(value) and value() or value
return render_to_response(template_name, return render_to_response(template_name,
@ -226,7 +226,7 @@ def edit_profile(request, form_class=None, success_url=None,
if extra_context is None: if extra_context is None:
extra_context = {} extra_context = {}
context = RequestContext(request) context = RequestContext(request)
for key, value in extra_context.items(): for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value context[key] = callable(value) and value() or value
return render_to_response(template_name, return render_to_response(template_name,
@ -301,7 +301,7 @@ def profile_detail(request, username, public_profile_field=None,
if extra_context is None: if extra_context is None:
extra_context = {} extra_context = {}
context = RequestContext(request) context = RequestContext(request)
for key, value in extra_context.items(): for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value context[key] = callable(value) and value() or value
return render_to_response(template_name, return render_to_response(template_name,
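
dict.items() returns a view object in Python 3 rather than a list; wrapping it in list(...) as 2to3 does snapshots the pairs, which keeps the loop safe even if the mapping were modified while iterating. Sketch with invented context values:

    extra_context = {"title": "Expo profile", "year": lambda: 2018}

    context = {}
    for key, value in list(extra_context.items()):
        context[key] = callable(value) and value() or value
    print(context)                  # {'title': 'Expo profile', 'year': 2018}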

View File

@ -1,8 +1,22 @@
from localsettings import * #inital localsettings call so that urljoins work
import os import os
import urlparse
import urllib.parse
import django import django
from localsettings import *
#inital localsettings call so that urljoins work
#Imports should be grouped in the following order:
#Standard library imports.
#Related third party imports.
#Local application/library specific imports.
#You should put a blank line between each group of imports.
print("** importing settings.py")
print("--**-- REPOS_ROOT_PATH: ", REPOS_ROOT_PATH)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__)) BASE_DIR = os.path.dirname(os.path.dirname(__file__))
@ -10,7 +24,7 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True DEBUG = True
TEMPLATE_DEBUG = DEBUG TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [u'expo.survex.com'] ALLOWED_HOSTS = ['expo.survex.com']
ADMINS = ( ADMINS = (
# ('Your Name', 'your_email@domain.com'), # ('Your Name', 'your_email@domain.com'),
@ -48,10 +62,10 @@ ADMIN_MEDIA_PREFIX = '/troggle/media-admin/'
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data") CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data") ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data")
MEDIA_URL = urlparse.urljoin(URL_ROOT , '/site_media/') MEDIA_URL = urllib.parse.urljoin(URL_ROOT , '/site_media/')
SURVEYS_URL = urlparse.urljoin(URL_ROOT , '/survey_scans/') SURVEYS_URL = urllib.parse.urljoin(URL_ROOT , '/survey_scans/')
PHOTOS_URL = urlparse.urljoin(URL_ROOT , '/photos/') PHOTOS_URL = urllib.parse.urljoin(URL_ROOT , '/photos/')
SVX_URL = urlparse.urljoin(URL_ROOT , '/survex/') SVX_URL = urllib.parse.urljoin(URL_ROOT , '/survex/')
# top-level survex file basename (without .svx) # top-level survex file basename (without .svx)
SURVEX_TOPNAME = "1623" SURVEX_TOPNAME = "1623"
@ -169,5 +183,5 @@ TINYMCE_COMPRESSOR = True
MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200 MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200
TEST_RUNNER = 'django.test.runner.DiscoverRunner' TEST_RUNNER = 'django.test.runner.DiscoverRunner'
from localsettings import *
from localsettings import * #localsettings needs to take precedence. Call it to override any existing vars. #localsettings needs to take precedence. Call it to override any existing vars.
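
Two small Python 3 points in settings.py: every str literal is already unicode, so the u'...' prefix on ALLOWED_HOSTS is dropped, and urlparse.urljoin becomes urllib.parse.urljoin as elsewhere. The literal change on its own:

    ALLOWED_HOSTS = ['expo.survex.com']      # u'expo.survex.com' is redundant in Python 3
    print(type(ALLOWED_HOSTS[0]))            # <class 'str'> -- always unicode text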

urls.py
View File

@ -1,11 +1,11 @@
from django.conf.urls import * from django.conf.urls import *
from django.conf import settings from django.conf import settings
from core.views import * # flat import from .core.views import * # flat import
from core.views_other import * from .core.views_other import *
from core.views_caves import * from .core.views_caves import *
from core.views_survex import * from .core.views_survex import *
from core.models import * from .core.models import *
from django.views.generic.edit import UpdateView from django.views.generic.edit import UpdateView
from django.contrib import admin from django.contrib import admin
from django.views.generic.list import ListView from django.views.generic.list import ListView

View File

@ -46,7 +46,7 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs) instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
if not created and not instance.new_since_parsing: if not created and not instance.new_since_parsing:
for k, v in nonLookupAttribs.items(): #overwrite the existing attributes from the logbook text (except date and title) for k, v in list(nonLookupAttribs.items()): #overwrite the existing attributes from the logbook text (except date and title)
setattr(instance, k, v) setattr(instance, k, v)
instance.save() instance.save()
@ -111,8 +111,8 @@ re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
] ]
def html_to_wiki(text, codec = "utf-8"): def html_to_wiki(text, codec = "utf-8"):
if type(text) == str: if isinstance(text, str):
text = unicode(text, codec) text = str(text, codec)
text = re.sub("</p>", r"", text) text = re.sub("</p>", r"", text)
text = re.sub("<p>$", r"", text) text = re.sub("<p>$", r"", text)
text = re.sub("<p>", r"\n\n", text) text = re.sub("<p>", r"\n\n", text)