Compare commits

...

28 Commits

Author           SHA1        Message                                                         Date
Philip Sargent   37553da556  fix python2 python3 issues                                      2020-07-01 00:56:26 +01:00
Philip Sargent   8861e2e240  reset only up to before survex imports                          2020-06-15 21:55:58 +01:00
Philip Sargent   09e9932711  Stop storing SurvexStations fixups                              2020-06-15 19:48:53 +01:00
Philip Sargent   7fe34bedb8  Stop storing all SurvexStations                                 2020-06-15 18:09:59 +01:00
Philip Sargent   d134a58931  stopped storing survex legs                                     2020-06-12 00:34:53 +01:00
Philip Sargent   90a5524036  remove survexblks from 'reset' command                          2020-06-11 18:48:09 +01:00
Philip Sargent   69f72184a6  print cave being imported                                       2020-06-06 20:23:45 +01:00
Philip Sargent   e0d8df0a79  remove unused import                                            2020-06-06 15:56:32 +01:00
Philip Sargent   15d4defe0e  Fix to /caves/ != /caves                                        2020-06-05 23:08:53 +01:00
Philip Sargent   9052982089  Bugfix for capitalised filename extensions                      2020-06-05 00:44:41 +01:00
Philip Sargent   0a35824b9c  update svx template & fix CRLF & utf8                           2020-06-02 23:22:30 +01:00
Philip Sargent   bc5c0b9e53  Chng troggle horizontal menu items & svx template               2020-06-02 23:04:13 +01:00
Philip Sargent   e873dedcf2  bugfixes done using _future_                                    2020-06-01 02:18:25 +01:00
Philip Sargent   a0c5a34b3f  Progress dots on importing data                                 2020-06-01 00:52:14 +01:00
Philip Sargent   6c3c70a02c  Oops. Remove CSV download pages                                 2020-05-31 21:35:37 +01:00
Philip Sargent   43394facdf  Delete SURVEYS.CSV code                                         2020-05-31 21:08:51 +01:00
Philip Sargent   d5b4a0b1d9  Troggle code documentation pointers                             2020-05-31 20:50:49 +01:00
Philip Sargent   8feb1774bb  Adding progress dots to import print output and fix SURVEY_SCANS 2020-05-31 20:50:15 +01:00
Philip Sargent   d55a58bfc8  Reducing input print output                                     2020-05-31 20:48:18 +01:00
Philip Sargent   fffb083aee  fix SURVEY_SCANS to include target folder fully                 2020-05-31 16:34:46 +01:00
Philip Sargent   b9aa447cac  fix dup profile printing                                        2020-05-30 21:13:53 +01:00
Philip Sargent   932b1a2ae3  fixups for cherry pick bugfile in logbook parser                2020-05-30 21:13:13 +01:00
Philip Sargent   367854c9a6  bug fix in logbook parser                                       2020-05-30 20:54:36 +01:00
Philip Sargent   c76aed3bf6  delete duplication                                              2020-05-29 00:30:43 +01:00
Philip Sargent   079f528963  cleaning options list                                           2020-05-29 00:24:53 +01:00
Philip Sargent   972e6f3a95  remove old imagekit files                                       2020-05-29 00:04:09 +01:00
Expo on server   7af6c3cb9c  Allow being unable to open local LOGFILE.                       2020-05-26 00:54:41 +01:00
Expo on server   501a5122d8  Add check to avoid running databaseReset as root accidentally   2020-05-26 00:47:01 +01:00
28 changed files with 513 additions and 1002 deletions

README/index.html Normal file
@@ -0,0 +1,27 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Troggle - Coding Documentation</title>
<link rel="stylesheet" type="text/css" href="../media/css/main2.css" />
</head>
<body>
<h1>Troggle Code - README</h1>
<h2>Contents of README.txt file</h2>
<iframe name="erriframe" width="90%" height="45%"
src="../readme.txt" frameborder="1" ></iframe>
<h2>Troggle documentation in the Expo Handbook</h2>
<ul>
<li><a href="http://expo.survex.com/handbook/troggle/trogintro.html">Intro</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogmanual.html">Troggle manual</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogarch.html">Troggle data model</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogimport.html">Troggle importing data</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogdesign.html">Troggle design decisions</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogdesignx.html">Troggle future architectures</a>
<li><a href="http://expo.survex.com/handbook/troggle/trogsimpler.html">a kinder simpler Troggle?</a>
</ul>
<hr />
</body></html>

@@ -1,187 +1,33 @@
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from troggle.core.models import Cave, Entrance
import settings
import os
from django.db import connection
from django.core import management
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import troggle.flatpages.models
"""Pretty much all of this is now replaced by databaseRest.py
I don't know why this still exists
I don't know why this still exists. Needs testing to see if
removing it makes django misbehave.
"""
databasename=settings.DATABASES['default']['NAME']
expouser=settings.EXPOUSER
expouserpass=settings.EXPOUSERPASS
expouseremail=settings.EXPOUSER_EMAIL
class Command(BaseCommand):
help = 'This is normal usage, clear database and reread everything'
help = 'Removed as redundant - use databaseReset.py'
option_list = BaseCommand.option_list + (
make_option('--reset',
action='store_true',
dest='reset',
default=False,
help='Reset the entire DB from files'),
help='Removed as redundant'),
)
def handle(self, *args, **options):
print(args)
print(options)
if "desc" in args:
self.resetdesc()
elif "scans" in args:
self.import_surveyscans()
elif "caves" in args:
self.reload_db()
self.make_dirs()
self.pageredirects()
self.import_caves()
elif "people" in args:
self.import_people()
elif "QMs" in args:
self.import_QMs()
elif "tunnel" in args:
self.import_tunnelfiles()
elif options['reset']:
self.reset(self)
elif "survex" in args:
self.import_survex()
elif "survexpos" in args:
import parsers.survex
parsers.survex.LoadPos()
elif "logbooks" in args:
self.import_logbooks()
elif "autologbooks" in args:
self.import_auto_logbooks()
elif "dumplogbooks" in args:
self.dumplogbooks()
elif "writeCaves" in args:
self.writeCaves()
elif options['foo']:
self.stdout.write(self.style.WARNING('Testing....'))
else:
#self.stdout.write("%s not recognised" % args)
#self.usage(options)
self.stdout.write("poo")
#print(args)
def reload_db(obj):
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
try:
os.remove(databasename)
except OSError:
pass
else:
cursor = connection.cursor()
cursor.execute("DROP DATABASE %s" % databasename)
cursor.execute("CREATE DATABASE %s" % databasename)
cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
cursor.execute("USE %s" % databasename)
management.call_command('migrate', interactive=False)
# management.call_command('syncdb', interactive=False)
user = User.objects.create_user(expouser, expouseremail, expouserpass)
user.is_staff = True
user.is_superuser = True
user.save()
def make_dirs(obj):
"""Make directories that troggle requires"""
pass
# should also deal with permissions here.
#if not os.path.isdir(settings.PHOTOS_ROOT):
#os.mkdir(settings.PHOTOS_ROOT)
def import_caves(obj):
import parsers.caves
print("Importing Caves")
parsers.caves.readcaves()
def import_people(obj):
import parsers.people
parsers.people.LoadPersonsExpos()
def import_logbooks(obj):
# The line below was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure
# out what was going on so I just catch the error with a try. - AC 21 May
try:
settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() + '\n' + '-' * 60)
except:
pass
import parsers.logbooks
parsers.logbooks.LoadLogbooks()
def import_survex(obj):
import parsers.survex
parsers.survex.LoadAllSurvexBlocks()
parsers.survex.LoadPos()
def import_QMs(obj):
import parsers.QMs
def import_surveys(obj):
import parsers.surveys
parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
def import_surveyscans(obj):
import parsers.surveys
parsers.surveys.LoadListScans()
def import_tunnelfiles(obj):
import parsers.surveys
parsers.surveys.LoadTunnelFiles()
def reset(self, mgmt_obj):
""" Wipe the troggle database and import everything from legacy data
"""
self.reload_db()
self.make_dirs()
self.pageredirects()
self.import_caves()
self.import_people()
self.import_surveyscans()
self.import_survex()
self.import_logbooks()
self.import_QMs()
try:
self.import_tunnelfiles()
except:
print("Tunnel files parser broken.")
self.import_surveys()
def pageredirects(obj):
for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
f = troggle.flatpages.models.Redirect(originalURL=oldURL, newURL=newURL)
f.save()
def writeCaves(obj):
for cave in Cave.objects.all():
cave.writeDataFile()
for entrance in Entrance.objects.all():
entrance.writeDataFile()
def troggle_usage(obj):
print("""Usage is 'manage.py reset_db <command>'
where command is:
reset - this is normal usage, clear database and reread everything
desc
caves - read in the caves
logbooks - read in the logbooks
autologbooks
dumplogbooks
people
QMs - read in the QM files
resetend
scans - read in the scanned surveynotes
survex - read in the survex files
survexpos
tunnel - read in the Tunnel files
writeCaves
""")

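Aside: the option_list/make_option machinery in the command above is the old optparse API, which later Django releases dropped in favour of the add_arguments hook. A minimal sketch of the same dispatch in the newer style (the import_* and reset methods are assumed to exist as in the class above):

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Removed as redundant - use databaseReset.py'

    def add_arguments(self, parser):
        # replaces option_list = BaseCommand.option_list + (make_option(...),)
        parser.add_argument('subcommand', nargs='?', default='')
        parser.add_argument('--reset', action='store_true', default=False,
                            help='Reset the entire DB from files')

    def handle(self, *args, **options):
        if options['reset']:
            self.reset(self)
        elif options['subcommand'] == 'caves':
            self.import_caves()   # assumed defined as in the class above
        else:
            self.stdout.write('%s not recognised' % options['subcommand'])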
@@ -39,10 +39,8 @@ try:
filename=settings.LOGFILE,
filemode='w')
except:
subprocess.call(settings.FIX_PERMISSIONS)
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
# Opening the file for writing is going to fail currently, so decide it doesn't matter for now
pass
#This class is for adding fields and methods which all of our models will have.
class TroggleModel(models.Model):
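The hunk above replaces the second basicConfig attempt with pass, so log output is silently dropped when LOGFILE is unwritable. A hedged alternative sketch (not what the commit does) would fall back to stderr instead:

import logging
import sys
import settings   # as elsewhere in troggle

try:
    logging.basicConfig(level=logging.DEBUG,
                        filename=settings.LOGFILE,
                        filemode='w')
except IOError:
    # settings.LOGFILE is not writable; keep logging to stderr
    # instead of discarding messages altogether.
    logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)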

@@ -147,7 +147,7 @@ class SurvexBlock(models.Model):
return ssl[0]
#print name
ss = SurvexStation(name=name, block=self)
ss.save()
#ss.save()
return ss
def DayIndex(self):

core/views_caves.py Normal file → Executable file
@@ -9,20 +9,44 @@ import troggle.core.models as models
import troggle.settings as settings
from troggle.helper import login_required_if_public
from PIL import Image, ImageDraw, ImageFont
from django.forms.models import modelformset_factory
from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
import re
import os
import urlparse
#import urllib.parse
from django.shortcuts import get_object_or_404, render
import settings
from PIL import Image, ImageDraw, ImageFont
import string, os, sys, subprocess
class MapLocations(object):
p = [
("laser.0_7", "BNase", "Reference", "Br&auml;uning Nase laser point"),
("226-96", "BZkn", "Reference", "Br&auml;uning Zinken trig point"),
("vd1","VD1","Reference", "VD1 survey point"),
("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point"),
("2000","Nipple","Reference", "Nipple (Wei&szlig;e Warze)"),
("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit"),
("topcamp", "OTC", "Reference", "Old Top Camp"),
("laser.0", "LSR0", "Reference", "Laser Point 0"),
("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
("225-96", "BAlm", "Reference", "Br&auml;uning Alm trig point")
]
def points(self):
for ent in Entrance.objects.all():
if ent.best_station():
areaName = ent.caveandentrance_set.all()[0].cave.getArea().short_name
self.p.append((ent.best_station(), "%s-%s" % (areaName, str(ent)[5:]), ent.needs_surface_work(), str(ent)))
return self.p
def __str__(self):
return "{} map locations".format(len(self.p))
def getCave(cave_id):
"""Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm."""

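For orientation, a short usage sketch of the new MapLocations class above (assumes a configured Django environment; points() yields 4-tuples of survex station id, short name, type and label):

from troggle.core.views_caves import MapLocations

ml = MapLocations()
for svxid, number, point_type, label in ml.points():
    print("%s %s %s %s" % (svxid, number, point_type, label))
print(ml)   # e.g. "12 map locations"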
core/views_logbooks.py Normal file → Executable file
@@ -218,20 +218,41 @@ def pathsreport(request):
def experimental(request):
blockroots = models.SurvexBlock.objects.filter(name="root")
if len(blockroots)>1:
print(" ! more than one root survexblock {}".format(len(blockroots)))
for sbr in blockroots:
print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
sbr = blockroots[0]
totalsurvexlength = sbr.totalleglength
try:
nimportlegs = int(sbr.text)
except:
print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
nimportlegs = -1
legsbyexpo = [ ]
addupsurvexlength = 0
for expedition in Expedition.objects.all():
survexblocks = expedition.survexblock_set.all()
survexlegs = [ ]
#survexlegs = [ ]
legsyear=0
survexleglength = 0.0
for survexblock in survexblocks:
survexlegs.extend(survexblock.survexleg_set.all())
#survexlegs.extend(survexblock.survexleg_set.all())
survexleglength += survexblock.totalleglength
legsbyexpo.append((expedition, {"nsurvexlegs":len(survexlegs), "survexleglength":survexleglength}))
legsbyexpo.reverse()
survexlegs = models.SurvexLeg.objects.all()
totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
return render(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
try:
legsyear += int(survexblock.text)
except:
pass
addupsurvexlength += survexleglength
legsbyexpo.append((expedition, {"nsurvexlegs":legsyear, "survexleglength":survexleglength}))
legsbyexpo.reverse()
#removing survexleg objects completely
#survexlegs = models.SurvexLeg.objects.all()
#totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
return render(request, 'experimental.html', { "nsurvexlegs":nimportlegs, "totalsurvexlength":totalsurvexlength, "addupsurvexlength":addupsurvexlength, "legsbyexpo":legsbyexpo })
@login_required_if_public
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):

@@ -71,20 +71,6 @@ def controlPanel(request):
return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
def downloadCavetab(request):
from export import tocavetab
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV'
tocavetab.writeCaveTab(response)
return response
def downloadSurveys(request):
from export import tosurveys
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=Surveys.csv'
tosurveys.writeCaveTab(response)
return response
def downloadLogbook(request,year=None,extension=None,queryset=None):
if year:

core/views_survex.py Normal file → Executable file
@@ -15,47 +15,76 @@ from parsers.people import GetPersonExpeditionNameLookup
import troggle.settings as settings
import parsers.survex
survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau (kataster group 1623)
; Cave:
survextemplatefile = """; *** THIS IS A TEMPLATE FILE NOT WHAT YOU MIGHT BE EXPECTING ***
*** DO NOT SAVE THIS FILE WITHOUT RENAMING IT !! ***
;[Stuff in square brackets is example text to be replaced with real data,
; removing the square brackets]
*begin [surveyname]
*export [connecting stations]
; stations linked into other surveys (or likely to)
*export [1 8 12 34]
*title "area title"
*date 2999.99.99
*team Insts [Caver]
*team Insts [Caver]
*team Notes [Caver]
*instrument [set number]
; Cave:
; Area in cave/QM:
*title ""
*date [2040.07.04] ; <-- CHANGE THIS DATE
*team Insts [Fred Fossa]
*team Notes [Brenda Badger]
*team Pics [Luke Lynx]
*team Tape [Albert Aadvark]
*instrument [SAP #+Laser Tape/DistoX/Compass # ; Clino #]
; Calibration: [Where, readings]
*ref [2040#00] ; <-- CHANGE THIS TOO
; the #number is on the clear pocket containing the original notes
;ref.: 2009#NN
; if using a tape:
*calibrate tape +0.0 ; +ve if tape was too short, -ve if too long
; Centreline data
*data normal from to length bearing gradient ignoreall
[ 1 2 5.57 034.5 -12.8 ]
*data normal from to tape compass clino
1 2 3.90 298 -20
;-----------
;recorded station details (leave commented out)
;(NP=Nail Polish, LHW/RHW=Left/Right Hand Wall)
;Station Left Right Up Down Description
;[Red] nail varnish markings
[;1 0.8 0 5.3 1.6 ; NP on boulder. pt 23 on foo survey ]
[;2 0.3 1.2 6 1.2 ; NP '2' LHW ]
[;3 1.3 0 3.4 0.2 ; Rock on floor - not refindable ]
*data passage station left right up down ignoreall
1 [L] [R] [U] [D] comment
*end [surveyname]"""
def ReplaceTabs(stext):
res = [ ]
nsl = 0
for s in re.split("(\t|\n)", stext):
if s == "\t":
res.append(" " * (4 - (nsl % 4)))
nsl = 0
continue
if s == "\n":
nsl = 0
else:
nsl += len(s)
res.append(s)
return "".join(res)
;LRUDs arranged into passage tubes
;new *data command for each 'passage',
;repeat stations and adjust numbers as needed
*data passage station left right up down
;[ 1 0.8 0 5.3 1.6 ]
;[ 2 0.3 1.2 6 1.2 ]
*data passage station left right up down
;[ 1 1.3 1.5 5.3 1.6 ]
;[ 3 2.4 0 3.4 0.2 ]
;-----------
;Question Mark List ;(leave commented-out)
; The nearest-station is the name of the survey and station which are nearest to
; the QM. The resolution-station is either '-' to indicate that the QM hasn't
; been checked; or the name of the survey and station which push that QM. If a
; QM doesn't go anywhere, set the resolution-station to be the same as the
; nearest-station. Include any relevant details of how to find or push the QM in
; the textual description.
;Serial number grade(A/B/C/X) nearest-station resolution-station description
;[ QM1 A surveyname.3 - description of QM ]
;[ QM2 B surveyname.5 - description of QM ]
;------------
;Cave description ;(leave commented-out)
;freeform text describing this section of the cave
*end [surveyname]
"""
class SvxForm(forms.Form):
@@ -63,15 +92,14 @@
filename = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":36}))
def GetDiscCode(self):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname):
return survextemplatefile
fin = open(fname, "rb")
svxtext = fin.read().decode("latin1") # unicode(a, "latin1")
svxtext = ReplaceTabs(svxtext).strip()
fin = open(fname, "rt")
svxtext = fin.read().encode("utf8")
fin.close()
return svxtext
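Note that in Python 3 fin.read().encode("utf8") returns bytes, not str; if the intent is simply to read the file as text, a sketch with an explicit encoding and a latin1 fallback for legacy files (helper name hypothetical) would be:

def read_svx_text(fname):
    # Read a survex file as text, falling back to latin1 for
    # files written before the UTF-8 conversion.
    try:
        with open(fname, "rt", encoding="utf8") as fin:
            return fin.read()
    except UnicodeDecodeError:
        with open(fname, "rt", encoding="latin1") as fin:
            return fin.read()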
@@ -84,19 +112,28 @@
def SaveCode(self, rcode):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname):
# only save if appears valid
if re.search(r"\[|\]", rcode):
return "Error: clean up all []s from the text"
return "Error: remove all []s from the text. They are only template guidance."
mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
if not mbeginend:
return "Error: no begin/end block here"
if mbeginend.group(1) != mbeginend.group(2):
return "Error: mismatching beginend"
fout = open(fname, "w")
res = fout.write(rcode.encode("latin1"))
return "Error: mismatching begin/end labels"
# Make this create new survex folders if needed
try:
fout = open(fname, "wb")
except IOError:
pth = os.path.dirname(self.data['filename'])
newpath = os.path.join(settings.SURVEX_DATA, pth)
if not os.path.exists(newpath):
os.makedirs(newpath)
fout = open(fname, "wb")
# javascript seems to insert CRLF on WSL1 whatever you say. So fix that:
res = fout.write(rcode.replace("\r",""))
fout.close()
return "SAVED"
return "SAVED ."
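On Python 3 the try/except around the open above can be collapsed with os.makedirs(..., exist_ok=True); a sketch with the same CR stripping (helper name hypothetical):

import os

def save_svx(fname, rcode):
    # Create any missing survex folders, then write the code with
    # CRs removed, mirroring SaveCode above.
    folder = os.path.dirname(fname)
    if folder:
        os.makedirs(folder, exist_ok=True)
    with open(fname, "w") as fout:
        fout.write(rcode.replace("\r", ""))
    return "SAVED ."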
def Process(self):
print("....\n\n\n....Processing\n\n\n")
@@ -104,7 +141,7 @@
os.chdir(os.path.split(settings.SURVEX_DATA + self.data['filename'])[0])
os.system(settings.CAVERN + " --log " + settings.SURVEX_DATA + self.data['filename'] + ".svx")
os.chdir(cwd)
fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rb")
fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rt")
log = fin.read()
fin.close()
log = re.sub("(?s).*?(Survey contains)", "\\1", log)
@@ -144,7 +181,6 @@ def svx(request, survex_file):
form.data['code'] = rcode
if "save" in rform.data:
if request.user.is_authenticated():
#print("sssavvving")
message = form.SaveCode(rcode)
else:
message = "You do not have authority to save this file"
@@ -179,7 +215,7 @@ def svx(request, survex_file):
return render_to_response('svxfile.html', vmap)
def svxraw(request, survex_file):
svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb")
svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rt",encoding='utf8')
return HttpResponse(svx, content_type="text")
@@ -194,20 +230,20 @@ def process(survex_file):
def threed(request, survex_file):
process(survex_file)
try:
threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb")
threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rt",encoding='utf8')
return HttpResponse(threed, content_type="model/3d")
except:
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
return HttpResponse(log, content_type="text")
def log(request, survex_file):
process(survex_file)
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
return HttpResponse(log, content_type="text")
def err(request, survex_file):
process(survex_file)
err = open(settings.SURVEX_DATA + survex_file + ".err", "rb")
err = open(settings.SURVEX_DATA + survex_file + ".err", "rt",encoding='utf8')
return HttpResponse(err, content_type="text")

databaseReset.py Normal file → Executable file
@@ -6,6 +6,10 @@ import timeit
import json
import settings
if os.geteuid() == 0:
print("This script should be run as expo not root - quitting")
exit()
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
@@ -61,9 +65,9 @@ def dirsredirect():
#should also deal with permissions here.
#if not os.path.isdir(settings.PHOTOS_ROOT):
#os.mkdir(settings.PHOTOS_ROOT)
for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
f.save()
# for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
# f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
# f.save()
def import_caves():
import troggle.parsers.caves
@@ -91,7 +95,7 @@ def import_survexblks():
troggle.parsers.survex.LoadAllSurvexBlocks()
def import_survexpos():
import troggle.parsers.survex
import troggle.parsers.survex
print("Importing Survex x/y/z Positions")
troggle.parsers.survex.LoadPos()
@@ -118,6 +122,14 @@ def import_tunnelfiles():
#import logbooksdump
#def import_auto_logbooks():
#def dumplogbooks():
#def writeCaves():
# Writes out all cave and entrance HTML files to
# folder specified in settings.CAVEDESCRIPTIONS
# for cave in Cave.objects.all():
# cave.writeDataFile()
# for entrance in Entrance.objects.all():
# entrance.writeDataFile()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class JobQueue():
@@ -130,8 +142,8 @@ class JobQueue():
self.results = {}
self.results_order=[
"date","runlabel","reinit", "caves", "people",
"logbooks", "QMs", "survexblks", "survexpos",
"tunnel", "scans", "surveyimgs", "test", "dirsredirect", "syncuser" ]
"logbooks", "QMs", "scans", "survexblks", "survexpos",
"tunnel", "surveyimgs", "test", "dirsredirect", "syncuser" ]
for k in self.results_order:
self.results[k]=[]
self.tfile = "import_profile.json"
@@ -309,6 +321,8 @@ class JobQueue():
for k in self.results_order:
if k =="dirsredirect":
break
if k =="surveyimgs":
break
elif k =="syncuser":
break
elif k =="test":
@@ -360,17 +374,15 @@ def usage():
profile - print the profile from previous runs. Import nothing.
reset - normal usage: clear database and reread everything from files - time-consuming
caves - read in the caves
caves - read in the caves (must run first after reset)
people - read in the people from folk.csv (must run before logbooks)
logbooks - read in the logbooks
people - read in the people from folk.csv
QMs - read in the QM csv files (older caves only)
scans - the survey scans in all the wallets
scans - the survey scans in all the wallets (must run before survex)
survex - read in the survex files - all the survex blocks but not the x/y/z positions
survexpos - just the x/y/z Pos out of the survex files
survexall - both survex and survexpos
survexpos - set the x/y/z positions for entrances and fixed points
tunnel - read in the Tunnel files - which scans the survey scans too
drawings - Tunnel, QMs, scans
reinit - clear database (delete everything) and make empty tables. Import nothing.
syncuser - needed after reloading database from SQL backup
@@ -384,6 +396,8 @@ def usage():
caves and logbooks must be run on an empty db before the others as they
set up db tables used by the others.
the in-memory phase is on an empty db, so always runs reinit, caves & people for this phase
""")
if __name__ == "__main__":
@@ -413,8 +427,6 @@ if __name__ == "__main__":
jq.enq("people",import_people)
elif "QMs" in sys.argv:
jq.enq("QMs",import_QMs)
elif "reinit" in sys.argv:
jq.enq("reinit",reinit_db)
elif "reset" in sys.argv:
jq.enq("reinit",reinit_db)
jq.enq("dirsredirect",dirsredirect)
@@ -423,9 +435,9 @@ if __name__ == "__main__":
jq.enq("scans",import_surveyscans)
jq.enq("logbooks",import_logbooks)
jq.enq("QMs",import_QMs)
jq.enq("survexblks",import_survexblks)
jq.enq("survexpos",import_survexpos)
jq.enq("tunnel",import_tunnelfiles)
#jq.enq("survexblks",import_survexblks)
#jq.enq("survexpos",import_survexpos)
elif "scans" in sys.argv:
jq.enq("scans",import_surveyscans)
elif "survex" in sys.argv:
@@ -434,19 +446,14 @@ if __name__ == "__main__":
jq.enq("survexpos",import_survexpos)
elif "tunnel" in sys.argv:
jq.enq("tunnel",import_tunnelfiles)
elif "survexall" in sys.argv:
jq.enq("survexblks",import_survexblks)
jq.enq("survexpos",import_survexpos)
elif "drawings" in sys.argv:
jq.enq("QMs",import_QMs)
jq.enq("scans",import_surveyscans)
jq.enq("tunnel",import_tunnelfiles)
elif "surveyimgs" in sys.argv:
jq.enq("surveyimgs",import_surveyimgs) # imports into tables which are never read
elif "autologbooks" in sys.argv: # untested in 2020
import_auto_logbooks()
elif "dumplogbooks" in sys.argv: # untested in 2020
dumplogbooks()
# elif "writecaves" in sys.argv: # untested in 2020 - will overwrite input files!!
# writeCaves()
elif "profile" in sys.argv:
jq.loadprofiles()
jq.showprofile()
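Distilled from the argument handling above, this is the order a full 'reset' enqueues its jobs. The JobQueue constructor signature and the method that finally runs the queue are not shown in this excerpt, so both are assumptions here:

jq = JobQueue(runlabel)                 # runlabel taken from the command line
jq.enq("reinit", reinit_db)             # empty tables first
jq.enq("dirsredirect", dirsredirect)
jq.enq("caves", import_caves)           # must run first after reset
jq.enq("people", import_people)         # must run before logbooks
jq.enq("scans", import_surveyscans)     # must run before survex
jq.enq("logbooks", import_logbooks)
jq.enq("QMs", import_QMs)
jq.enq("tunnel", import_tunnelfiles)
# survexblks/survexpos are commented out of the reset sequence in this commit
jq.run()                                # assumed entry point that executes the queue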

@@ -1,38 +0,0 @@
from django.db.models.loading import cache
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from imagekit.models import ImageModel
from imagekit.specs import ImageSpec
class Command(BaseCommand):
help = ('Clears all ImageKit cached files.')
args = '[apps]'
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
return flush_cache(args, options)
def flush_cache(apps, options):
""" Clears the image cache
"""
apps = [a.strip(',') for a in apps]
if apps:
print 'Flushing cache for %s...' % ', '.join(apps)
else:
print 'Flushing caches...'
for app_label in apps:
app = cache.get_app(app_label)
models = [m for m in cache.get_models(app) if issubclass(m, ImageModel)]
for model in models:
for obj in model.objects.all():
for spec in model._ik.specs:
prop = getattr(obj, spec.name(), None)
if prop is not None:
prop._delete()
if spec.pre_cache:
prop._create()

@@ -1,136 +0,0 @@
import os
from datetime import datetime
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.db.models.base import ModelBase
from django.utils.translation import ugettext_lazy as _
from imagekit import specs
from imagekit.lib import *
from imagekit.options import Options
from imagekit.utils import img_to_fobj
# Modify image file buffer size.
ImageFile.MAXBLOCK = getattr(settings, 'PIL_IMAGEFILE_MAXBLOCK', 256 * 2 ** 10)
# Choice tuples for specifying the crop origin.
# These are provided for convenience.
CROP_HORZ_CHOICES = (
(0, _('left')),
(1, _('center')),
(2, _('right')),
)
CROP_VERT_CHOICES = (
(0, _('top')),
(1, _('center')),
(2, _('bottom')),
)
class ImageModelBase(ModelBase):
""" ImageModel metaclass
This metaclass parses IKOptions and loads the specified specification
module.
"""
def __init__(cls, name, bases, attrs):
parents = [b for b in bases if isinstance(b, ImageModelBase)]
if not parents:
return
user_opts = getattr(cls, 'IKOptions', None)
opts = Options(user_opts)
try:
module = __import__(opts.spec_module, {}, {}, [''])
except ImportError:
raise ImportError('Unable to load imagekit config module: %s' % \
opts.spec_module)
for spec in [spec for spec in module.__dict__.values() \
if isinstance(spec, type) \
and issubclass(spec, specs.ImageSpec) \
and spec != specs.ImageSpec]:
setattr(cls, spec.name(), specs.Descriptor(spec))
opts.specs.append(spec)
setattr(cls, '_ik', opts)
class ImageModel(models.Model):
""" Abstract base class implementing all core ImageKit functionality
Subclasses of ImageModel are augmented with accessors for each defined
image specification and can override the inner IKOptions class to customize
storage locations and other options.
"""
__metaclass__ = ImageModelBase
class Meta:
abstract = True
class IKOptions:
pass
def admin_thumbnail_view(self):
if not self._imgfield:
return None
prop = getattr(self, self._ik.admin_thumbnail_spec, None)
if prop is None:
return 'An "%s" image spec has not been defined.' % \
self._ik.admin_thumbnail_spec
else:
if hasattr(self, 'get_absolute_url'):
return u'<a href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), prop.url)
else:
return u'<a href="%s"><img src="%s"></a>' % \
(self._imgfield.url, prop.url)
admin_thumbnail_view.short_description = _('Thumbnail')
admin_thumbnail_view.allow_tags = True
@property
def _imgfield(self):
return getattr(self, self._ik.image_field)
def _clear_cache(self):
for spec in self._ik.specs:
prop = getattr(self, spec.name())
prop._delete()
def _pre_cache(self):
for spec in self._ik.specs:
if spec.pre_cache:
prop = getattr(self, spec.name())
prop._create()
def save(self, clear_cache=True, *args, **kwargs):
is_new_object = self._get_pk_val is None
super(ImageModel, self).save(*args, **kwargs)
if is_new_object:
clear_cache = False
spec = self._ik.preprocessor_spec
if spec is not None:
newfile = self._imgfield.storage.open(str(self._imgfield))
img = Image.open(newfile)
img = spec.process(img, None)
format = img.format or 'JPEG'
if format != 'JPEG':
imgfile = img_to_fobj(img, format)
else:
imgfile = img_to_fobj(img, format,
quality=int(spec.quality),
optimize=True)
content = ContentFile(imgfile.read())
newfile.close()
name = str(self._imgfield)
self._imgfield.storage.delete(name)
self._imgfield.storage.save(name, content)
if clear_cache and self._imgfield != '':
self._clear_cache()
self._pre_cache()
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
self._clear_cache()
models.Model.delete(self)

@@ -1,23 +0,0 @@
# Imagekit options
from imagekit import processors
from imagekit.specs import ImageSpec
class Options(object):
""" Class handling per-model imagekit options
"""
image_field = 'image'
crop_horz_field = 'crop_horz'
crop_vert_field = 'crop_vert'
preprocessor_spec = None
cache_dir = 'cache'
save_count_as = None
cache_filename_format = "%(filename)s_%(specname)s.%(extension)s"
admin_thumbnail_spec = 'admin_thumbnail'
spec_module = 'imagekit.defaults'
def __init__(self, opts):
for key, value in opts.__dict__.iteritems():
setattr(self, key, value)
self.specs = []

@@ -1,119 +0,0 @@
""" ImageKit image specifications
All imagekit specifications must inherit from the ImageSpec class. Models
inheriting from ImageModel will be modified with a descriptor/accessor for each
spec found.
"""
import os
from StringIO import StringIO
from imagekit.lib import *
from imagekit.utils import img_to_fobj
from django.core.files.base import ContentFile
class ImageSpec(object):
pre_cache = False
quality = 70
increment_count = False
processors = []
@classmethod
def name(cls):
return getattr(cls, 'access_as', cls.__name__.lower())
@classmethod
def process(cls, image, obj):
processed_image = image.copy()
for proc in cls.processors:
processed_image = proc.process(processed_image, obj)
return processed_image
class Accessor(object):
def __init__(self, obj, spec):
self._img = None
self._obj = obj
self.spec = spec
def _get_imgfile(self):
format = self._img.format or 'JPEG'
if format != 'JPEG':
imgfile = img_to_fobj(self._img, format)
else:
imgfile = img_to_fobj(self._img, format,
quality=int(self.spec.quality),
optimize=True)
return imgfile
def _create(self):
if self._exists():
return
# process the original image file
fp = self._obj._imgfield.storage.open(self._obj._imgfield.name)
fp.seek(0)
fp = StringIO(fp.read())
try:
self._img = self.spec.process(Image.open(fp), self._obj)
# save the new image to the cache
content = ContentFile(self._get_imgfile().read())
self._obj._imgfield.storage.save(self.name, content)
except IOError:
pass
def _delete(self):
self._obj._imgfield.storage.delete(self.name)
def _exists(self):
return self._obj._imgfield.storage.exists(self.name)
def _basename(self):
filename, extension = \
os.path.splitext(os.path.basename(self._obj._imgfield.name))
return self._obj._ik.cache_filename_format % \
{'filename': filename,
'specname': self.spec.name(),
'extension': extension.lstrip('.')}
@property
def name(self):
return os.path.join(self._obj._ik.cache_dir, self._basename())
@property
def url(self):
self._create()
if self.spec.increment_count:
fieldname = self._obj._ik.save_count_as
if fieldname is not None:
current_count = getattr(self._obj, fieldname)
setattr(self._obj, fieldname, current_count + 1)
self._obj.save(clear_cache=False)
return self._obj._imgfield.storage.url(self.name)
@property
def file(self):
self._create()
return self._obj._imgfield.storage.open(self.name)
@property
def image(self):
if self._img is None:
self._create()
if self._img is None:
self._img = Image.open(self.file)
return self._img
@property
def width(self):
return self.image.size[0]
@property
def height(self):
return self.image.size[1]
class Descriptor(object):
def __init__(self, spec):
self._spec = spec
def __get__(self, obj, type=None):
return Accessor(obj, self._spec)

@@ -1,86 +0,0 @@
import os
import tempfile
import unittest
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from imagekit import processors
from imagekit.models import ImageModel
from imagekit.specs import ImageSpec
from imagekit.lib import Image
class ResizeToWidth(processors.Resize):
width = 100
class ResizeToHeight(processors.Resize):
height = 100
class ResizeToFit(processors.Resize):
width = 100
height = 100
class ResizeCropped(ResizeToFit):
crop = ('center', 'center')
class TestResizeToWidth(ImageSpec):
access_as = 'to_width'
processors = [ResizeToWidth]
class TestResizeToHeight(ImageSpec):
access_as = 'to_height'
processors = [ResizeToHeight]
class TestResizeCropped(ImageSpec):
access_as = 'cropped'
processors = [ResizeCropped]
class TestPhoto(ImageModel):
""" Minimal ImageModel class for testing """
image = models.ImageField(upload_to='images')
class IKOptions:
spec_module = 'imagekit.tests'
class IKTest(TestCase):
""" Base TestCase class """
def setUp(self):
# create a test image using tempfile and PIL
self.tmp = tempfile.TemporaryFile()
Image.new('RGB', (800, 600)).save(self.tmp, 'JPEG')
self.tmp.seek(0)
self.p = TestPhoto()
self.p.image.save(os.path.basename('test.jpg'),
ContentFile(self.tmp.read()))
self.p.save()
# destroy temp file
self.tmp.close()
def test_setup(self):
self.assertEqual(self.p.image.width, 800)
self.assertEqual(self.p.image.height, 600)
def test_to_width(self):
self.assertEqual(self.p.to_width.width, 100)
self.assertEqual(self.p.to_width.height, 75)
def test_to_height(self):
self.assertEqual(self.p.to_height.width, 133)
self.assertEqual(self.p.to_height.height, 100)
def test_crop(self):
self.assertEqual(self.p.cropped.width, 100)
self.assertEqual(self.p.cropped.height, 100)
def test_url(self):
tup = (settings.MEDIA_URL, self.p._ik.cache_dir, 'test_to_width.jpg')
self.assertEqual(self.p.to_width.url, "%s%s/%s" % tup)
def tearDown(self):
# make sure image file is deleted
path = self.p.image.path
self.p.delete()
self.failIf(os.path.isfile(path))

@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
print(cave + " placeholder logbook entry for " + str(year) + " added to database")
print((" - placeholder logbook entry for " + cave + " " + str(year) + " added to database"))
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder
@@ -71,9 +71,9 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
print("overwriting " + str(preexistingQM) +"\r")
#print((" - overwriting " + str(preexistingQM) +"\r"))
else: # otherwise, print that it was ignored
print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
print((" - preserving " + str(preexistingQM) + ", which was edited in admin \r"))
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()

parsers/caves.py Normal file → Executable file
@@ -170,6 +170,7 @@ def readcave(filename):
def getXML(text, itemname, minItems = 1, maxItems = None, printwarnings = True, context = ""):
# this next line is where it crashes horribly if a stray umlaut creeps in. Will fix itself in python3
items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": itemname}, text, re.S)
if len(items) < minItems and printwarnings:
message = " ! %(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
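The findall in getXML above extracts tag bodies with a non-greedy match across newlines; a quick illustration with hypothetical input:

import re

text = "<name>204</name>\n<area>1623</area>\n<name>Steinbrueckenhoehle</name>"
items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": "name"},
                   text, re.S)
print(items)   # ['204', 'Steinbrueckenhoehle']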

@@ -1,21 +1,20 @@
# -*- coding: utf-8 -*-
from django.conf import settings
import troggle.core.models as models
from parsers.people import GetPersonExpeditionNameLookup
from parsers.cavetab import GetCaveLookup
from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
from __future__ import (absolute_import, division,
print_function)
import csv
import re
import datetime, time
import os
import pickle
from django.conf import settings
from django.template.defaultfilters import slugify
from troggle.core.models import DataIssue, Expedition
import troggle.core.models as models
from parsers.people import GetPersonExpeditionNameLookup
from parsers.cavetab import GetCaveLookup
from utils import save_carefully
#
@@ -92,7 +91,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author:
print(" - Skipping logentry: " + title + " - no author for entry")
print(" * Skipping logentry: " + title + " - no author for entry")
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
return
@@ -135,7 +134,6 @@ def Parselogwikitxt(year, expedition, txt):
trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
for triphead, triptext in trippara:
tripheadp = triphead.split("|")
#print "ttt", tripheadp
assert len(tripheadp) == 3, (tripheadp, triptext)
tripdate, tripplace, trippeople = tripheadp
tripsplace = tripplace.split(" - ")
@@ -143,19 +141,14 @@ def Parselogwikitxt(year, expedition, txt):
tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
if tul:
#assert len(tul) <= 1, (triphead, triptext)
#assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
tu = tul[0][0]
else:
tu = ""
#assert tripcave == "Journey", (triphead, triptext)
#print tripdate
ldate = ParseDate(tripdate.strip(), year)
#print "\n", tripcave, "--- ppp", trippeople, len(triptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
# 2002, 2004, 2005, 2007, 2010 - 2018
# 2002, 2004, 2005, 2007, 2010 - now
def Parseloghtmltxt(year, expedition, txt):
#print(" - Starting log html parser")
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
@@ -175,28 +168,21 @@ def Parseloghtmltxt(year, expedition, txt):
''', trippara)
if not s:
if not re.search(r"Rigging Guide", trippara):
print("can't parse: ", trippara) # this is 2007 which needs editing
#assert s, trippara
print(("can't parse: ", trippara)) # this is 2007 which needs editing
continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
ldate = ParseDate(tripdate.strip(), year)
#assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
#trippeople = re.sub(r"Ol(?!l)", "Olly", trippeople)
#trippeople = re.sub(r"Wook(?!e)", "Wookey", trippeople)
triptitles = triptitle.split(" - ")
if len(triptitles) >= 2:
tripcave = triptitles[0]
else:
tripcave = "UNKNOWN"
#print("\n", tripcave, "--- ppp", trippeople, len(triptext))
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "</br></br>", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
if logbook_entry_count == 0:
print(" - No trip entries found in logbook, check the syntax matches htmltxt format")
# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
@@ -210,9 +196,6 @@ def Parseloghtml01(year, expedition, txt):
tripid = mtripid and mtripid.group(1) or ""
tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)
#print " ", [tripheader]
#continue
tripdate, triptitle, trippeople = tripheader.split("|")
ldate = ParseDate(tripdate.strip(), year)
@@ -230,19 +213,14 @@ def Parseloghtml01(year, expedition, txt):
mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
if mtail:
#print mtail.group(0)
ltriptext = ltriptext[:mtail.start(0)]
ltriptext = re.sub(r"</p>", "", ltriptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>|<br>", "\n\n", ltriptext).strip()
#ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext)
ltriptext = re.sub(r"</?u>", "_", ltriptext)
ltriptext = re.sub(r"</?i>", "''", ltriptext)
ltriptext = re.sub(r"</?b>", "'''", ltriptext)
#print ldate, trippeople.strip()
# could include the tripid (url link for cross referencing)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
@@ -269,7 +247,6 @@ def Parseloghtml03(year, expedition, txt):
tripcave = triptitles[0]
else:
tripcave = "UNKNOWN"
#print tripcave, "--- ppp", triptitle, trippeople, len(triptext)
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
@@ -299,102 +276,95 @@ def SetDatesFromLogbookEntries(expedition):
def LoadLogbookForExpedition(expedition):
""" Parses all logbook entries for one expedition """
""" Parses all logbook entries for one expedition
"""
global logentries
expowebbase = os.path.join(settings.EXPOWEB, "years")
yearlinks = settings.LOGBOOK_PARSER_SETTINGS
logbook_parseable = False
logbook_cached = False
yearlinks = settings.LOGBOOK_PARSER_SETTINGS
expologbase = os.path.join(settings.EXPOWEB, "years")
if expedition.year in yearlinks:
# print " - Valid logbook year: ", expedition.year
year_settings = yearlinks[expedition.year]
logbookfile = os.path.join(expologbase, yearlinks[expedition.year][0])
parsefunc = yearlinks[expedition.year][1]
else:
logbookfile = os.path.join(expologbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE)
parsefunc = settings.DEFAULT_LOGBOOK_PARSER
cache_filename = logbookfile + ".cache"
try:
bad_cache = False
now = time.time()
cache_t = os.path.getmtime(cache_filename)
if os.path.getmtime(logbookfile) - cache_t > 2: # at least 2 secs later
bad_cache= True
if now - cache_t > 30*24*60*60:
bad_cache= True
if bad_cache:
print(" - ! Cache is either stale or more than 30 days old. Deleting it.")
os.remove(cache_filename)
logentries=[]
print(" ! Removed stale or corrupt cache file")
raise
print(" - Reading cache: " + cache_filename, end='')
try:
bad_cache = False
cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
now = time.time()
cache_t = os.path.getmtime(cache_filename)
file_t = os.path.getmtime(os.path.join(expowebbase, year_settings[0]))
if file_t - cache_t > 2: # at least 2 secs later
#print " - Cache is stale."
bad_cache= True
if now - cache_t > 30*24*60*60:
#print " - Cache is more than 30 days old."
bad_cache= True
if bad_cache:
print " - Cache is either stale or more than 30 days old. Deleting it."
os.remove(cache_filename)
logentries=[]
raise
print(" - Reading cache: " + cache_filename )
try:
with open(cache_filename, "rb") as f:
logentries = pickle.load(f)
print " - Loaded ", len(logentries), " objects"
logbook_cached = True
except:
print " - Failed to load corrupt cache. Deleting it.\n"
os.remove(cache_filename)
logentries=[]
with open(cache_filename, "rb") as f:
logentries = pickle.load(f)
print(" -- Loaded ", len(logentries), " log entries")
logbook_cached = True
except:
print(" - Opening logbook: ")
file_in = open(os.path.join(expowebbase, year_settings[0]))
print("\n ! Failed to load corrupt cache. Deleting it.\n")
os.remove(cache_filename)
logentries=[]
raise
except : # no cache found
#print(" - No cache \"" + cache_filename +"\"")
try:
file_in = open(logbookfile,'rb')
txt = file_in.read().decode("latin1")
file_in.close()
parsefunc = year_settings[1]
logbook_parseable = True
print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
print((" - Using: " + parsefunc + " to parse " + logbookfile))
except (IOError):
logbook_parseable = False
print((" ! Couldn't open logbook " + logbookfile))
if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
# and this has also stored all the objects in logentries[]
print " - Storing " , len(logentries), " log entries"
cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
with open(cache_filename, "wb") as f:
pickle.dump(logentries, f, 2)
logentries=[] # flush for next year
if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
# and this has also stored all the log entries in logentries[]
if len(logentries) >0:
print(" - Cacheing " , len(logentries), " log entries")
with open(cache_filename, "wb") as fc:
pickle.dump(logentries, fc, 2)
else:
print(" ! NO TRIP entries found in logbook, check the syntax.")
if logbook_cached:
i=0
for entrytuple in range(len(logentries)):
date, place, title, text, trippeople, expedition, logtime_underground, \
entry_type = logentries[i]
#print " - - obj ", i, date, title
EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground,\
entry_type)
i +=1
else:
try:
file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
txt = file_in.read().decode("latin1")
file_in.close()
logbook_parseable = True
print("No set parser found using default")
parsefunc = settings.DEFAULT_LOGBOOK_PARSER
except (IOError):
logbook_parseable = False
print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)
logentries=[] # flush for next year
if logbook_cached:
i=0
for entrytuple in range(len(logentries)):
date, place, title, text, trippeople, expedition, logtime_underground, \
entry_type = logentries[i]
EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground,\
entry_type)
i +=1
#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
def LoadLogbooks():
""" This is the master function for parsing all logbooks into the Troggle database. """
# Clear the logbook data issues as we are reloading
models.DataIssue.objects.filter(parser='logbooks').delete()
# Fetch all expos
expos = models.Expedition.objects.all()
""" This is the master function for parsing all logbooks into the Troggle database.
"""
DataIssue.objects.filter(parser='logbooks').delete()
expos = Expedition.objects.all()
nologbook = ["1976", "1977","1978","1979","1980","1981","1983","1984",
"1985","1986","1987","1988","1989","1990",]
for expo in expos:
print("\nLoading Logbook for: " + expo.year)
# Load logbook for expo
LoadLogbookForExpedition(expo)
if expo.year not in nologbook:
print((" - Logbook for: " + expo.year))
LoadLogbookForExpedition(expo)
dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
@@ -418,25 +388,25 @@ def parseAutoLogBookEntry(filename):
year, month, day = [int(x) for x in dateMatch.groups()]
date = datetime.date(year, month, day)
else:
errors.append("Date could not be found")
errors.append(" - Date could not be found")
expeditionYearMatch = expeditionYearRegex.search(contents)
if expeditionYearMatch:
try:
expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
except models.Expedition.DoesNotExist:
errors.append("Expedition not in database")
except Expedition.DoesNotExist:
errors.append(" - Expedition not in database")
else:
errors.append("Expedition Year could not be parsed")
errors.append(" - Expedition Year could not be parsed")
titleMatch = titleRegex.search(contents)
if titleMatch:
title, = titleMatch.groups()
if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
errors.append("Title too long")
errors.append(" - Title too long")
else:
errors.append("Title could not be found")
errors.append(" - Title could not be found")
caveMatch = caveRegex.search(contents)
if caveMatch:
@@ -445,7 +415,7 @@ def parseAutoLogBookEntry(filename):
cave = models.getCaveByReference(caveRef)
except AssertionError:
cave = None
errors.append("Cave not found in database")
errors.append(" - Cave not found in database")
else:
cave = None
@@ -456,13 +426,13 @@ def parseAutoLogBookEntry(filename):
location = None
if cave is None and location is None:
errors.append("Location nor cave could not be found")
errors.append(" - Location nor cave could not be found")
reportMatch = reportRegex.search(contents)
if reportMatch:
report, = reportMatch.groups()
else:
errors.append("Contents could not be found")
errors.append(" - Contents could not be found")
if errors:
return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
people = []
@@ -473,21 +443,21 @@ def parseAutoLogBookEntry(filename):
if name.lower() in personExpeditionNameLookup:
personExpo = personExpeditionNameLookup[name.lower()]
else:
errors.append("Person could not be found in database")
errors.append(" - Person could not be found in database")
author = bool(author)
else:
errors.append("Persons name could not be found")
errors.append(" - Persons name could not be found")
TUMatch = TURegex.search(contents)
if TUMatch:
TU, = TUMatch.groups()
else:
errors.append("TU could not be found")
errors.append(" - TU could not be found")
if not errors:
people.append((name, author, TU))
if errors:
return errors # Bail out before commiting to the database
logbookEntry = models.LogbookEntry(date = date,
return errors # Bail out before committing to the database
logbookEntry = LogbookEntry(date = date,
expedition = expedition,
title = title, cave = cave, place = location,
text = report, slug = slugify(title)[:50],
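The cache handling in LoadLogbookForExpedition above applies two freshness rules; distilled into a standalone sketch:

import os
import time

def cache_is_bad(logbookfile, cache_filename, max_age_days=30):
    # A cache is stale if the logbook was modified more than 2 seconds
    # after the cache was written, or the cache is over 30 days old.
    cache_t = os.path.getmtime(cache_filename)
    if os.path.getmtime(logbookfile) - cache_t > 2:
        return True
    if time.time() - cache_t > max_age_days * 24 * 60 * 60:
        return True
    return False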

parsers/survex.py Normal file → Executable file
@@ -1,26 +1,31 @@
import troggle.settings as settings
import troggle.core.models as models
import troggle.settings as settings
from __future__ import absolute_import, division, print_function
from subprocess import call, Popen, PIPE
from troggle.parsers.people import GetPersonExpeditionNameLookup
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
import re
import os
import re
import sys
import time
from datetime import datetime, timedelta
import sys
from subprocess import PIPE, Popen, call
from django.utils.timezone import get_current_timezone, make_aware
import troggle.settings as settings
import troggle.core.models as models
import troggle.core.models_survex as models_survex
from troggle.parsers.people import GetPersonExpeditionNameLookup
from troggle.core.views_caves import MapLocations
"""A 'survex block' is a *begin...*end set of cave data.
A 'survexscansfolder' is what we today call a "survey scans folder" or a "wallet".
"""
line_leg_regex = re.compile(r"[\d\-+.]+$")
survexlegsalllength = 0.0
survexlegsnumber = 0
def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
global survexlegsalllength
global survexlegsnumber
# The try catches here need replacing as they are relatively expensive
ls = sline.lower().split()
ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
@@ -32,13 +37,14 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
if stardata["type"] == "normal":
try:
survexleg.tape = float(ls[stardata["tape"]])
survexlegsnumber += 1
except ValueError:
print("! Tape misread in", survexblock.survexfile.path)
print(" Stardata:", stardata)
print(" Line:", ls)
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
survexleg.tape = 1000
survexleg.tape = 0
try:
lclino = ls[stardata["clino"]]
except:
@@ -84,15 +90,20 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
survexleg.cave = cave
# only save proper legs
survexleg.save()
# No need to save as we are measuring lengths only on parsing now.
# delete the object so that django autosaving doesn't save it.
survexleg = None
#survexleg.save()
itape = stardata.get("tape")
if itape:
try:
survexblock.totalleglength += float(ls[itape])
survexlegsalllength += float(ls[itape])
except ValueError:
print("! Length not added")
survexblock.save()
# No need to save as we are measuring lengths only on parsing now.
#survexblock.save()
def LoadSurvexEquate(survexblock, sline):
@@ -129,7 +140,7 @@ regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)"
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
insp = ""
callcount = 0
def RecursiveLoad(survexblock, survexfile, fin, textlines):
"""Follows the *include links in all the survex files from the root file 1623.svx
and reads in the survex blocks, other data and the wallet references (survexscansfolder) as it
@@ -141,11 +152,19 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
stardata = stardatadefault
teammembers = [ ]
global insp
global callcount
global survexlegsnumber
# uncomment to print out all files during parsing
print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path)
stamp = datetime.now()
lineno = 0
sys.stderr.flush();
callcount +=1
if callcount >=10:
callcount=0
print(".", file=sys.stderr,end='')
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
@@ -328,6 +347,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
else:
print(insp+' - No match (b) for %s' % newsvxpath)
previousnlegs = survexlegsnumber
name = line.lower()
print(insp+' - Begin found for: ' + name)
# print(insp+'Block cave: ' + str(survexfile.cave))
@@ -347,7 +367,11 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if iblankbegins:
iblankbegins -= 1
else:
survexblock.text = "".join(textlines)
#survexblock.text = "".join(textlines)
# .text not used, using it for number of legs per block
legsinblock = survexlegsnumber - previousnlegs
print("LEGS: {} (previous: {}, now:{})".format(legsinblock,previousnlegs,survexlegsnumber))
survexblock.text = str(legsinblock)
survexblock.save()
# print(insp+' - End found: ')
endstamp = datetime.now()
@@ -428,6 +452,8 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Time to process: ' + str(timetaken))
def LoadAllSurvexBlocks():
global survexlegsalllength
global survexlegsnumber
print(' - Flushing All Survex Blocks...')
@@ -445,7 +471,7 @@ def LoadAllSurvexBlocks():
models.DataIssue.objects.filter(parser='survex').delete()
print(' - Loading All Survex Blocks...')
print(' - redirecting stdout to loadsurvexblks.log ...')
print(' - redirecting stdout to loadsurvexblks.log...')
stdout_orig = sys.stdout
# Redirect sys.stdout to the file
sys.stdout = open('loadsurvexblks.log', 'w')
@@ -455,6 +481,7 @@ def LoadAllSurvexBlocks():
survexfile.SetDirectory()
#Load all
# this is the first so id=1
survexblockroot = models.SurvexBlock(name="root", survexpath="", begin_char=0, cave=None, survexfile=survexfile, totalleglength=0.0)
survexblockroot.save()
fin = survexfile.OpenFile()
@@ -462,13 +489,20 @@ def LoadAllSurvexBlocks():
# The real work starts here
RecursiveLoad(survexblockroot, survexfile, fin, textlines)
fin.close()
survexblockroot.text = "".join(textlines)
survexblockroot.totalleglength = survexlegsalllength
survexblockroot.text = str(survexlegsnumber)
#survexblockroot.text = "".join(textlines) these are all blank
survexblockroot.save()
# Close the file
sys.stdout.close()
print("+", file=sys.stderr)
sys.stderr.flush();
# Restore sys.stdout to our old saved file handler
sys.stdout = stdout_orig
print(" - total number of survex legs: {}".format(survexlegsnumber))
print(" - total leg lengths loaded: {}m".format(survexlegsalllength))
print(' - Loaded All Survex Blocks.')
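All the chatty per-file output above lands in loadsurvexblks.log because sys.stdout is swapped for a file handle and restored once parsing finishes; only the dots and the final '+' reach the console via stderr. The pattern in isolation (a sketch, with a try/finally added for safety):

    import sys

    stdout_orig = sys.stdout
    sys.stdout = open('loadsurvexblks.log', 'w')
    try:
        print("verbose parser output lands in the log file")
    finally:
        sys.stdout.close()
        sys.stdout = stdout_orig   # console printing works again from here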
@@ -502,66 +536,103 @@ def LoadPos():
now = time.time()
if now - updtcache > 3*24*60*60:
print " cache is more than 3 days old. Deleting."
print( " cache is more than 3 days old. Deleting.")
os.remove(cachefile)
elif age < 0 :
print " cache is stale. Deleting."
print(" cache is stale. Deleting.")
os.remove(cachefile)
else:
print " cache is fresh. Reading..."
print(" cache is fresh. Reading...")
try:
with open(cachefile, "r") as f:
for line in f:
l = line.rstrip()
if l in notfoundbefore:
notfoundbefore[l] +=1 # should not be duplicates
print " DUPLICATE ", line, notfoundbefore[l]
print(" DUPLICATE ", line, notfoundbefore[l])
else:
notfoundbefore[l] =1
except:
print " FAILURE READ opening cache file %s" % (cachefile)
print(" FAILURE READ opening cache file %s" % (cachefile))
raise
notfoundnow =[]
found = 0
skip = {}
print "\n" # extra line because cavern overwrites the text buffer somehow
print("\n") # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA)
print " - This next bit takes a while. Matching ~32,000 survey positions. Be patient..."
print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")
mappoints = {}
for pt in MapLocations().points():
svxid, number, point_type, label = pt
mappoints[svxid]=True
posfile = open("%s.pos" % (topdata))
posfile.readline() #Drop header
survexblockroot = models_survex.SurvexBlock.objects.get(id=1)
for line in posfile.readlines():
r = poslineregex.match(line)
if r:
x, y, z, name = r.groups() # easting, northing, altitude
if name in notfoundbefore:
skip[name] = 1
x, y, z, id = r.groups()
if id in notfoundbefore:
skip[id] = 1
else:
try:
ss = models.SurvexStation.objects.lookup(name)
ss.x = float(x)
ss.y = float(y)
ss.z = float(z)
ss.save()
found += 1
except:
notfoundnow.append(name)
print " - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip))
for sid in mappoints:
if id.endswith(sid):
notfoundnow.append(id)
# Now that we don't import any stations, we create it rather than look it up
# ss = models_survex.SurvexStation.objects.lookup(id)
# need to set block_id which means doing a search on all the survex blocks..
# remove dot at end and add one at beginning
blockpath = "." + id[:-len(sid)].strip(".")
try:
sbqs = models_survex.SurvexBlock.objects.filter(survexpath=blockpath)
if len(sbqs)==1:
sb = sbqs[0]
if len(sbqs)>1:
message = ' ! MULTIPLE SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
print(message)
models.DataIssue.objects.create(parser='survex', message=message)
sb = sbqs[0]
elif len(sbqs)<=0:
message = ' ! ZERO SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
print(message)
models.DataIssue.objects.create(parser='survex', message=message)
sb = survexblockroot
except:
message = ' ! FAIL in getting SurvexBlock matching Entrance point {} {}'.format(blockpath, sid)
print(message)
models.DataIssue.objects.create(parser='survex', message=message)
try:
ss = models_survex.SurvexStation(name=id, block=sb)
ss.x = float(x)
ss.y = float(y)
ss.z = float(z)
ss.save()
found += 1
except:
message = ' ! FAIL to create SurvexStation Entrance point {} {}'.format(blockpath, sid)
print(message)
models.DataIssue.objects.create(parser='survex', message=message)
raise
#print(" - %s failed lookups of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)))
if found > 10: # i.e. a previous cave import has been done
try:
with open(cachefile, "w") as f:
c = len(notfoundnow)+len(skip)
for i in notfoundnow:
f.write("%s\n" % i)
pass #f.write("%s\n" % i)
for j in skip:
f.write("%s\n" % j) # NB skip not notfoundbefore
print(' Not-found cache file written: %s entries' % c)
pass #f.write("%s\n" % j) # NB skip not notfoundbefore
print((' Not-found cache file written: %s entries' % c))
except:
print " FAILURE WRITE opening cache file %s" % (cachefile)
print(" FAILURE WRITE opening cache file %s" % (cachefile))
raise
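Now that stations are not imported in advance, each matching line of the cavern-generated .pos file creates a SurvexStation directly, and the owning block is found by stripping the station id off the end of the dotted label. poslineregex is defined elsewhere in the file; the pattern below is a hypothetical stand-in, as is split_label:

    import re

    # Hypothetical stand-in: survex .pos lines look like
    # "(  easting,  northing,  altitude ) dotted.station.name"
    poslineregex = re.compile(
        r"^\(\s*([+-]?[\d.]+),\s*([+-]?[\d.]+),\s*([+-]?[\d.]+)\s*\)\s*(\S+)$")

    def split_label(label, sid):
        # Everything before the trailing station id, dot-normalised, is the
        # survexpath used to look up the owning SurvexBlock.
        assert label.endswith(sid)
        return "." + label[:-len(sid)].strip(".")

    m = poslineregex.match("(   36670.40,   83317.18,   1903.97 ) 1623.290.entrance")
    x, y, z, label = m.groups()
    print(split_label(label, "entrance"))   # -> ".1623.290"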


@@ -29,67 +29,14 @@ def get_or_create_placeholder(year):
placeholder_logbook_entry, newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
return placeholder_logbook_entry
# obsolete surveys.csv does not exist.
# def readSurveysFromCSV():
# try: # could probably combine these two
# surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
# except IOError:
# import io, urllib.request, urllib.parse, urllib.error
# surveytab = io.StringIO(urllib.request.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
# dialect=csv.Sniffer().sniff(surveytab.read())
# surveytab.seek(0,0)
# surveyreader = csv.reader(surveytab,dialect=dialect)
# headers = next(surveyreader)
# header = dict(list(zip(headers, list(range(len(headers)))))) #set up a dictionary where the indexes are header names and the values are column numbers
# # test if the expeditions have been added yet
# if Expedition.objects.count()==0:
# print("There are no expeditions in the database. Please run the logbook parser.")
# sys.exit()
# logging.info("Deleting all scanned images")
# ScannedImage.objects.all().delete()
# logging.info("Deleting all survey objects")
# Survey.objects.all().delete()
# logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")
# for survey in surveyreader:
# #I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
# walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
# # print(walletNumberLetter.groups())
# year=survey[header['Year']]
# surveyobj = Survey(
# expedition = Expedition.objects.filter(year=year)[0],
# wallet_number = walletNumberLetter.group('number'),
# logbook_entry = get_or_create_placeholder(year),
# comments = survey[header['Comments']],
# location = survey[header['Location']]
# )
# surveyobj.wallet_letter = walletNumberLetter.group('letter')
# if survey[header['Finished']]=='Yes':
# #try and find the sketch_scan
# pass
# surveyobj.save()
# logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
# dead
# def listdir(*directories):
# try:
# return os.listdir(os.path.join(settings.SURVEYS, *directories))
# except:
# import urllib.request, urllib.parse, urllib.error
# url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
# folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
# return [folder.rstrip(r"/") for folder in folders]
def listdir(*directories):
try:
return os.listdir(os.path.join(settings.SURVEYS, *directories))
except:
import urllib.request, urllib.parse, urllib.error
url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
return [folder.rstrip(r"/") for folder in folders]
# add survey scans
# def parseSurveyScans(expedition, logfile=None):
@@ -157,19 +104,6 @@ def get_or_create_placeholder(year):
# yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
# print((" ! No folder found for " + expedition.year + " at:- " + yearPath))
# dead
# def parseSurveys(logfile=None):
# try:
# readSurveysFromCSV()
# except (IOError, OSError):
# print(" ! Survey CSV not found..")
# pass
# print(" - Loading scans by expedition year")
# for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
# print("%s" % expedition, end=' ')
# parseSurveyScans(expedition)
# dead
# def isInterlacedPNG(filePath): #We need to check for interlaced PNGs because the thumbnail engine can't handle them (uses PIL)
# file=Image.open(filePath)
@@ -196,7 +130,6 @@ def GetListDir(sdir):
def LoadListScansFile(survexscansfolder):
gld = [ ]
# flatten out any directories in these wallet folders - should not be any
for (fyf, ffyf, fisdiryf) in GetListDir(survexscansfolder.fpath):
if fisdiryf:
@@ -204,24 +137,31 @@ def LoadListScansFile(survexscansfolder):
else:
gld.append((fyf, ffyf, fisdiryf))
c=0
for (fyf, ffyf, fisdiryf) in gld:
#assert not fisdiryf, ffyf
if re.search(r"\.(?:png|jpg|jpeg|pdf|jpeg|svg)(?i)$", fyf):
if re.search(r"\.(?:png|jpg|jpeg|pdf|svg|gif)(?i)$", fyf):
survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
survexscansingle.save()
c+=1
if c>=10:
print(".", end='')
c = 0
# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():
print(' - Loading Survey Scans... (deleting all objects first)')
print(' - Loading Survey Scans')
SurvexScanSingle.objects.all().delete()
SurvexScansFolder.objects.all().delete()
print(' - deleting all scansFolder and scansSingle objects')
# first do the smkhs (large kh survey scans) directory
survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "../surveys/smkhs"), walletname="smkhs")
print("smkhs", end=' ')
if os.path.isdir(survexscansfoldersmkhs.fpath):
survexscansfoldersmkhs.save()
LoadListScansFile(survexscansfoldersmkhs)
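The scan-filename filter above drops the duplicated jpeg alternative and adds gif, and its inline (?i) flag is what lets capitalised extensions through. A quick check of the behaviour (recent Python versions insist that inline flags appear at the start of the pattern, so this sketch passes re.IGNORECASE instead):

    import re

    scanpat = re.compile(r"\.(?:png|jpg|jpeg|pdf|svg|gif)$", re.IGNORECASE)
    for name in ("plan.pdf", "ELEV.JPG", "notes.txt"):
        print(name, bool(scanpat.search(name)))
    # plan.pdf True, ELEV.JPG True, notes.txt False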
@@ -229,7 +169,7 @@ def LoadListScans():
# iterate into the surveyscans directory
print(' - ', end=' ')
for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
for f, ff, fisdir in GetListDir(settings.SURVEY_SCANS):
if not fisdir:
continue

settings.py (3 changes; Normal file → Executable file)

@@ -1,4 +1,5 @@
from localsettings import * #inital localsettings call so that urljoins work
from localsettings import *
#inital localsettings call so that urljoins work
import os
import urlparse
import django


@@ -35,13 +35,11 @@
<a href="{% url "survexcaveslist" %}">All Survex</a> |
<a href="{% url "surveyscansfolders" %}">Scans</a> |
<a href="{% url "tunneldata" %}">Tunneldata</a> |
<a href="{% url "survexcavessingle" 107 %}">107</a> |
<a href="{% url "survexcavessingle" 161 %}">161</a> |
<a href="{% url "survexcavessingle" 204 %}">204</a> |
<a href="{% url "survexcavessingle" 258 %}">258</a> |
<a href="{% url "survexcavessingle" 264 %}">264</a> |
<a href="{% url "expedition" 2016 %}">Expo2016</a> |
<a href="{% url "expedition" 2017 %}">Expo2017</a> |
<a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a> |
<a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a> |
<a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a> |
<a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a> |
<a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a> |
<a href="{% url "expedition" 2018 %}">Expo2018</a> |
<a href="{% url "expedition" 2019 %}">Expo2019</a> |
<a href="{% url "expedition" 2020 %}">Expo2020</a> |


@@ -15,7 +15,20 @@
{% endfor %}
</ul>
<h3>1623</h3>
<h3>1626</h3>
<ul class="searchable">
{% for cave in caves1626 %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}
</li>
{% endfor %}
</ul>
<p style="text-align:right">
<a href="{% url "newcave" %}">New Cave</a>
</p>
<h3>1623</h3>
<table class="searchable">
{% for cave in caves1623 %}
@@ -25,17 +38,7 @@
{% endfor %}
</table>
<h3>1626</h3>
<ul class="searchable">
{% for cave in caves1626 %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}
</li>
{% endfor %}
</ul>
<p style="text-align:right">
<a href="{% url "newcave" %}">New Cave</a>
</p>
{% endblock %}


@@ -16,7 +16,7 @@
{% if error %}
<div class="noticeBox">
{{ error }}
{{ error }}
<a href="#" class="closeDiv">dismiss this message</a>
</div>
{% endif %}
@@ -96,61 +96,44 @@
</tr>
<tr>
<td>
surveys to Surveys.csv
<td>
surveys to Surveys.csv
</td>
<td>
<td>
</td>
<td>
<form name="export" method="get" action={% url "downloadlogbook" %}>
<p>Download a logbook file which is dynamically generated by Troggle.</p>
<p>Download a logbook file which is dynamically generated by Troggle.</p>
<p>
Expedition year:
<select name="year">
{% for expedition in expeditions %}
<option value="{{expedition}}"> {{expedition}} </option>
<option value="{{expedition}}"> {{expedition}} </option>
{% endfor %}
</select>
</p>
<p>
Output style:
<select name="extension">
<option value="txt">.txt file with MediaWiki markup - 2008 style</option>
<option value="html">.html file - 2005 style</option>
<select name="extension">
<option value="txt">.txt file with MediaWiki markup - 2008 style</option>
<option value="html">.html file - 2005 style</option>
</select>
</p>
<p>
<input name="download_logbook" type="submit" value="Download logbook" />
</p>
</form>
</td>
</td>
</tr>
<tr>
<td>
surveys to Surveys.csv
</td>
<td>
<form name="export" method="post" action="">
<p>Overwrite the existing Surveys.csv file with one generated by Troggle.</p>
<input disabled name="export_surveys" type="submit" value="Update {{settings.SURVEYS}}noinfo/Surveys.csv" />
</form>
</td>
<td>
<form name="export" method="get" action={% url "downloadsurveys" %}>
<p>Download a Surveys.csv file which is dynamically generated by Troggle.</p>
<input disabled name="download_surveys" type="submit" value="Download Surveys.csv" />
</form>
</td>
</tr>
<tr>
<td>qms to qms.csv</td><td>
<form name="export_qms" method="get" action="downloadqms">
<form name="export_qms" method="get" action="downloadqms">
<!--This is for choosing caves by area (drilldown).
<select id="qmcaveareachooser" class="searchable" >
@@ -158,12 +141,12 @@
-->
Choose a cave.
Choose a cave.
<select name="cave_id" id="qmcavechooser">
{% for cave in caves %}
<option value="{{cave.kataster_number}}">{{cave}}
</option>
</option>
{% endfor %}
</select>
@@ -174,4 +157,4 @@
</table>
</form>
{% endblock %}
{% endblock %}

templates/experimental.html (4 changes; Normal file → Executable file)

@@ -8,7 +8,9 @@
<h1>Expo Experimental</h1>
<p>Number of survey legs: {{nsurvexlegs}}, total length: {{totalsurvexlength}}</p>
<p>Number of survey legs: {{nsurvexlegs}}<br />
Total length: {{totalsurvexlength}} m on importing survex files.<br />
Total length: {{addupsurvexlength}} m adding up all the years below.</p>
<table>
<tr><th>Year</th><th>Surveys</th><th>Survey Legs</th><th>Total length</th></tr>


@@ -1,11 +1,10 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>{% block title %}{% endblock %}
</title>
<link rel="stylesheet" type="text/css" href="../css/main2.css" />
</head>
<body>
<div id="mainmenu">
@@ -13,17 +12,19 @@
<li><a href="/index.htm">Expo website home</a></li>
<li><a href="/intro.html">Introduction</a></li>
<li><a href="/infodx.htm">Main index</a></li>
<li><a href="/indxal.htm">Cave index</a></li>
<li><a href="/caves">Cave index</a></li>
{% if cavepage %}
<ul>
<li><a href="{% url "survexcaveslist" %}">All Survex</a></li>
<li><a href="{% url "surveyscansfolders" %}">Scans</a></li>
<li><a href="{% url "tunneldata" %}">Tunneldata</a></li>
<li><a href="{% url "survexcavessingle" 161 %}">161</a></li>
<li><a href="{% url "survexcavessingle" 204 %}">204</a></li>
<li><a href="{% url "survexcavessingle" 258 %}">258</a></li>
<li><a href="{% url "expedition" 2012 %}">Expo2012</a></li>
<li><a href="{% url "expedition" 2013 %}">Expo2013</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a></li>
<li><a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a></li>
<li><a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a></li>
<li><a href="{% url "expedition" 2018 %}">Expo2018</a></li>
<li><a href="{% url "expedition" 2019 %}">Expo2019</a></li>
<li><a href="/admin">Django admin</a></li>
</ul>
{% endif %}


@@ -5,7 +5,7 @@
<li><a href="/handbook/index.htm">Handbook</a></li>
<li><a href="/pubs.htm">Reports</a></li>
<li><a href="/areas.htm">Areas</a></li>
<li><a href="/indxal.htm">Caves</a></li>
<li><a href="/caves">Caves</a></li>
<li><a href="/expedition/2019">Troggle</a></li>
<li><form name=P method=get action="/search" target="_top">
<input id="omega-autofocus" type=search name=P value="testing" size=8 autofocus>

urls.py (28 changes; Normal file → Executable file)

@@ -15,21 +15,17 @@ admin.autodiscover()
# type url probably means it's used.
# HOW DOES THIS WORK:
# url( <regular expression that matches the thing in the web browser>,
# HOW DOES THIS WORK:
# url( <regular expression that matches the thing in the web browser>,
# <reference to python function in 'core' folder>,
# <name optional argument for URL reversing (doesn't do much)>)
# <name optional argument for URL reversing (doesn't do much)>)
actualurlpatterns = patterns('',
url(r'^testingurl/?$' , views_caves.millenialcaves, name="testing"),
url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"),
url(r'^troggle$', views_other.frontpage, name="frontpage"),
url(r'^troggle$', views_other.frontpage, name="frontpage"),
url(r'^todo/$', views_other.todo, name="todo"),
url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
url(r'^caves$', views_caves.caveindex, name="caveindex"),
url(r'^people/?$', views_logbooks.personindex, name="personindex"),
url(r'^newqmnumber/?$', views_other.ajax_QM_number, ),
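The caveindex pattern loses its optional trailing slash: ^caves/?$ answered both /caves and /caves/, while ^caves$ matches only the bare form, so the two URLs can behave differently. Since Django url() patterns here are ordinary regexes, plain re shows the difference:

    import re

    for pat in (r"^caves/?$", r"^caves$"):
        print(pat, [bool(re.match(pat, p)) for p in ("caves", "caves/")])
    # ^caves/?$ [True, True]
    # ^caves$   [True, False]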
@@ -89,8 +85,6 @@ actualurlpatterns = patterns('',
# Is all this lot out of date ? Maybe the logbooks work?
url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"),
url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"),
url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$',views_other.downloadLogbook),
url(r'^logbook/?$',views_other.downloadLogbook, name="downloadlogbook"),
url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),
@@ -112,6 +106,10 @@ actualurlpatterns = patterns('',
# (r'^personform/(.*)$', personForm),
(r'^expofiles/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.EXPOFILES, 'show_indexes': True}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT, 'show_indexes': True}),
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
(r'^tinymce_media/(?P<path>.*)$', 'django.views.static.serve',
@@ -125,9 +123,9 @@ actualurlpatterns = patterns('',
url(r'^survexfile/(?P<survex_file>.*?)\.err$', views_survex.err),
url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),
url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
url(r'^survexfile/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),
(r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir),
@@ -139,7 +137,7 @@ actualurlpatterns = patterns('',
#(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}),
url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg|pdf|PNG|JPG|JPEG|PDF))$',
view_surveys.surveyscansingle, name="surveyscansingle"),
url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),
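The survey_scans file pattern gains pdf plus explicit upper-case alternatives because URL regexes are compiled case-sensitively by default. A small demonstration of the old filter against the new one:

    import re

    old = re.compile(r"[^/]+(?:png|jpg|jpeg)$")
    new = re.compile(r"[^/]+(?:png|jpg|jpeg|pdf|PNG|JPG|JPEG|PDF)$")
    for f in ("plan.jpg", "plan.PDF"):
        print(f, bool(old.match(f)), bool(new.match(f)))
    # plan.jpg True True
    # plan.PDF False True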