forked from expo/troggle

Compare commits: master ... old-master

28 commits (SHA1):
37553da556
8861e2e240
09e9932711
7fe34bedb8
d134a58931
90a5524036
69f72184a6
e0d8df0a79
15d4defe0e
9052982089
0a35824b9c
bc5c0b9e53
e873dedcf2
a0c5a34b3f
6c3c70a02c
43394facdf
d5b4a0b1d9
8feb1774bb
d55a58bfc8
fffb083aee
b9aa447cac
932b1a2ae3
367854c9a6
c76aed3bf6
079f528963
972e6f3a95
7af6c3cb9c
501a5122d8
Files changed:
  README
  core: management/commands, models.py, models_survex.py, views_caves.py,
        views_logbooks.py, views_other.py, views_survex.py, imagekit
  parsers
  settings.py, templates, urls.py

README/index.html (new file, +27 lines)
@@ -0,0 +1,27 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<title>Troggle - Coding Documentation</title>
+<link rel="stylesheet" type="text/css" href="..media/css/main2.css" />
+</head>
+<body>
+<h1>Troggle Code - README</h1>
+<h2>Contents of README.txt file</h2>
+
+<iframe name="erriframe" width="90%" height="45%"
+src="../readme.txt" frameborder="1" ></iframe>
+
+<h2>Troggle documentation in the Expo Handbook</h2>
+<ul>
+<li><a href="http://expo.survex.com/handbook/troggle/trogintro.html">Intro</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogmanual.html">Troggle manual</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogarch.html">Troggle data model</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogimport.html">Troggle importing data</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogdesign.html">Troggle design decisions</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogdesignx.html">Troggle future architectures</a>
+<li><a href="http://expo.survex.com/handbook/troggle/trogsimpler.html">a kinder simpler Troggle?</a>
+
+</ul>
+<hr />
+</body></html>
(core/management/commands: the per-file header for this management command was not captured.)

@@ -1,187 +1,33 @@
-from django.core.management.base import BaseCommand, CommandError
-from optparse import make_option
-from troggle.core.models import Cave
-import settings
 import os
+from optparse import make_option

 from django.db import connection
 from django.core import management
-from django.contrib.auth.models import User
 from django.core.urlresolvers import reverse
+from django.core.management.base import BaseCommand, CommandError
+from django.contrib.auth.models import User

 from troggle.core.models import Cave, Entrance
 import troggle.flatpages.models

+import settings

 """Pretty much all of this is now replaced by databaseRest.py
-I don't know why this still exists
+I don't know why this still exists. Needs testing to see if
+removing it makes django misbehave.
 """
-databasename=settings.DATABASES['default']['NAME']
-expouser=settings.EXPOUSER
-expouserpass=settings.EXPOUSERPASS
-expouseremail=settings.EXPOUSER_EMAIL

 class Command(BaseCommand):
-    help = 'This is normal usage, clear database and reread everything'
+    help = 'Removed as redundant - use databaseReset.py'

     option_list = BaseCommand.option_list + (
         make_option('--reset',
             action='store_true',
             dest='reset',
             default=False,
-            help='Reset the entier DB from files'),
+            help='Removed as redundant'),
         )

     def handle(self, *args, **options):
         print(args)
         print(options)

(The remaining roughly 150 deleted lines were the if/elif dispatch in handle() over "desc",
"scans", "caves", "people", "QMs", "tunnel", "survex", "survexpos", "logbooks", "autologbooks",
"dumplogbooks" and "writeCaves", plus the helper methods: reload_db, which dropped and recreated
the database and the expo superuser; make_dirs; import_caves, import_people, import_logbooks,
import_survex, import_QMs, import_surveys, import_surveyscans, import_tunnelfiles; reset;
pageredirects; writeCaves; and troggle_usage with its "Usage is 'manage.py reset_db <command>'"
help text.)
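The deleted help text above implies the old command was driven through Django's manage.py,
while its replacement is the standalone databaseReset.py script shown further down. A
hypothetical before/after invocation, assuming the usual layout with manage.py and
databaseReset.py in the troggle root (paths and arguments are illustrative only):

    # before this commit (management command, now stubbed out):
    python manage.py reset_db caves

    # after this commit (standalone script; see databaseReset.py below):
    python databaseReset.py caves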
(The next two hunks are from the core model files; their per-file headers were not captured.)

@@ -39,10 +39,8 @@ try:
                     filename=settings.LOGFILE,
                     filemode='w')
 except:
-    subprocess.call(settings.FIX_PERMISSIONS)
-    logging.basicConfig(level=logging.DEBUG,
-                        filename=settings.LOGFILE,
-                        filemode='w')
+    # Opening of file for writing is going to fail currently, so decide it doesn't matter for now
+    pass

 #This class is for adding fields and methods which all of our models will have.
 class TroggleModel(models.Model):

@@ -147,7 +147,7 @@ class SurvexBlock(models.Model):
             return ssl[0]
         #print name
         ss = SurvexStation(name=name, block=self)
-        ss.save()
+        #ss.save()
         return ss

     def DayIndex(self):
core/views_caves.py: 28 lines changed; file mode Normal file → Executable file

@@ -9,20 +9,44 @@ import troggle.core.models as models
 import troggle.settings as settings
 from troggle.helper import login_required_if_public

+from PIL import Image, ImageDraw, ImageFont
 from django.forms.models import modelformset_factory
 from django import forms
 from django.core.urlresolvers import reverse
 from django.http import HttpResponse, HttpResponseRedirect
 from django.conf import settings
 import re
+import os
 import urlparse
 #import urllib.parse
 from django.shortcuts import get_object_or_404, render
 import settings


-from PIL import Image, ImageDraw, ImageFont
-import string, os, sys, subprocess
+class MapLocations(object):
+    p = [
+        ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point"),
+        ("226-96", "BZkn", "Reference", "Bräuning Zinken trig point"),
+        ("vd1","VD1","Reference", "VD1 survey point"),
+        ("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point"),
+        ("2000","Nipple","Reference", "Nipple (Weiße Warze)"),
+        ("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit"),
+        ("topcamp", "OTC", "Reference", "Old Top Camp"),
+        ("laser.0", "LSR0", "Reference", "Laser Point 0"),
+        ("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
+        ("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
+        ("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
+        ("225-96", "BAlm", "Reference", "Bräuning Alm trig point")
+    ]
+    def points(self):
+        for ent in Entrance.objects.all():
+            if ent.best_station():
+                areaName = ent.caveandentrance_set.all()[0].cave.getArea().short_name
+                self.p.append((ent.best_station(), "%s-%s" % (areaName, str(ent)[5:]), ent.needs_surface_work(), str(ent)))
+        return self.p
+
+    def __str__(self):
+        return "{} map locations".format(len(self.p))

 def getCave(cave_id):
     """Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm."""
core/views_logbooks.py
Normal file → Executable file
33
core/views_logbooks.py
Normal file → Executable file
@ -218,20 +218,41 @@ def pathsreport(request):
|
|||||||
|
|
||||||
|
|
||||||
def experimental(request):
|
def experimental(request):
|
||||||
|
blockroots = models.SurvexBlock.objects.filter(name="root")
|
||||||
|
if len(blockroots)>1:
|
||||||
|
print(" ! more than one root survexblock {}".format(len(blockroots)))
|
||||||
|
for sbr in blockroots:
|
||||||
|
print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
|
||||||
|
sbr = blockroots[0]
|
||||||
|
totalsurvexlength = sbr.totalleglength
|
||||||
|
try:
|
||||||
|
nimportlegs = int(sbr.text)
|
||||||
|
except:
|
||||||
|
print("{} {} {} {}".format(sbr.id, sbr.name, sbr.text, sbr.date))
|
||||||
|
nimportlegs = -1
|
||||||
|
|
||||||
legsbyexpo = [ ]
|
legsbyexpo = [ ]
|
||||||
|
addupsurvexlength = 0
|
||||||
for expedition in Expedition.objects.all():
|
for expedition in Expedition.objects.all():
|
||||||
survexblocks = expedition.survexblock_set.all()
|
survexblocks = expedition.survexblock_set.all()
|
||||||
survexlegs = [ ]
|
#survexlegs = [ ]
|
||||||
|
legsyear=0
|
||||||
survexleglength = 0.0
|
survexleglength = 0.0
|
||||||
for survexblock in survexblocks:
|
for survexblock in survexblocks:
|
||||||
survexlegs.extend(survexblock.survexleg_set.all())
|
#survexlegs.extend(survexblock.survexleg_set.all())
|
||||||
survexleglength += survexblock.totalleglength
|
survexleglength += survexblock.totalleglength
|
||||||
legsbyexpo.append((expedition, {"nsurvexlegs":len(survexlegs), "survexleglength":survexleglength}))
|
try:
|
||||||
|
legsyear += int(survexblock.text)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
addupsurvexlength += survexleglength
|
||||||
|
legsbyexpo.append((expedition, {"nsurvexlegs":legsyear, "survexleglength":survexleglength}))
|
||||||
legsbyexpo.reverse()
|
legsbyexpo.reverse()
|
||||||
|
|
||||||
survexlegs = models.SurvexLeg.objects.all()
|
#removing survexleg objects completely
|
||||||
totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
|
#survexlegs = models.SurvexLeg.objects.all()
|
||||||
return render(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
|
#totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
|
||||||
|
return render(request, 'experimental.html', { "nsurvexlegs":nimportlegs, "totalsurvexlength":totalsurvexlength, "addupsurvexlength":addupsurvexlength, "legsbyexpo":legsbyexpo })
|
||||||
|
|
||||||
@login_required_if_public
|
@login_required_if_public
|
||||||
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
|
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
|
||||||
|
@ -71,20 +71,6 @@ def controlPanel(request):
|
|||||||
|
|
||||||
return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
|
return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
|
||||||
|
|
||||||
def downloadCavetab(request):
|
|
||||||
from export import tocavetab
|
|
||||||
response = HttpResponse(content_type='text/csv')
|
|
||||||
response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV'
|
|
||||||
tocavetab.writeCaveTab(response)
|
|
||||||
return response
|
|
||||||
|
|
||||||
def downloadSurveys(request):
|
|
||||||
from export import tosurveys
|
|
||||||
response = HttpResponse(content_type='text/csv')
|
|
||||||
response['Content-Disposition'] = 'attachment; filename=Surveys.csv'
|
|
||||||
tosurveys.writeCaveTab(response)
|
|
||||||
return response
|
|
||||||
|
|
||||||
def downloadLogbook(request,year=None,extension=None,queryset=None):
|
def downloadLogbook(request,year=None,extension=None,queryset=None):
|
||||||
|
|
||||||
if year:
|
if year:
|
||||||
|
core/views_survex.py: 132 lines changed; file mode Normal file → Executable file

@@ -15,47 +15,76 @@ from parsers.people import GetPersonExpeditionNameLookup
 import troggle.settings as settings
 import parsers.survex

(The old short survextemplatefile, headed "; Locn: Totes Gebirge, Austria - Loser/Augst-Eck
Plateau (kataster group 1623)", and the ReplaceTabs() tab-expansion helper are deleted and
replaced by a much fuller template:)

+survextemplatefile = """; *** THIS IS A TEMPLATE FILE NOT WHAT YOU MIGHT BE EXPECTING ***
+ *** DO NOT SAVE THIS FILE WITHOUT RENAMING IT !! ***
+;[Stuff in square brackets is example text to be replaced with real data,
+; removing the square brackets]
+
 *begin [surveyname]

+; stations linked into other surveys (or likely to)
+*export [1 8 12 34]
+
+; Cave:
+; Area in cave/QM:
+*title ""
+*date [2040.07.04] ; <-- CHANGE THIS DATE
+*team Insts [Fred Fossa]
+*team Notes [Brenda Badger]
+*team Pics [Luke Lynx]
+*team Tape [Albert Aadvark]
+*instrument [SAP #+Laser Tape/DistoX/Compass # ; Clino #]
+; Calibration: [Where, readings]
+*ref [2040#00] ; <-- CHANGE THIS TOO
+; the #number is on the clear pocket containing the original notes
+
+; if using a tape:
 *calibrate tape +0.0 ; +ve if tape was too short, -ve if too long

+; Centreline data
+*data normal from to length bearing gradient ignoreall
+[ 1 2 5.57 034.5 -12.8 ]
+
+;-----------
+;recorded station details (leave commented out)
+;(NP=Nail Polish, LHW/RHW=Left/Right Hand Wall)
+;Station Left Right Up Down Description
+;[Red] nail varnish markings
+[;1 0.8 0 5.3 1.6 ; NP on boulder. pt 23 on foo survey ]
+[;2 0.3 1.2 6 1.2 ; NP '2' LHW ]
+[;3 1.3 0 3.4 0.2 ; Rock on floor - not refindable ]
+
+;LRUDs arranged into passage tubes
+;new *data command for each 'passage',
+;repeat stations and adjust numbers as needed
+*data passage station left right up down
+;[ 1 0.8 0 5.3 1.6 ]
+;[ 2 0.3 1.2 6 1.2 ]
+*data passage station left right up down
+;[ 1 1.3 1.5 5.3 1.6 ]
+;[ 3 2.4 0 3.4 0.2 ]
+
+;-----------
+;Question Mark List ;(leave commented-out)
+; The nearest-station is the name of the survey and station which are nearest to
+; the QM. The resolution-station is either '-' to indicate that the QM hasn't
+; been checked; or the name of the survey and station which push that QM. If a
+; QM doesn't go anywhere, set the resolution-station to be the same as the
+; nearest-station. Include any relevant details of how to find or push the QM in
+; the textual description.
+;Serial number grade(A/B/C/X) nearest-station resolution-station description
+;[ QM1 A surveyname.3 - description of QM ]
+;[ QM2 B surveyname.5 - description of QM ]
+
+;------------
+;Cave description ;(leave commented-out)
+;freeform text describing this section of the cave
+
+*end [surveyname]
+"""

 class SvxForm(forms.Form):

@@ -63,15 +92,14 @@ class SvxForm(forms.Form):
     filename = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
     datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
     outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
-    code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18}))
+    code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":36}))

     def GetDiscCode(self):
         fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
         if not os.path.isfile(fname):
             return survextemplatefile
-        fin = open(fname, "rb")
-        svxtext = fin.read().decode("latin1") # unicode(a, "latin1")
-        svxtext = ReplaceTabs(svxtext).strip()
+        fin = open(fname, "rt")
+        svxtext = fin.read().encode("utf8")
         fin.close()
         return svxtext

@@ -84,19 +112,28 @@ class SvxForm(forms.Form):
     def SaveCode(self, rcode):
         fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
         if not os.path.isfile(fname):
-            # only save if appears valid
             if re.search(r"\[|\]", rcode):
-                return "Error: clean up all []s from the text"
+                return "Error: remove all []s from the text. They are only template guidance."
             mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
             if not mbeginend:
                 return "Error: no begin/end block here"
             if mbeginend.group(1) != mbeginend.group(2):
-                return "Error: mismatching beginend"
+                return "Error: mismatching begin/end labels"

-        fout = open(fname, "w")
-        res = fout.write(rcode.encode("latin1"))
+        # Make this create new survex folders if needed
+        try:
+            fout = open(fname, "wb")
+        except IOError:
+            pth = os.path.dirname(self.data['filename'])
+            newpath = os.path.join(settings.SURVEX_DATA, pth)
+            if not os.path.exists(newpath):
+                os.makedirs(newpath)
+            fout = open(fname, "wb")
+
+        # javascript seems to insert CRLF on WSL1 whatever you say. So fix that:
+        res = fout.write(rcode.replace("\r",""))
         fout.close()
-        return "SAVED"
+        return "SAVED ."

     def Process(self):
         print("....\n\n\n....Processing\n\n\n")

@@ -104,7 +141,7 @@ class SvxForm(forms.Form):
         os.chdir(os.path.split(settings.SURVEX_DATA + self.data['filename'])[0])
         os.system(settings.CAVERN + " --log " + settings.SURVEX_DATA + self.data['filename'] + ".svx")
         os.chdir(cwd)
-        fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rb")
+        fin = open(settings.SURVEX_DATA + self.data['filename'] + ".log", "rt")
         log = fin.read()
         fin.close()
         log = re.sub("(?s).*?(Survey contains)", "\\1", log)

@@ -144,7 +181,6 @@ def svx(request, survex_file):
     form.data['code'] = rcode
     if "save" in rform.data:
         if request.user.is_authenticated():
-            #print("sssavvving")
             message = form.SaveCode(rcode)
         else:
             message = "You do not have authority to save this file"

@@ -179,7 +215,7 @@ def svx(request, survex_file):
     return render_to_response('svxfile.html', vmap)

 def svxraw(request, survex_file):
-    svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb")
+    svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rt",encoding='utf8')
     return HttpResponse(svx, content_type="text")

@@ -194,20 +230,20 @@ def process(survex_file):
 def threed(request, survex_file):
     process(survex_file)
     try:
-        threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb")
+        threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rt",encoding='utf8')
         return HttpResponse(threed, content_type="model/3d")
     except:
-        log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
+        log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
         return HttpResponse(log, content_type="text")

 def log(request, survex_file):
     process(survex_file)
-    log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
+    log = open(settings.SURVEX_DATA + survex_file + ".log", "rt",encoding='utf8')
     return HttpResponse(log, content_type="text")

 def err(request, survex_file):
     process(survex_file)
-    err = open(settings.SURVEX_DATA + survex_file + ".err", "rb")
+    err = open(settings.SURVEX_DATA + survex_file + ".err", "rt",encoding='utf8')
     return HttpResponse(err, content_type="text")
databaseReset.py: 51 lines changed; file mode Normal file → Executable file

@@ -6,6 +6,10 @@ import timeit
 import json

 import settings
+if os.geteuid() == 0:
+    print("This script should be run as expo not root - quitting")
+    exit()
+
 os.environ['PYTHONPATH'] = settings.PYTHON_PATH
 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')

@@ -61,9 +65,9 @@ def dirsredirect():
     #should also deal with permissions here.
     #if not os.path.isdir(settings.PHOTOS_ROOT):
     #os.mkdir(settings.PHOTOS_ROOT)
-    for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
-        f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
-        f.save()
+    # for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
+    #     f = troggle.flatpages.models.Redirect(originalURL = oldURL, newURL = newURL)
+    #     f.save()

 def import_caves():
     import troggle.parsers.caves

@@ -118,6 +122,14 @@ def import_tunnelfiles():
 #import logbooksdump
 #def import_auto_logbooks():
 #def dumplogbooks():

+#def writeCaves():
+# Writes out all cave and entrance HTML files to
+# folder specified in settings.CAVEDESCRIPTIONS
+#    for cave in Cave.objects.all():
+#        cave.writeDataFile()
+#    for entrance in Entrance.objects.all():
+#        entrance.writeDataFile()
+
 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

 class JobQueue():

@@ -130,8 +142,8 @@ class JobQueue():
         self.results = {}
         self.results_order=[
             "date","runlabel","reinit", "caves", "people",
-            "logbooks", "QMs", "survexblks", "survexpos",
-            "tunnel", "scans", "surveyimgs", "test", "dirsredirect", "syncuser" ]
+            "logbooks", "QMs", "scans", "survexblks", "survexpos",
+            "tunnel", "surveyimgs", "test", "dirsredirect", "syncuser" ]
         for k in self.results_order:
             self.results[k]=[]
         self.tfile = "import_profile.json"

@@ -309,6 +321,8 @@ class JobQueue():
         for k in self.results_order:
             if k =="dirsredirect":
                 break
+            if k =="surveyimgs":
+                break
             elif k =="syncuser":
                 break
             elif k =="test":

@@ -360,17 +374,15 @@ def usage():
         profile - print the profile from previous runs. Import nothing.

         reset - normal usage: clear database and reread everything from files - time-consuming
-        caves - read in the caves
+        caves - read in the caves (must run first after reset)
+        people - read in the people from folk.csv (must run before logbooks)
         logbooks - read in the logbooks
-        people - read in the people from folk.csv
         QMs - read in the QM csv files (older caves only)
-        scans - the survey scans in all the wallets
+        scans - the survey scans in all the wallets (must run before survex)
         survex - read in the survex files - all the survex blocks but not the x/y/z positions
-        survexpos - just the x/y/z Pos out of the survex files
-        survexall - both survex and survexpos
+        survexpos - set the x/y/z positions for entrances and fixed points

         tunnel - read in the Tunnel files - which scans the survey scans too
-        drawings - Tunnel, QMs, scans

         reinit - clear database (delete everything) and make empty tables. Import nothing.
         syncuser - needed after reloading database from SQL backup

@@ -384,6 +396,8 @@ def usage():

         caves and logbooks must be run on an empty db before the others as they
         set up db tables used by the others.
+
+        the in-memory phase is on an empty db, so always runs reinit, caves & people for this phase
         """)

 if __name__ == "__main__":

@@ -413,8 +427,6 @@ if __name__ == "__main__":
         jq.enq("people",import_people)
     elif "QMs" in sys.argv:
         jq.enq("QMs",import_QMs)
-    elif "reinit" in sys.argv:
-        jq.enq("reinit",reinit_db)
     elif "reset" in sys.argv:
         jq.enq("reinit",reinit_db)
         jq.enq("dirsredirect",dirsredirect)

@@ -423,9 +435,9 @@ if __name__ == "__main__":
         jq.enq("scans",import_surveyscans)
         jq.enq("logbooks",import_logbooks)
         jq.enq("QMs",import_QMs)
-        jq.enq("survexblks",import_survexblks)
-        jq.enq("survexpos",import_survexpos)
         jq.enq("tunnel",import_tunnelfiles)
+        #jq.enq("survexblks",import_survexblks)
+        #jq.enq("survexpos",import_survexpos)
     elif "scans" in sys.argv:
         jq.enq("scans",import_surveyscans)
     elif "survex" in sys.argv:

@@ -434,19 +446,14 @@ if __name__ == "__main__":
         jq.enq("survexpos",import_survexpos)
     elif "tunnel" in sys.argv:
         jq.enq("tunnel",import_tunnelfiles)
-    elif "survexall" in sys.argv:
-        jq.enq("survexblks",import_survexblks)
-        jq.enq("survexpos",import_survexpos)
-    elif "drawings" in sys.argv:
-        jq.enq("QMs",import_QMs)
-        jq.enq("scans",import_surveyscans)
-        jq.enq("tunnel",import_tunnelfiles)
     elif "surveyimgs" in sys.argv:
         jq.enq("surveyimgs",import_surveyimgs) # imports into tables which are never read
     elif "autologbooks" in sys.argv: # untested in 2020
         import_auto_logbooks()
     elif "dumplogbooks" in sys.argv: # untested in 2020
         dumplogbooks()
+    # elif "writecaves" in sys.argv: # untested in 2020 - will overwrite input files!!
+    #     writeCaves()
     elif "profile" in sys.argv:
         jq.loadprofiles()
         jq.showprofile()
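The usage() changes above encode an ordering constraint when the imports are run one at a time
rather than via "reset". A hypothetical command sequence consistent with those notes, using only
subcommands that the revised __main__ block still handles (exact paths and environment depend
on the deployment):

    python databaseReset.py caves       # "must run first after reset"
    python databaseReset.py people      # "must run before logbooks"
    python databaseReset.py logbooks
    python databaseReset.py QMs
    python databaseReset.py scans       # "must run before survex"
    python databaseReset.py survex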
@@ -1,38 +0,0 @@
(Deleted file, 38 lines: an ImageKit Django management command that cleared all cached image
files. Its Command.handle() called flush_cache(), which walked every ImageModel subclass in the
named apps, deleted each cached spec image and re-created any spec marked pre_cache.)
@@ -1,136 +0,0 @@
(Deleted file, 136 lines: the ImageKit model layer. It defined the ImageModelBase metaclass,
which loaded the IKOptions spec module and attached a Descriptor per ImageSpec, and the abstract
ImageModel base class with admin_thumbnail_view(), cache clearing and pre-caching helpers, a
save() that optionally ran a preprocessor spec over the uploaded image, and a delete() that
cleared the cache.)
@@ -1,23 +0,0 @@
(Deleted file, 23 lines: the ImageKit Options class holding per-model defaults such as
image_field, crop_horz_field, crop_vert_field, preprocessor_spec, cache_dir, save_count_as,
cache_filename_format, admin_thumbnail_spec and spec_module.)
@@ -1,119 +0,0 @@
(Deleted file, 119 lines: ImageKit image specifications. It defined the ImageSpec base class
plus the Accessor and Descriptor helpers that generated, cached and served processed images,
exposing name, url, file, image, width and height properties and an increment_count option.)
@@ -1,86 +0,0 @@
(Deleted file, 86 lines: the ImageKit test suite. It defined resize and crop processor specs, a
minimal TestPhoto ImageModel, and the IKTest TestCase covering setup, resize-to-width,
resize-to-height, cropping, URL generation and file cleanup.)
@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
             elif cave=='hauch':
                 placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
             if hadToCreate:
-                print(cave + " placeholder logbook entry for " + str(year) + " added to database")
+                print((" - placeholder logbook entry for " + cave + " " + str(year) + " added to database"))
             QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
             newQM = QM()
             newQM.found_by=placeholder

@@ -71,9 +71,9 @@ def parseCaveQMs(cave,inputFile):
                 if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
                     preexistingQM.delete()
                     newQM.save()
-                    print("overwriting " + str(preexistingQM) +"\r")
+                    #print((" - overwriting " + str(preexistingQM) +"\r"))
                 else: # otherwise, print that it was ignored
-                    print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
+                    print((" - preserving " + str(preexistingQM) + ", which was edited in admin \r"))

             except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
                 newQM.save()
parsers/caves.py: 1 line changed; file mode Normal file → Executable file

@@ -170,6 +170,7 @@ def readcave(filename):


 def getXML(text, itemname, minItems = 1, maxItems = None, printwarnings = True, context = ""):
+    # this next line is where it crashes horribly if a stray umlaut creeps in. Will fix itself in python3
     items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": itemname}, text, re.S)
     if len(items) < minItems and printwarnings:
         message = " ! %(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
@ -1,21 +1,20 @@
|
|||||||
#.-*- coding: utf-8 -*-
|
#.-*- coding: utf-8 -*-
|
||||||
|
from __future__ import (absolute_import, division,
|
||||||
from django.conf import settings
|
print_function)
|
||||||
import troggle.core.models as models
|
|
||||||
|
|
||||||
from parsers.people import GetPersonExpeditionNameLookup
|
|
||||||
from parsers.cavetab import GetCaveLookup
|
|
||||||
|
|
||||||
from django.template.defaultfilters import slugify
|
|
||||||
from django.utils.timezone import get_current_timezone
|
|
||||||
from django.utils.timezone import make_aware
|
|
||||||
|
|
||||||
import csv
|
import csv
|
||||||
import re
|
import re
|
||||||
import datetime, time
|
import datetime, time
|
||||||
import os
|
import os
|
||||||
import pickle
|
import pickle
|
||||||
|
|
||||||
|
from django.conf import settings
|
||||||
|
from django.template.defaultfilters import slugify
|
||||||
|
|
||||||
|
|
||||||
|
from troggle.core.models import DataIssue, Expedition
|
||||||
|
import troggle.core.models as models
|
||||||
|
from parsers.people import GetPersonExpeditionNameLookup
|
||||||
|
from parsers.cavetab import GetCaveLookup
|
||||||
from utils import save_carefully
|
from utils import save_carefully
|
||||||
|
|
||||||
#
|
#
|
||||||
@ -92,7 +91,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
|
|||||||
|
|
||||||
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
|
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
|
||||||
if not author:
|
if not author:
|
||||||
print(" - Skipping logentry: " + title + " - no author for entry")
|
print(" * Skipping logentry: " + title + " - no author for entry")
|
||||||
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
|
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
|
||||||
models.DataIssue.objects.create(parser='logbooks', message=message)
|
models.DataIssue.objects.create(parser='logbooks', message=message)
|
||||||
return
|
return
|
||||||
@@ -135,7 +134,6 @@ def Parselogwikitxt(year, expedition, txt):
trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
for triphead, triptext in trippara:
tripheadp = triphead.split("|")
- #print "ttt", tripheadp
assert len(tripheadp) == 3, (tripheadp, triptext)
tripdate, tripplace, trippeople = tripheadp
tripsplace = tripplace.split(" - ")
@@ -143,19 +141,14 @@ def Parselogwikitxt(year, expedition, txt):

tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
if tul:
- #assert len(tul) <= 1, (triphead, triptext)
- #assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
tu = tul[0][0]
else:
tu = ""
- #assert tripcave == "Journey", (triphead, triptext)

- #print tripdate
ldate = ParseDate(tripdate.strip(), year)
- #print "\n", tripcave, "--- ppp", trippeople, len(triptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)

- # 2002, 2004, 2005, 2007, 2010 - 2018
+ # 2002, 2004, 2005, 2007, 2010 - now
def Parseloghtmltxt(year, expedition, txt):
#print(" - Starting log html parser")
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
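The T/U (time underground) regex above accepts several spellings. A quick, self-contained check of how it behaves on typical logbook strings (the sample strings are invented):

    import re

    tu_regex = re.compile(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?")

    for text in ["T/U: 5 hrs", "TU 2.5 hours", "T/U unknown", "no time recorded"]:
        m = tu_regex.search(text)
        print(text, "->", m.group(1) if m else "")   # 5 / 2.5 / unknown / (empty)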
@@ -175,28 +168,21 @@ def Parseloghtmltxt(year, expedition, txt):
''', trippara)
if not s:
if not re.search(r"Rigging Guide", trippara):
- print("can't parse: ", trippara) # this is 2007 which needs editing
+ print(("can't parse: ", trippara)) # this is 2007 which needs editing
- #assert s, trippara
continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
ldate = ParseDate(tripdate.strip(), year)
- #assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
- #trippeople = re.sub(r"Ol(?!l)", "Olly", trippeople)
- #trippeople = re.sub(r"Wook(?!e)", "Wookey", trippeople)
triptitles = triptitle.split(" - ")
if len(triptitles) >= 2:
tripcave = triptitles[0]
else:
tripcave = "UNKNOWN"
- #print("\n", tripcave, "--- ppp", trippeople, len(triptext))
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "</br></br>", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
- if logbook_entry_count == 0:
- print(" - No trip entries found in logbook, check the syntax matches htmltxt format")


# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
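Parseloghtmltxt splits a year's HTML logbook into per-trip chunks on the <hr /> separators and then pulls named fields out of each chunk. A stripped-down sketch of that two-stage approach, with a simplified header regex and invented HTML rather than the real troggle pattern:

    import re

    logbook_html = """
    <hr />
    <p class="bloghead">2018-07-15 | Base Camp | Fred, Wilma</p>
    <p>Carried tackle up the hill.</p>
    <hr />
    <p class="bloghead">2018-07-16 | 258 | Wilma</p>
    <p>Surveyed the new lead.</p>
    """

    # stage 1: one chunk per trip, delimited by <hr />
    tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr|$)", logbook_html)

    # stage 2: pick the header fields out of each chunk (simplified compared with the real parser)
    head_regex = re.compile(r'<p class="bloghead">(.*?)\s*\|\s*(.*?)\s*\|\s*(.*?)</p>', re.S)
    for para in tripparas:
        m = head_regex.search(para)
        if m:
            date, place, people = m.groups()
            print(date, place, people)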
@@ -210,9 +196,6 @@ def Parseloghtml01(year, expedition, txt):
tripid = mtripid and mtripid.group(1) or ""
tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)

- #print " ", [tripheader]
- #continue

tripdate, triptitle, trippeople = tripheader.split("|")
ldate = ParseDate(tripdate.strip(), year)

@@ -230,19 +213,14 @@ def Parseloghtml01(year, expedition, txt):

mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&|</?p>|\((?:same day|\d+)\))*$', ltriptext)
if mtail:
- #print mtail.group(0)
ltriptext = ltriptext[:mtail.start(0)]
ltriptext = re.sub(r"</p>", "", ltriptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>|<br>", "\n\n", ltriptext).strip()
- #ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext)
ltriptext = re.sub(r"</?u>", "_", ltriptext)
ltriptext = re.sub(r"</?i>", "''", ltriptext)
ltriptext = re.sub(r"</?b>", "'''", ltriptext)


- #print ldate, trippeople.strip()
- # could includ the tripid (url link for cross referencing)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
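The chain of re.sub calls above rewrites the limited HTML markup of the old logbooks into the wiki-style conventions the entries are stored with (underline to _, italics to '', bold to '''). The same idea as a tiny standalone function, with an invented sample input:

    import re

    def html_to_wikiish(text):
        text = re.sub(r"</p>", "", text)
        text = re.sub(r"\s*?\n\s*", " ", text)             # collapse newlines inside paragraphs
        text = re.sub(r"<p>|<br>", "\n\n", text).strip()   # paragraph breaks become blank lines
        text = re.sub(r"</?u>", "_", text)
        text = re.sub(r"</?i>", "''", text)
        text = re.sub(r"</?b>", "'''", text)
        return text

    print(html_to_wikiish("<p>A <b>very</b> wet\n<i>streamway</i></p>"))
    # -> A '''very''' wet ''streamway''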
@@ -269,7 +247,6 @@ def Parseloghtml03(year, expedition, txt):
tripcave = triptitles[0]
else:
tripcave = "UNKNOWN"
- #print tripcave, "--- ppp", triptitle, trippeople, len(triptext)
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
@@ -299,64 +276,71 @@ def SetDatesFromLogbookEntries(expedition):


def LoadLogbookForExpedition(expedition):
- """ Parses all logbook entries for one expedition """
+ """ Parses all logbook entries for one expedition
+ """
global logentries

- expowebbase = os.path.join(settings.EXPOWEB, "years")
- yearlinks = settings.LOGBOOK_PARSER_SETTINGS

logbook_parseable = False
logbook_cached = False
+ yearlinks = settings.LOGBOOK_PARSER_SETTINGS
+ expologbase = os.path.join(settings.EXPOWEB, "years")

if expedition.year in yearlinks:
- # print " - Valid logbook year: ", expedition.year
+ logbookfile = os.path.join(expologbase, yearlinks[expedition.year][0])
- year_settings = yearlinks[expedition.year]
+ parsefunc = yearlinks[expedition.year][1]
+ else:
+ logbookfile = os.path.join(expologbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE)
+ parsefunc = settings.DEFAULT_LOGBOOK_PARSER
+ cache_filename = logbookfile + ".cache"

try:
bad_cache = False
- cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
now = time.time()
cache_t = os.path.getmtime(cache_filename)
- file_t = os.path.getmtime(os.path.join(expowebbase, year_settings[0]))
+ if os.path.getmtime(logbookfile) - cache_t > 2: # at least 2 secs later
- if file_t - cache_t > 2: # at least 2 secs later
- #print " - Cache is stale."
bad_cache= True
if now - cache_t > 30*24*60*60:
- #print " - Cache is more than 30 days old."
bad_cache= True
if bad_cache:
- print " - Cache is either stale or more than 30 days old. Deleting it."
+ print(" - ! Cache is either stale or more than 30 days old. Deleting it.")
os.remove(cache_filename)
logentries=[]
+ print(" ! Removed stale or corrupt cache file")
raise
- print(" - Reading cache: " + cache_filename )
+ print(" - Reading cache: " + cache_filename, end='')
try:
with open(cache_filename, "rb") as f:
logentries = pickle.load(f)
- print " - Loaded ", len(logentries), " objects"
+ print(" -- Loaded ", len(logentries), " log entries")
logbook_cached = True
except:
- print " - Failed to load corrupt cache. Deleting it.\n"
+ print("\n ! Failed to load corrupt cache. Deleting it.\n")
os.remove(cache_filename)
logentries=[]
- except:
+ raise
- print(" - Opening logbook: ")
+ except : # no cache found
- file_in = open(os.path.join(expowebbase, year_settings[0]))
+ #print(" - No cache \"" + cache_filename +"\"")
+ try:
+ file_in = open(logbookfile,'rb')
txt = file_in.read().decode("latin1")
file_in.close()
- parsefunc = year_settings[1]
logbook_parseable = True
- print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
+ print((" - Using: " + parsefunc + " to parse " + logbookfile))
+ except (IOError):
+ logbook_parseable = False
+ print((" ! Couldn't open logbook " + logbookfile))

if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
- # and this has also stored all the objects in logentries[]
+ # and this has also stored all the log entries in logentries[]
- print " - Storing " , len(logentries), " log entries"
+ if len(logentries) >0:
- cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
+ print(" - Cacheing " , len(logentries), " log entries")
- with open(cache_filename, "wb") as f:
+ with open(cache_filename, "wb") as fc:
- pickle.dump(logentries, f, 2)
+ pickle.dump(logentries, fc, 2)
+ else:
+ print(" ! NO TRIP entries found in logbook, check the syntax.")

logentries=[] # flush for next year

if logbook_cached:
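The cache handling above boils down to a simple policy: rebuild the pickled cache whenever the source logbook file is newer than the cache (with a couple of seconds' slack) or the cache is more than 30 days old, otherwise load it. A self-contained sketch of that policy, with hypothetical names, not the troggle implementation itself:

    import os
    import time
    import pickle

    def load_with_cache(source_file, parse_func, max_age_days=30):
        """Return parsed entries, reusing source_file + '.cache' while it is still fresh."""
        cache_file = source_file + ".cache"
        try:
            cache_t = os.path.getmtime(cache_file)
            stale = (os.path.getmtime(source_file) - cache_t > 2        # source edited since caching
                     or time.time() - cache_t > max_age_days * 24 * 60 * 60)
            if stale:
                os.remove(cache_file)
            else:
                with open(cache_file, "rb") as f:
                    return pickle.load(f)
        except (OSError, IOError, pickle.PickleError):
            pass            # no cache, or unreadable cache: fall through and re-parse
        entries = parse_func(source_file)
        with open(cache_file, "wb") as fc:
            pickle.dump(entries, fc, 2)
        return entries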
@@ -364,36 +348,22 @@ def LoadLogbookForExpedition(expedition):
for entrytuple in range(len(logentries)):
date, place, title, text, trippeople, expedition, logtime_underground, \
entry_type = logentries[i]
- #print " - - obj ", i, date, title
EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground,\
entry_type)
i +=1
- else:
- try:
- file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
- txt = file_in.read().decode("latin1")
- file_in.close()
- logbook_parseable = True
- print("No set parser found using default")
- parsefunc = settings.DEFAULT_LOGBOOK_PARSER
- except (IOError):
- logbook_parseable = False
- print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)


#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())

def LoadLogbooks():
- """ This is the master function for parsing all logbooks into the Troggle database. """
+ """ This is the master function for parsing all logbooks into the Troggle database.
+ """
- # Clear the logbook data issues as we are reloading
+ DataIssue.objects.filter(parser='logbooks').delete()
- models.DataIssue.objects.filter(parser='logbooks').delete()
+ expos = Expedition.objects.all()
- # Fetch all expos
+ nologbook = ["1976", "1977","1978","1979","1980","1980","1981","1983","1984",
- expos = models.Expedition.objects.all()
+ "1985","1986","1987","1988","1989","1990",]
for expo in expos:
- print("\nLoading Logbook for: " + expo.year)
+ if expo.year not in nologbook:
+ print((" - Logbook for: " + expo.year))
- # Load logbook for expo
LoadLogbookForExpedition(expo)


@@ -418,25 +388,25 @@ def parseAutoLogBookEntry(filename):
year, month, day = [int(x) for x in dateMatch.groups()]
date = datetime.date(year, month, day)
else:
- errors.append("Date could not be found")
+ errors.append(" - Date could not be found")

expeditionYearMatch = expeditionYearRegex.search(contents)
if expeditionYearMatch:
try:
expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
- except models.Expedition.DoesNotExist:
+ except Expedition.DoesNotExist:
- errors.append("Expedition not in database")
+ errors.append(" - Expedition not in database")
else:
- errors.append("Expedition Year could not be parsed")
+ errors.append(" - Expedition Year could not be parsed")

titleMatch = titleRegex.search(contents)
if titleMatch:
title, = titleMatch.groups()
if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
- errors.append("Title too long")
+ errors.append(" - Title too long")
else:
- errors.append("Title could not be found")
+ errors.append(" - Title could not be found")

caveMatch = caveRegex.search(contents)
if caveMatch:
@@ -445,7 +415,7 @@ def parseAutoLogBookEntry(filename):
cave = models.getCaveByReference(caveRef)
except AssertionError:
cave = None
- errors.append("Cave not found in database")
+ errors.append(" - Cave not found in database")
else:
cave = None

@@ -456,13 +426,13 @@ def parseAutoLogBookEntry(filename):
location = None

if cave is None and location is None:
- errors.append("Location nor cave could not be found")
+ errors.append(" - Location nor cave could not be found")

reportMatch = reportRegex.search(contents)
if reportMatch:
report, = reportMatch.groups()
else:
- errors.append("Contents could not be found")
+ errors.append(" - Contents could not be found")
if errors:
return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
people = []
@@ -473,21 +443,21 @@ def parseAutoLogBookEntry(filename):
if name.lower() in personExpeditionNameLookup:
personExpo = personExpeditionNameLookup[name.lower()]
else:
- errors.append("Person could not be found in database")
+ errors.append(" - Person could not be found in database")
author = bool(author)
else:
- errors.append("Persons name could not be found")
+ errors.append(" - Persons name could not be found")

TUMatch = TURegex.search(contents)
if TUMatch:
TU, = TUMatch.groups()
else:
- errors.append("TU could not be found")
+ errors.append(" - TU could not be found")
if not errors:
people.append((name, author, TU))
if errors:
- return errors # Bail out before commiting to the database
+ return errors # Bail out before committing to the database
- logbookEntry = models.LogbookEntry(date = date,
+ logbookEntry = LogbookEntry(date = date,
expedition = expedition,
title = title, cave = cave, place = location,
text = report, slug = slugify(title)[:50],
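parseAutoLogBookEntry follows an accumulate-errors-then-bail pattern: every regex that fails to match appends a message to an errors list, and the function returns the whole list before touching the database. A compact sketch of the same idea, using made-up field regexes rather than the real ones:

    import re

    def parse_entry(contents):
        """Sketch of the accumulate-errors pattern: gather every problem, save nothing if any occur."""
        errors = []
        fields = {}
        for name, regex in [("date",   r"date:\s*(\S+)"),
                            ("title",  r"title:\s*(.+)"),
                            ("people", r"people:\s*(.+)")]:
            m = re.search(regex, contents)
            if m:
                fields[name] = m.group(1).strip()
            else:
                errors.append(" - %s could not be found" % name)
        if errors:
            return errors          # bail out before committing anything to the database
        return fields

    print(parse_entry("date: 2019-07-20\ntitle: Pushing trip"))   # people missing -> error list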
parsers/survex.py (141 changes)  Normal file → Executable file

@@ -1,26 +1,31 @@
- import troggle.settings as settings
+ from __future__ import absolute_import, division, print_function
- import troggle.core.models as models
- import troggle.settings as settings

- from subprocess import call, Popen, PIPE

- from troggle.parsers.people import GetPersonExpeditionNameLookup
- from django.utils.timezone import get_current_timezone
- from django.utils.timezone import make_aware

- import re
import os
+ import re
+ import sys
import time
from datetime import datetime, timedelta
- import sys
+ from subprocess import PIPE, Popen, call

+ from django.utils.timezone import get_current_timezone, make_aware

+ import troggle.settings as settings
+ import troggle.core.models as models
+ import troggle.core.models_survex as models_survex
+ from troggle.parsers.people import GetPersonExpeditionNameLookup
+ from troggle.core.views_caves import MapLocations

"""A 'survex block' is a *begin...*end set of cave data.
A 'survexscansfolder' is what we today call a "survey scans folder" or a "wallet".
"""

line_leg_regex = re.compile(r"[\d\-+.]+$")
+ survexlegsalllength = 0.0
+ survexlegsnumber = 0

def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
+ global survexlegsalllength
+ global survexlegsnumber
# The try catches here need replacing as they are relatively expensive
ls = sline.lower().split()
ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
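The two new module-level globals turn the leg parser into a running tally: survexlegsnumber counts legs and survexlegsalllength sums their tape lengths while the files are read, instead of saving a SurvexLeg per leg. The counting idea in isolation, on a few invented survex-style data lines:

    # Tally legs and total tape length from "from to tape compass clino" data lines,
    # the same bookkeeping the parser now does with its module-level globals.
    legs = 0
    total_tape = 0.0

    for sline in ["1 2 5.30 074 -12",
                  "2 3 12.85 101 +02",
                  "3 3a 2.10 350 -45"]:
        ls = sline.lower().split()
        try:
            total_tape += float(ls[2])   # index 2 plays the role of stardata["tape"]
            legs += 1
        except ValueError:
            print("! Tape misread in line:", ls)

    print(legs, "legs,", total_tape, "m")   # 3 legs, 20.25 m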
@@ -32,13 +37,14 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
if stardata["type"] == "normal":
try:
survexleg.tape = float(ls[stardata["tape"]])
+ survexlegsnumber += 1
except ValueError:
print("! Tape misread in", survexblock.survexfile.path)
print(" Stardata:", stardata)
print(" Line:", ls)
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
- survexleg.tape = 1000
+ survexleg.tape = 0
try:
lclino = ls[stardata["clino"]]
except:
@@ -84,15 +90,20 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
survexleg.cave = cave

# only save proper legs
- survexleg.save()
+ # No need to save as we are measuring lengths only on parsing now.
+ # delete the object so that django autosaving doesn't save it.
+ survexleg = None
+ #survexleg.save()

itape = stardata.get("tape")
if itape:
try:
survexblock.totalleglength += float(ls[itape])
+ survexlegsalllength += float(ls[itape])
except ValueError:
print("! Length not added")
- survexblock.save()
+ # No need to save as we are measuring lengths only on parsing now.
+ #survexblock.save()


def LoadSurvexEquate(survexblock, sline):
@@ -129,7 +140,7 @@ regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)"
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')

insp = ""
+ callcount = 0
def RecursiveLoad(survexblock, survexfile, fin, textlines):
"""Follows the *include links in all the survex files from the root file 1623.svx
and reads in the survex blocks, other data and the wallet references (survexscansfolder) as it
@@ -141,12 +152,20 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
stardata = stardatadefault
teammembers = [ ]
global insp
+ global callcount
+ global survexlegsnumber

# uncomment to print out all files during parsing
print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path)
stamp = datetime.now()
lineno = 0

+ sys.stderr.flush();
+ callcount +=1
+ if callcount >=10:
+ callcount=0
+ print(".", file=sys.stderr,end='')

# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
if path_match:
@@ -328,6 +347,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
else:
print(insp+' - No match (b) for %s' % newsvxpath)

+ previousnlegs = survexlegsnumber
name = line.lower()
print(insp+' - Begin found for: ' + name)
# print(insp+'Block cave: ' + str(survexfile.cave))
@@ -347,7 +367,11 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if iblankbegins:
iblankbegins -= 1
else:
- survexblock.text = "".join(textlines)
+ #survexblock.text = "".join(textlines)
+ # .text not used, using it for number of legs per block
+ legsinblock = survexlegsnumber - previousnlegs
+ print("LEGS: {} (previous: {}, now:{})".format(legsinblock,previousnlegs,survexlegsnumber))
+ survexblock.text = str(legsinblock)
survexblock.save()
# print(insp+' - End found: ')
endstamp = datetime.now()
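Per-block leg counts come cheaply from snapshotting the global counter at *begin and differencing it at the matching *end, with the result stashed in the otherwise unused .text field. The snapshot-and-difference idea on its own, with illustrative block names and counts only:

    # Count items per nested block by differencing a single running counter,
    # as the survex parser now does between *begin and *end.
    survexlegsnumber = 0

    def process_block(name, legs_in_this_block, subblocks=()):
        global survexlegsnumber
        previousnlegs = survexlegsnumber          # snapshot at *begin
        survexlegsnumber += legs_in_this_block    # legs read directly in this block
        for sub in subblocks:
            process_block(*sub)
        legsinblock = survexlegsnumber - previousnlegs   # includes nested blocks
        print("LEGS: {} in block {}".format(legsinblock, name))

    process_block("cave", 4, [("entrance series", 10), ("deep rift", 25)])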
@@ -428,6 +452,8 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Time to process: ' + str(timetaken))

def LoadAllSurvexBlocks():
+ global survexlegsalllength
+ global survexlegsnumber

print(' - Flushing All Survex Blocks...')

@@ -455,6 +481,7 @@ def LoadAllSurvexBlocks():
survexfile.SetDirectory()

#Load all
+ # this is the first so id=1
survexblockroot = models.SurvexBlock(name="root", survexpath="", begin_char=0, cave=None, survexfile=survexfile, totalleglength=0.0)
survexblockroot.save()
fin = survexfile.OpenFile()
@@ -462,13 +489,20 @@ def LoadAllSurvexBlocks():
# The real work starts here
RecursiveLoad(survexblockroot, survexfile, fin, textlines)
fin.close()
- survexblockroot.text = "".join(textlines)
+ survexblockroot.totalleglength = survexlegsalllength
+ survexblockroot.text = str(survexlegsnumber)
+ #survexblockroot.text = "".join(textlines) these are all blank
survexblockroot.save()

# Close the file
sys.stdout.close()
+ print("+", file=sys.stderr)
+ sys.stderr.flush();

# Restore sys.stdout to our old saved file handler
sys.stdout = stdout_orig
+ print(" - total number of survex legs: {}".format(survexlegsnumber))
+ print(" - total leg lengths loaded: {}m".format(survexlegsalllength))
print(' - Loaded All Survex Blocks.')


@@ -502,66 +536,103 @@ def LoadPos():

now = time.time()
if now - updtcache > 3*24*60*60:
- print " cache is more than 3 days old. Deleting."
+ print( " cache is more than 3 days old. Deleting.")
os.remove(cachefile)
elif age < 0 :
- print " cache is stale. Deleting."
+ print(" cache is stale. Deleting.")
os.remove(cachefile)
else:
- print " cache is fresh. Reading..."
+ print(" cache is fresh. Reading...")
try:
with open(cachefile, "r") as f:
for line in f:
l = line.rstrip()
if l in notfoundbefore:
notfoundbefore[l] +=1 # should not be duplicates
- print " DUPLICATE ", line, notfoundbefore[l]
+ print(" DUPLICATE ", line, notfoundbefore[l])
else:
notfoundbefore[l] =1
except:
- print " FAILURE READ opening cache file %s" % (cachefile)
+ print(" FAILURE READ opening cache file %s" % (cachefile))
raise


notfoundnow =[]
found = 0
skip = {}
- print "\n" # extra line because cavern overwrites the text buffer somehow
+ print("\n") # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA)
- print " - This next bit takes a while. Matching ~32,000 survey positions. Be patient..."
+ print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")

+ mappoints = {}
+ for pt in MapLocations().points():
+ svxid, number, point_type, label = pt
+ mappoints[svxid]=True

posfile = open("%s.pos" % (topdata))
posfile.readline() #Drop header

+ survexblockroot = models_survex.SurvexBlock.objects.get(id=1)
for line in posfile.readlines():
r = poslineregex.match(line)
if r:
- x, y, z, name = r.groups() # easting, northing, altitude
+ x, y, z, id = r.groups()
- if name in notfoundbefore:
+ if id in notfoundbefore:
- skip[name] = 1
+ skip[id] = 1
else:
+ for sid in mappoints:
+ if id.endswith(sid):
+ notfoundnow.append(id)
+ # Now that we don't import any stations, we create it rather than look it up
+ # ss = models_survex.SurvexStation.objects.lookup(id)

+ # need to set block_id which means doing a search on all the survex blocks..
+ # remove dot at end and add one at beginning
+ blockpath = "." + id[:-len(sid)].strip(".")
try:
- ss = models.SurvexStation.objects.lookup(name)
+ sbqs = models_survex.SurvexBlock.objects.filter(survexpath=blockpath)
+ if len(sbqs)==1:
+ sb = sbqs[0]
+ if len(sbqs)>1:
+ message = ' ! MULTIPLE SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
+ print(message)
+ models.DataIssue.objects.create(parser='survex', message=message)
+ sb = sbqs[0]
+ elif len(sbqs)<=0:
+ message = ' ! ZERO SurvexBlocks matching Entrance point {} {}'.format(blockpath, sid)
+ print(message)
+ models.DataIssue.objects.create(parser='survex', message=message)
+ sb = survexblockroot
+ except:
+ message = ' ! FAIL in getting SurvexBlock matching Entrance point {} {}'.format(blockpath, sid)
+ print(message)
+ models.DataIssue.objects.create(parser='survex', message=message)
+ try:
+ ss = models_survex.SurvexStation(name=id, block=sb)
ss.x = float(x)
ss.y = float(y)
ss.z = float(z)
ss.save()
found += 1
except:
- notfoundnow.append(name)
+ message = ' ! FAIL to create SurvexStation Entrance point {} {}'.format(blockpath, sid)
- print " - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip))
+ print(message)
+ models.DataIssue.objects.create(parser='survex', message=message)
+ raise

+ #print(" - %s failed lookups of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)))

if found > 10: # i.e. a previous cave import has been done
try:
with open(cachefile, "w") as f:
c = len(notfoundnow)+len(skip)
for i in notfoundnow:
- f.write("%s\n" % i)
+ pass #f.write("%s\n" % i)
for j in skip:
- f.write("%s\n" % j) # NB skip not notfoundbefore
+ pass #f.write("%s\n" % j) # NB skip not notfoundbefore
- print(' Not-found cache file written: %s entries' % c)
+ print((' Not-found cache file written: %s entries' % c))
except:
- print " FAILURE WRITE opening cache file %s" % (cachefile)
+ print(" FAILURE WRITE opening cache file %s" % (cachefile))
raise

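LoadPos runs cavern and 3dtopos to produce a .pos file, then walks it line by line: each line carries easting, northing, altitude and a dotted station name, and only stations whose name ends with an id of interest are turned into SurvexStation objects. A sketch of the .pos line handling alone, with an assumed line format and sample values rather than troggle's poslineregex:

    import re

    # one assumed .pos-style line format: "( x, y, z ) dotted.station.name"
    posline = re.compile(r"^\(\s*([\d\-+.]+),\s*([\d\-+.]+),\s*([\d\-+.]+)\s*\)\s+(\S+)$")

    wanted = {"p2018-ab-01": True}     # stand-in for the MapLocations() entrance points

    for line in ["(  36670.37,  83317.43,  1903.97 ) caves-1623.258.p2018-ab-01",
                 "(  36675.00,  83300.12,  1887.20 ) caves-1623.258.somewhere.else"]:
        r = posline.match(line)
        if not r:
            continue
        x, y, z, name = r.groups()
        for sid in wanted:
            if name.endswith(sid):
                blockpath = "." + name[:-len(sid)].strip(".")
                print(sid, "at", float(x), float(y), float(z), "in block", blockpath)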
@@ -29,67 +29,14 @@ def get_or_create_placeholder(year):
placeholder_logbook_entry, newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
return placeholder_logbook_entry

- # obsolete surveys.csv does not exist.
+ def listdir(*directories):
- # def readSurveysFromCSV():
+ try:
- # try: # could probably combine these two
+ return os.listdir(os.path.join(settings.SURVEYS, *directories))
- # surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
+ except:
- # except IOError:
+ import urllib.request, urllib.parse, urllib.error
- # import io, urllib.request, urllib.parse, urllib.error
+ url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
- # surveytab = io.StringIO(urllib.request.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
+ folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
- # dialect=csv.Sniffer().sniff(surveytab.read())
+ return [folder.rstrip(r"/") for folder in folders]
- # surveytab.seek(0,0)
- # surveyreader = csv.reader(surveytab,dialect=dialect)
- # headers = next(surveyreader)
- # header = dict(list(zip(headers, list(range(len(headers)))))) #set up a dictionary where the indexes are header names and the values are column numbers

- # # test if the expeditions have been added yet
- # if Expedition.objects.count()==0:
- # print("There are no expeditions in the database. Please run the logbook parser.")
- # sys.exit()


- # logging.info("Deleting all scanned images")
- # ScannedImage.objects.all().delete()


- # logging.info("Deleting all survey objects")
- # Survey.objects.all().delete()


- # logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")

- # for survey in surveyreader:
- # #I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
- # walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
- # # print(walletNumberLetter.groups())
- # year=survey[header['Year']]


- # surveyobj = Survey(
- # expedition = Expedition.objects.filter(year=year)[0],
- # wallet_number = walletNumberLetter.group('number'),
- # logbook_entry = get_or_create_placeholder(year),
- # comments = survey[header['Comments']],
- # location = survey[header['Location']]
- # )
- # surveyobj.wallet_letter = walletNumberLetter.group('letter')
- # if survey[header['Finished']]=='Yes':
- # #try and find the sketch_scan
- # pass
- # surveyobj.save()


- # logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")

- # dead
- # def listdir(*directories):
- # try:
- # return os.listdir(os.path.join(settings.SURVEYS, *directories))
- # except:
- # import urllib.request, urllib.parse, urllib.error
- # url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
- # folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
- # return [folder.rstrip(r"/") for folder in folders]

# add survey scans
# def parseSurveyScans(expedition, logfile=None):
@@ -157,19 +104,6 @@ def get_or_create_placeholder(year):
# yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
# print((" ! No folder found for " + expedition.year + " at:- " + yearPath))

- # dead
- # def parseSurveys(logfile=None):
- # try:
- # readSurveysFromCSV()
- # except (IOError, OSError):
- # print(" ! Survey CSV not found..")
- # pass

- # print(" - Loading scans by expedition year")
- # for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
- # print("%s" % expedition, end=' ')
- # parseSurveyScans(expedition)

# dead
# def isInterlacedPNG(filePath): #We need to check for interlaced PNGs because the thumbnail engine can't handle them (uses PIL)
# file=Image.open(filePath)
@@ -196,7 +130,6 @@ def GetListDir(sdir):

def LoadListScansFile(survexscansfolder):
gld = [ ]

# flatten out any directories in these wallet folders - should not be any
for (fyf, ffyf, fisdiryf) in GetListDir(survexscansfolder.fpath):
if fisdiryf:
@@ -204,24 +137,31 @@ def LoadListScansFile(survexscansfolder):
else:
gld.append((fyf, ffyf, fisdiryf))

+ c=0
for (fyf, ffyf, fisdiryf) in gld:
#assert not fisdiryf, ffyf
- if re.search(r"\.(?:png|jpg|jpeg|pdf|jpeg|svg)(?i)$", fyf):
+ if re.search(r"\.(?:png|jpg|jpeg|pdf|svg|gif)(?i)$", fyf):
survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
survexscansingle.save()
+ c+=1
+ if c>=10:
+ print(".", end='')
+ c = 0


# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():

- print(' - Loading Survey Scans... (deleting all objects first)')
+ print(' - Loading Survey Scans')

SurvexScanSingle.objects.all().delete()
SurvexScansFolder.objects.all().delete()
+ print(' - deleting all scansFolder and scansSingle objects')

# first do the smkhs (large kh survey scans) directory
- survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
+ survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "../surveys/smkhs"), walletname="smkhs")
+ print("smkhs", end=' ')
if os.path.isdir(survexscansfoldersmkhs.fpath):
survexscansfoldersmkhs.save()
LoadListScansFile(survexscansfoldersmkhs)
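Scan files are picked out of each wallet folder purely by filename extension, case-insensitively, with gif newly accepted. The filter on its own, over a made-up file list:

    import re

    scan_file_re = re.compile(r"\.(?:png|jpg|jpeg|pdf|svg|gif)$", re.IGNORECASE)

    files = ["notes1.JPG", "plan.svg", "elevation.PDF", "readme.txt", "cave.gif"]
    scans = [f for f in files if scan_file_re.search(f)]
    print(scans)   # ['notes1.JPG', 'plan.svg', 'elevation.PDF', 'cave.gif']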
@@ -229,7 +169,7 @@ def LoadListScans():

# iterate into the surveyscans directory
print(' - ', end=' ')
- for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
+ for f, ff, fisdir in GetListDir(settings.SURVEY_SCANS):
if not fisdir:
continue

settings.py (3 changes)  Normal file → Executable file

@@ -1,4 +1,5 @@
- from localsettings import * #inital localsettings call so that urljoins work
+ from localsettings import *
+ #inital localsettings call so that urljoins work
import os
import urlparse
import django
@@ -35,13 +35,11 @@
<a href="{% url "survexcaveslist" %}">All Survex</a> |
<a href="{% url "surveyscansfolders" %}">Scans</a> |
<a href="{% url "tunneldata" %}">Tunneldata</a> |
- <a href="{% url "survexcavessingle" 107 %}">107</a> |
+ <a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a> |
- <a href="{% url "survexcavessingle" 161 %}">161</a> |
+ <a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a> |
- <a href="{% url "survexcavessingle" 204 %}">204</a> |
+ <a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a> |
- <a href="{% url "survexcavessingle" 258 %}">258</a> |
+ <a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a> |
- <a href="{% url "survexcavessingle" 264 %}">264</a> |
+ <a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a> |
- <a href="{% url "expedition" 2016 %}">Expo2016</a> |
- <a href="{% url "expedition" 2017 %}">Expo2017</a> |
<a href="{% url "expedition" 2018 %}">Expo2018</a> |
<a href="{% url "expedition" 2019 %}">Expo2019</a> |
<a href="{% url "expedition" 2020 %}">Expo2020</a> |
@@ -15,16 +15,6 @@
{% endfor %}
</ul>

- <h3>1623</h3>

- <table class="searchable">
- {% for cave in caves1623 %}

- <tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}</td></tr>

- {% endfor %}
- </table>

<h3>1626</h3>

<ul class="searchable">
@@ -35,7 +25,20 @@

{% endfor %}
</ul>
+ <p style="text-align:right">
<a href="{% url "newcave" %}">New Cave</a>
+ </p>
+ <h3>1623</h3>

+ <table class="searchable">
+ {% for cave in caves1623 %}

+ <tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}</td></tr>

+ {% endfor %}
+ </table>

+ <p style="text-align:right">
+ <a href="{% url "newcave" %}">New Cave</a>
+ </p>
{% endblock %}
@@ -129,23 +129,6 @@
</td>
</tr>

- <tr>
- <td>
- surveys to Surveys.csv
- </td>
- <td>
- <form name="export" method="post" action="">
- <p>Overwrite the existing Surveys.csv file with one generated by Troggle.</p>
- <input disabled name="export_surveys" type="submit" value="Update {{settings.SURVEYS}}noinfo/Surveys.csv" />
- </form>
- </td>
- <td>
- <form name="export" method="get" action={% url "downloadsurveys" %}>
- <p>Download a Surveys.csv file which is dynamically generated by Troggle.</p>
- <input disabled name="download_surveys" type="submit" value="Download Surveys.csv" />
- </form>
- </td>
- </tr>

<tr>
<td>qms to qms.csv</td><td>
templates/experimental.html (4 changes)  Normal file → Executable file

@@ -8,7 +8,9 @@

<h1>Expo Experimental</h1>

- <p>Number of survey legs: {{nsurvexlegs}}, total length: {{totalsurvexlength}}</p>
+ <p>Number of survey legs: {{nsurvexlegs}}<br />
+ Total length: {{totalsurvexlength}} m on importing survex files.<br />
+ Total length: {{addupsurvexlength}} m adding up all the years below.</p>

<table>
<tr><th>Year</th><th>Surveys</th><th>Survey Legs</th><th>Total length</th></tr>
@@ -1,11 +1,10 @@
+ <!DOCTYPE html>
<html>
<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
- <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>{% block title %}{% endblock %}
</title>
<link rel="stylesheet" type="text/css" href="../css/main2.css" />

</head>
<body>
<div id="mainmenu">
@@ -13,17 +12,19 @@
<li><a href="/index.htm">Expo website home</a></li>
<li><a href="/intro.html">Introduction</a></li>
<li><a href="/infodx.htm">Main index</a></li>
- <li><a href="/indxal.htm">Cave index</a></li>
+ <li><a href="/caves">Cave index</a></li>
{% if cavepage %}
<ul>
<li><a href="{% url "survexcaveslist" %}">All Survex</a></li>
<li><a href="{% url "surveyscansfolders" %}">Scans</a></li>
<li><a href="{% url "tunneldata" %}">Tunneldata</a></li>
- <li><a href="{% url "survexcavessingle" 161 %}">161</a></li>
+ <li><a href="{% url "survexcavessingle" "caves-1623/290/290.svx" %}">290</a></li>
- <li><a href="{% url "survexcavessingle" 204 %}">204</a></li>
+ <li><a href="{% url "survexcavessingle" "caves-1623/291/291.svx" %}">291</a></li>
- <li><a href="{% url "survexcavessingle" 258 %}">258</a></li>
+ <li><a href="{% url "survexcavessingle" "caves-1626/359/359.svx" %}">359</a></li>
- <li><a href="{% url "expedition" 2012 %}">Expo2012</a></li>
+ <li><a href="{% url "survexcavessingle" "caves-1623/258/258.svx" %}">258</a></li>
- <li><a href="{% url "expedition" 2013 %}">Expo2013</a></li>
+ <li><a href="{% url "survexcavessingle" "caves-1623/264/264.svx" %}">264</a></li>
+ <li><a href="{% url "expedition" 2018 %}">Expo2018</a></li>
+ <li><a href="{% url "expedition" 2019 %}">Expo2019</a></li>
<li><a href="/admin">Django admin</a></li>
</ul>
{% endif %}
@@ -5,7 +5,7 @@
<li><a href="/handbook/index.htm">Handbook</a></li>
<li><a href="/pubs.htm">Reports</a></li>
<li><a href="/areas.htm">Areas</a></li>
- <li><a href="/indxal.htm">Caves</a></li>
+ <li><a href="/caves">Caves</a></li>
<li><a href="/expedition/2019">Troggle</a></li>
<li><form name=P method=get action="/search" target="_top">
<input id="omega-autofocus" type=search name=P value="testing" size=8 autofocus>
urls.py (16 changes)  Normal file → Executable file

@@ -22,14 +22,10 @@ admin.autodiscover()

actualurlpatterns = patterns('',

- url(r'^testingurl/?$' , views_caves.millenialcaves, name="testing"),

- url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"),

url(r'^troggle$', views_other.frontpage, name="frontpage"),
url(r'^todo/$', views_other.todo, name="todo"),

- url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
+ url(r'^caves$', views_caves.caveindex, name="caveindex"),
url(r'^people/?$', views_logbooks.personindex, name="personindex"),

url(r'^newqmnumber/?$', views_other.ajax_QM_number, ),

@@ -89,8 +85,6 @@ actualurlpatterns = patterns('',

# Is all this lot out of date ? Maybe the logbooks work?
url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"),
- url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
- url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"),
url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$',views_other.downloadLogbook),
url(r'^logbook/?$',views_other.downloadLogbook, name="downloadlogbook"),
url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),

@@ -112,6 +106,10 @@ actualurlpatterns = patterns('',

# (r'^personform/(.*)$', personForm),

+ (r'^expofiles/(?P<path>.*)$', 'django.views.static.serve',
+ {'document_root': settings.EXPOFILES, 'show_indexes': True}),
+ (r'^static/(?P<path>.*)$', 'django.views.static.serve',
+ {'document_root': settings.STATIC_ROOT, 'show_indexes': True}),
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
(r'^tinymce_media/(?P<path>.*)$', 'django.views.static.serve',

@@ -126,7 +124,7 @@ actualurlpatterns = patterns('',


url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
- url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
+ url(r'^survexfile/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),


@@ -139,7 +137,7 @@ actualurlpatterns = patterns('',
#(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}),
url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
- url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
+ url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg|pdf|PNG|JPG|JPEG|PDF))$',
view_surveys.surveyscansingle, name="surveyscansingle"),

url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),