forked from expo/troggle — 1003 lines, 47 KiB, Python
import sys
|
|
import os
|
|
import re
|
|
import time
|
|
|
|
from datetime import datetime, timedelta
|
|
from subprocess import call, run
|
|
|
|
from django.utils.timezone import get_current_timezone
|
|
from django.utils.timezone import make_aware
|
|
|
|
import troggle.settings as settings
|
|
import troggle.core.models as models
|
|
import troggle.core.models_caves as models_caves
|
|
import troggle.core.models_survex as models_survex
|
|
from troggle.utils import ChaosMonkey
|
|
from troggle.parsers.people import GetPersonExpeditionNameLookup
|
|
from troggle.parsers.logbooks import GetCaveLookup
|
|
from troggle.core.views_caves import MapLocations
|
|
|
|
# Module-level state shared by the loader functions below.
survexblockroot = None  # root SurvexBlock — presumably assigned by the loader entry point; TODO confirm
ROOTBLOCK = "rootblock"  # name used for the synthetic root block
|
|
|
class SurvexLeg():
    """Plain in-memory record for one survey leg (tape/compass/clino).

    No longer a models.Model subclass, so no longer a database table.
    The parser overwrites these defaults on each instance and discards
    the object once the tape length has been accumulated.
    """
    # default readings, overwritten per-instance by the parser
    clino = 0.0
    compass = 0.0
    tape = 0.0
|
|
|
class LoadingSurvex():
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scansfolder' is what we today call a "survey scans folder" or a "wallet".
    """

    # --- precompiled regexes used while scanning survex source ---
    rx_linelen = re.compile(r"[\d\-+.]+$")
    rx_team = re.compile(r"(?i)(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$")
    rx_person = re.compile(r"(?i) and | / |, | & | \+ |^both$|^none$")
    rx_qm = re.compile(r'(?i)^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
    # remember there is also QM_PATTERN used in views_other and set in settings.py

    rx_cave = re.compile(r'(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)')
    rx_comment = re.compile(r'([^;]*?)\s*(?:;\s*(.*))?\n?$')
    rx_comminc = re.compile(r'(?i)^\*include[\s]*([-\w/]*).*$') # inserted by linear collate ;*include
    rx_commcni = re.compile(r'(?i)^\*edulcni[\s]*([-\w/]*).*$') # inserted by linear collate ;*edulcni
    rx_include = re.compile(r'(?i)^\s*(\*include[\s].*)$')
    rx_ref = re.compile(r'(?i)^\s*ref[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)')
    rx_star = re.compile(r'(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
    rx_starref = re.compile(r'(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')
    rx_argsref = re.compile(r'(?i)^[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
    stardatadefault = {"type":"normal", "from":0, "to":1, "tape":2, "compass":3, "clino":4}

    # NOTE(review): the mutable containers below are CLASS attributes, so they
    # are shared between all LoadingSurvex instances — confirm that is intended
    # before creating more than one parser object per run.
    stardata ={}                 # current *data field ordering
    survexlegsalllength = 0.0    # running total tape length across all files
    survexlegsnumber = 0         # running count of survey legs parsed
    depthbegin = 0               # current *begin/*end nesting depth
    depthinclude = 0             # current *include nesting depth
    stackbegin =[]               # stack of *begin block ids
    stackinclude = []            # stack of *include file paths
    stacksvxfiles = []           # stack of SurvexFile objects being included
    svxfileslist = []            # every file path seen, for duplicate detection
    svxdirs = {}                 # cache: lowercased dir path -> SurvexDirectory
    survexdict = {} # each key is a directory, and its value is a list of files
    lineno = 0
    insp = ""                    # indent prefix used when printing progress
    callcount = 0
    ignoreprefix = ["surface", "kataster", "fixedpts", "gpx"]
    ignorenoncave = ["caves-1623", "caves-1623/2007-neu"]
    includedfilename =""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None            # mtime of the cavern binary, discovered lazily

    def __init__(self):
        # map of cave identifier strings -> Cave objects (from the caves parser)
        self.caveslist = GetCaveLookup()
        pass
|
|
|
def LoadSurvexIgnore(self, survexblock, line, cmd):
|
|
if cmd == "require":
|
|
pass # should we check survex version available for processing?
|
|
elif cmd in ["equate", "fix", "alias", "calibrate", "cs","entrance", "export", "case",
|
|
"declination", "infer","instrument", "sd", "units"]:
|
|
pass # we ignore all these, which is fine.
|
|
else:
|
|
if cmd in ["include", "data", "flags", "title", "set", "ref"]:
|
|
message = "! Unparsed [*{}]: '{}' {}".format(cmd, line, survexblock.survexfile.path)
|
|
print((self.insp+message))
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
else:
|
|
message = "! Bad svx command: [*{}] {} ({}) {}".format(cmd, line, survexblock, survexblock.survexfile.path)
|
|
print((self.insp+message))
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
|
|
    def LoadSurvexTeam(self, survexblock, line):
        """Parse a *team line and create a SurvexPersonRole per person.

        'line' is e.g. "Insts Fred Smith & Joe Bloggs": rx_team captures the
        role word and the names; rx_person splits the names on and / , & +.
        """
        teammembers = [ ]
        mteammember = self.rx_team.match(line)
        if mteammember:
            for tm in self.rx_person.split(mteammember.group(2)):
                if tm:
                    # None when there is no expedition on the block or the
                    # name is not recognised for that expedition year
                    personexpedition = survexblock.expedition and GetPersonExpeditionNameLookup(survexblock.expedition).get(tm.lower())
                    if (personexpedition, tm) not in teammembers:
                        teammembers.append((personexpedition, tm))
                        personrole = models_survex.SurvexPersonRole(survexblock=survexblock, nrole=mteammember.group(1).lower(), personexpedition=personexpedition, personname=tm)
                        personrole.save()
                        personrole.expeditionday = survexblock.expeditionday
                        if personexpedition:
                            personrole.person=personexpedition.person
                        # second save stores the expeditionday/person updates
                        personrole.save()
|
|
|
    def LoadSurvexDate(self, survexblock, line):
        """Parse a *date line (YYYY.MM.DD) onto the block and link the
        matching Expedition and ExpeditionDay; date ranges are ignored."""
        # we should make this a date range for everything
        if len(line) == 10:  # exact single date only, e.g. '1999.07.21'
            survexblock.date = make_aware(datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m-%d'), get_current_timezone())
            expeditions = models.Expedition.objects.filter(year=line[:4])
            if expeditions:
                # NOTE(review): assert is stripped under -O; relies on one
                # Expedition per year in the DB
                assert len(expeditions) == 1
                survexblock.expedition = expeditions[0]
                survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
                survexblock.save()
|
|
|
def LoadSurvexLineLeg(self, survexblock, svxline, sline, comment):
|
|
"""This reads compass, clino and tape data but only keeps the tape lengths,
|
|
the rest is discarded after error-checking.
|
|
"""
|
|
# Check first to see if we are in a splay and abort if so.
|
|
# TO DO splay abort
|
|
invalid_clino = 180.0
|
|
invalid_compass = 720.0
|
|
invalid_tape = 0.0
|
|
stardata = self.stardata
|
|
survexleg = SurvexLeg()
|
|
|
|
ls = sline.lower().split()
|
|
# this next fails for two surface survey svx files which use / for decimal point
|
|
# e.g. '29/09' in the tape measurement, or use decimals but in brackets, e.g. (06.05)
|
|
if stardata["type"] == "normal": # should use current flags setting for this. May not be default order!
|
|
#print("! stardata {}++{}\n{} ".format(stardata, survexblock.survexfile.path, sline), file=sys.stderr)
|
|
try:
|
|
tape = ls[stardata["tape"]]
|
|
except:
|
|
print(("! stardata parsing incorrect", survexblock.survexfile.path))
|
|
print((" Stardata:", stardata))
|
|
print((" Line:", ls))
|
|
message = ' ! stardata parsing incorrect in line %s in %s' % (ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
survexleg.tape = invalid_tape
|
|
return
|
|
tape = tape.replace("(","")
|
|
tape = tape.replace(")","")
|
|
tape = tape.replace("/",".")
|
|
try:
|
|
survexleg.tape = float(tape)
|
|
self.survexlegsnumber += 1
|
|
except ValueError:
|
|
print(("! Tape misread in", survexblock.survexfile.path))
|
|
print((" Stardata:", stardata))
|
|
print((" Line:", ls))
|
|
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
survexleg.tape = invalid_tape
|
|
try:
|
|
survexblock.totalleglength += survexleg.tape
|
|
self.survexlegsalllength += survexleg.tape
|
|
except ValueError:
|
|
message = ' ! Value Error: Tape length not added %s in %s' % (ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
|
|
try:
|
|
lcompass = ls[stardata["compass"]]
|
|
except:
|
|
print(("! Compass not found in", survexblock.survexfile.path))
|
|
print((" Stardata:", stardata))
|
|
print((" Line:", ls))
|
|
message = ' ! Value Error: Compass not found in line %s in %s' % (ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
lcompass = invalid_compass
|
|
|
|
try:
|
|
lclino = ls[stardata["clino"]]
|
|
except:
|
|
print(("! Clino misread in", survexblock.survexfile.path))
|
|
print((" Stardata:", stardata))
|
|
print((" Line:", ls))
|
|
message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
lclino = invalid_clino
|
|
|
|
if lclino == "up":
|
|
survexleg.clino = 90.0
|
|
lcompass = invalid_compass
|
|
elif lclino == "down":
|
|
survexleg.clino = -90.0
|
|
lcompass = invalid_compass
|
|
elif lclino == "-" or lclino == "level":
|
|
survexleg.clino = -90.0
|
|
|
|
try:
|
|
survexleg.compass = float(lcompass)
|
|
except ValueError:
|
|
print(("! Compass misread in", survexblock.survexfile.path))
|
|
print((" Stardata:", stardata))
|
|
print((" Line:", ls))
|
|
message = " ! Value Error: lcompass:'{}' line {} in '{}'".format(lcompass,
|
|
ls, survexblock.survexfile.path)
|
|
models.DataIssue.objects.create(parser='survexleg', message=message)
|
|
survexleg.compass = invalid_compass
|
|
|
|
#print(" !! lineno '{}'\n !! svxline '{}'\n !! sline '{}'\n !! ls '{}'\n !! stardata {}".format(self.lineno, svxline, sline, ls,stardata))
|
|
# delete the object to save memory
|
|
survexleg = None
|
|
|
|
    def LoadSurvexRef(self, survexblock, args):
        """Parse a wallet reference (*REF or ';ref' comment) of the form
        YYYY#NN (optionally YYYY#XNN) and link the matching ScansFolder
        to the survex block. Problems are logged as DataIssues.
        """
        # *REF but also ; Ref  years from 1960 to 2039
        if len(args)< 4:
            message = " ! Empty or BAD *REF command '{}' in '{}'".format(args, survexblock.survexfile.path)
            print((self.insp+message))
            models.DataIssue.objects.create(parser='survex', message=message)
            return

        argsgps = self.rx_argsref.match(args)
        if argsgps:
            yr, letterx, wallet = argsgps.groups()
        else:
            message = " ! BAD *REF command '{}' in '{}'".format(args, survexblock.survexfile.path)
            print((self.insp+message))
            models.DataIssue.objects.create(parser='survex', message=message)
            return

        # normalise the optional X marker and zero-pad the wallet number
        if not letterx:
            letterx = ""
        else:
            letterx = "X"
        if len(wallet)<2:
            wallet = "0" + wallet
        # NOTE(review): assert is stripped when Python runs with -O
        assert (int(yr)>1960 and int(yr)<2039), "Wallet year out of bounds: %s" % yr
        refscan = "%s#%s%s" % (yr, letterx, wallet)
        try:
            if int(wallet)>100:
                message = " ! Wallet *REF {} - too big in '{}'".format(refscan, survexblock.survexfile.path)
                print((self.insp+message))
                models.DataIssue.objects.create(parser='survex', message=message)
        except:
            # int() fails for non-numeric wallet identifiers
            message = " ! Wallet *REF {} - not numeric in '{}'".format(refscan, survexblock.survexfile.path)
            print((self.insp+message))
            models.DataIssue.objects.create(parser='survex', message=message)
        manyscansfolders = models_survex.ScansFolder.objects.filter(walletname=refscan)
        if manyscansfolders:
            # take the first match; warn if the walletname is ambiguous
            survexblock.scansfolder = manyscansfolders[0]
            survexblock.save()
            if len(manyscansfolders) > 1:
                message = " ! Wallet *REF {} - {} scan folders from DB search in {}".format(refscan, len(manyscansfolders), survexblock.survexfile.path)
                print((self.insp+message))
                models.DataIssue.objects.create(parser='survex', message=message)
        else:
            message = " ! Wallet *REF '{}' - NOT found in DB search '{}'".format(refscan, survexblock.survexfile.path)
            print((self.insp+message))
            models.DataIssue.objects.create(parser='survex', message=message)
|
|
|
def LoadSurvexQM(self, survexblock, qmline):
|
|
insp = self.insp
|
|
qm_no = qmline.group(1)
|
|
qm_grade = qmline.group(2)
|
|
if qmline.group(3): # usual closest survey station
|
|
qm_nearest = qmline.group(3)
|
|
if qmline.group(4):
|
|
qm_nearest = qm_nearest +"."+ qmline.group(4)
|
|
|
|
if qmline.group(6) and qmline.group(6) != '-':
|
|
qm_resolve_station = qmline.group(6)
|
|
if qmline.group(7):
|
|
qm_resolve_station = qm_resolve_station +"."+ qmline.group(7)
|
|
else:
|
|
qm_resolve_station = ""
|
|
qm_notes = qmline.group(8)
|
|
# Spec of QM in SVX files:
|
|
# ;Serial number grade(A/B/C/D/X) nearest-station resolution-station description
|
|
# ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
|
|
# ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
|
|
|
|
# NB none of the SurveyStations are in the DB now, so if we want to link to aSurvexStation
|
|
# we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one
|
|
try:
|
|
qm = models_caves.QM.objects.create(number=qm_no,
|
|
# nearest_station=a_survex_station_object, # can be null
|
|
nearest_station_description=qm_resolve_station,
|
|
nearest_station_name=qm_nearest,
|
|
grade=qm_grade.upper(),
|
|
location_description=qm_notes)
|
|
qm.save
|
|
# message = " ! QM{} '{}' CREATED in DB in '{}'".format(qm_no, qm_nearest,survexblock.survexfile.path)
|
|
# print(insp+message)
|
|
# models.DataIssue.objects.create(parser='survex', message=message)
|
|
except:
|
|
message = " ! QM{} FAIL to create {} in'{}'".format(qm_no, qm_nearest,survexblock.survexfile.path)
|
|
print(insp+message)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
|
|
def LoadSurvexDataCmd(self,survexblock,args):
|
|
"""Sets the order for data elements in this and following blocks, e.g.
|
|
*data normal from to compass clino tape
|
|
*data normal from to tape compass clino
|
|
We are only collecting length data so we are disinterested in from, to, LRUD etc.
|
|
"""
|
|
# stardatadefault = { # included here as reference to help understand the code
|
|
# "type":"normal",
|
|
# "t":"leg",
|
|
# "from":0,
|
|
# "to":1,
|
|
# "tape":2,
|
|
# "compass":3,
|
|
# "clino":4}
|
|
stardata = self.stardatadefault
|
|
if args == "":
|
|
# naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
|
|
return
|
|
|
|
ls = args.lower().split()
|
|
if ls[0] == "normal":
|
|
if not (("from" in stardata and "to" in stardata) or "station" in stardata):
|
|
message = " ! - Unrecognised *data normal statement '{}' {}|{}".format(args, survexblock.name, survexblock.survexpath)
|
|
print(message)
|
|
print(message,file=sys.stderr)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
return
|
|
else:
|
|
stardata = self.stardatadefault
|
|
# ls = ["normal", "from", "to", "tape", "compass", "clino" ]
|
|
for i in range(1, len(ls)): # len[0] is "normal"
|
|
if ls[i] in ["bearing","compass"]:
|
|
stardata["compass"] = i-1
|
|
if ls[i] in ["clino","gradient"]:
|
|
stardata["clino"] = i-1
|
|
if ls[i] in ["tape","length"]:
|
|
stardata["tape"] = i-1
|
|
self.stardata = stardata
|
|
return
|
|
elif ls[0] == "default":
|
|
self.stardata = self.stardatadefault
|
|
elif ls[0] == "passage" or ls[0] == "nosurvey":
|
|
# we ignore everything else, such as '*data passage'
|
|
pass
|
|
elif ls[0] == "cartesian" or ls[0] == "nosurvey":
|
|
message = " ! - *data cartesian survey blocks are ignored. Length not calculated. '{}' {}|{}".format(args, survexblock.name, survexblock.survexpath)
|
|
print(message)
|
|
print(message,file=sys.stderr)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
else:
|
|
message = " ! - Unrecognised *data statement '{}'".format(args)
|
|
print(message)
|
|
print(message,file=sys.stderr)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
|
|
def LoadSurvexFlags(self, line, cmd):
|
|
# Here we could set on/off 'splay', 'not splay', 'surface', 'not surface', or 'duplicate'
|
|
# but this data is only used for sense-checking not to actually calculate anything important
|
|
pass
|
|
|
|
def IdentifyCave(self, cavepath):
|
|
if cavepath.lower() in self.caveslist:
|
|
return self.caveslist[cavepath.lower()]
|
|
# TO DO - some of this is already done in generating self.caveslist so simplify this
|
|
# esp. as it is in a loop.
|
|
path_match = self.rx_cave.search(cavepath)
|
|
if path_match:
|
|
sluggy = '{}-{}'.format(path_match.group(1), path_match.group(2))
|
|
guesses = [sluggy.lower(), path_match.group(2).lower()]
|
|
for g in guesses:
|
|
if g in self.caveslist:
|
|
self.caveslist[cavepath] = self.caveslist[g]
|
|
return self.caveslist[g]
|
|
print(' ! Failed to find cave for {}'.format(cavepath.lower()))
|
|
else:
|
|
# not a cave, but that is fine.
|
|
# print(' ! No regex(standard identifier) cave match for %s' % cavepath.lower())
|
|
return None
|
|
|
|
    def GetSurvexDirectory(self, headpath):
        """This creates a SurvexDirectory if it has not been seen before, and on creation
        it sets the primarysurvexfile. This is correct as it should be set on the first file
        in the directory, where first is defined by the *include ordering. Which is what we
        are doing.

        Results are cached case-insensitively in self.svxdirs.
        """
        if not headpath:
            # root of the survex tree — assumes the "" entry already exists
            # in self.svxdirs; TODO confirm where it is created
            return self.svxdirs[""]
        if headpath.lower() not in self.svxdirs:
            self.svxdirs[headpath.lower()] = models_survex.SurvexDirectory(path=headpath, primarysurvexfile=self.currentsurvexfile)
            self.svxdirs[headpath.lower()].save()
        return self.svxdirs[headpath.lower()]
|
|
|
def ReportNonCaveIncludes(self, headpath, includelabel):
|
|
"""Ignore surface, kataser and gps *include survex files
|
|
"""
|
|
if headpath in self.ignorenoncave:
|
|
return
|
|
for i in self.ignoreprefix:
|
|
if headpath.startswith(i):
|
|
return
|
|
message = " ! {} is not a cave. (while creating '{}' sfile & sdirectory)".format(headpath, includelabel)
|
|
print("\n"+message)
|
|
print("\n"+message,file=sys.stderr)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
|
|
    def LoadSurvexFile(self, svxid):
        """Creates SurvexFile in the database, and SurvexDirectory if needed
        with links to 'cave'
        Creates a new current survexfile and valid .survexdirectory
        The survexblock passed-in is not necessarily the parent. FIX THIS.
        """
        # a new file resets the *data field ordering to the survex default
        self.stardata = self.stardatadefault

        depth = " " * self.depthbegin
        print("{:2}{} - NEW survexfile:'{}'".format(self.depthbegin, depth, svxid))
        headpath = os.path.dirname(svxid)

        newfile = models_survex.SurvexFile(path=svxid)
        newfile.save() # until we do this there is no internal id so no foreign key works
        self.currentsurvexfile = newfile
        newdirectory = self.GetSurvexDirectory(headpath)
        newdirectory.save()
        newfile.survexdirectory = newdirectory
        # NOTE(review): this REPLACES any earlier file list for the directory
        # rather than appending — confirm against how survexdict is consumed
        self.survexdict[newdirectory] = [newfile,]
        cave = self.IdentifyCave(headpath) # cave already exists in db

        if not newdirectory:
            message = " ! 'None' SurvexDirectory returned from GetSurvexDirectory({})".format(headpath)
            print(message)
            print(message,file=sys.stderr)
            models.DataIssue.objects.create(parser='survex', message=message)

        if cave:
            newdirectory.cave = cave
            newfile.cave = cave
            #print("\n"+str(newdirectory.cave),file=sys.stderr)
        else:
            # not necessarily an error: surface/kataster/gps files have no cave
            self.ReportNonCaveIncludes(headpath, svxid)

        if not newfile.survexdirectory:
            message = " ! SurvexDirectory NOT SET in new SurvexFile {} ".format(svxid)
            print(message)
            print(message,file=sys.stderr)
            models.DataIssue.objects.create(parser='survex', message=message)
        self.currentsurvexfile.save() # django insists on this although it is already saved !?
        try:
            newdirectory.save()
        except:
            # dump enough context to diagnose the failing directory, then re-raise
            print(newdirectory, file=sys.stderr)
            print(newdirectory.primarysurvexfile, file=sys.stderr)
            raise
|
|
|
    def ProcessIncludeLine(self, included):
        """Handle a ';*include' marker from the collated file: load the named
        survex file (it becomes current) and push it on stacksvxfiles."""
        svxid = included.groups()[0]
        self.LoadSurvexFile(svxid)
        self.stacksvxfiles.append(self.currentsurvexfile)
|
|
|
    def ProcessEdulcniLine(self, edulcni):
        """Saves the current survexfile in the db
        when a ';*edulcni' marker signals the return from an included file,
        then restores the file popped from stacksvxfiles as current.
        """
        svxid = edulcni.groups()[0]
        #depth = " " * self.depthbegin
        #print("{:2}{} - Edulcni survexfile:'{}'".format(self.depthbegin, depth, svxid))
        self.currentsurvexfile.save()
        self.currentsurvexfile = self.stacksvxfiles.pop()
|
|
|
def LoadSurvexComment(self, survexblock, comment):
|
|
# ignore all comments except ;ref and ;QM and ;*include (for collated survex file)
|
|
refline = self.rx_ref.match(comment)
|
|
if refline:
|
|
comment = re.sub('(?i)\s*ref[.;]?',"",comment.strip())
|
|
self.LoadSurvexRef(survexblock, comment)
|
|
|
|
qmline = self.rx_qm.match(comment)
|
|
if qmline:
|
|
self.LoadSurvexQM(survexblock, qmline)
|
|
|
|
included = self.rx_comminc.match(comment)
|
|
# ;*include means 'we have been included'; whereas *include means 'proceed to include'
|
|
if included:
|
|
self.ProcessIncludeLine(included)
|
|
|
|
edulcni = self.rx_commcni.match(comment)
|
|
# ;*edulcni means we are returning from an included file
|
|
if edulcni:
|
|
self.ProcessEdulcniLine(edulcni)
|
|
|
|
    def LoadSurvexSetup(self,survexblock, survexfile):
        """Reset per-file parser state and, when the file path matches the
        standard caves-YYYY/NNN layout, attach the Cave to survexfile."""
        self.depthbegin = 0
        self.stardata = self.stardatadefault
        blocklegs = self.survexlegsnumber  # NOTE(review): unused local
        print(self.insp+" - MEM:{:.3f} Reading. parent:{} <> {} ".format(models.get_process_memory(),survexblock.survexfile.path, survexfile.path))
        self.lineno = 0
        sys.stderr.flush();
        # progress dots on stderr
        self.callcount +=1
        if self.callcount % 10 ==0 :
            print(".", file=sys.stderr,end='')
        if self.callcount % 500 ==0 :
            print("\n", file=sys.stderr,end='')
        # Try to find the cave in the DB if not use the string as before
        path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
        if path_match:
            pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
            cave = models_caves.getCaveByReference(pos_cave)
            if cave:
                survexfile.cave = cave
|
|
|
def LinearLoad(self, survexblock, path, svxlines):
|
|
"""Loads a single survex file. Usually used to import all the survex files which have been collated
|
|
into a single file. Loads the begin/end blocks using a stack for labels.
|
|
"""
|
|
self.relativefilename = path
|
|
cave = self.IdentifyCave(path) # this will produce null for survex files which are geographic collections
|
|
|
|
self.currentsurvexfile = survexblock.survexfile
|
|
self.currentsurvexfile.save() # django insists on this although it is already saved !?
|
|
|
|
blockcount = 0
|
|
lineno = 0
|
|
def tickle():
|
|
nonlocal blockcount
|
|
blockcount +=1
|
|
if blockcount % 10 ==0 :
|
|
print(".", file=sys.stderr,end='')
|
|
if blockcount % 200 ==0 :
|
|
print("\n", file=sys.stderr,end='')
|
|
print(" - MEM:{:7.3f} MB in use".format(models.get_process_memory()),file=sys.stderr)
|
|
sys.stderr.flush()
|
|
|
|
for svxline in svxlines:
|
|
lineno += 1
|
|
sline, comment = self.rx_comment.match(svxline).groups()
|
|
if comment:
|
|
self.LoadSurvexComment(survexblock, comment) # this catches the ;*include and ;*edulcni lines too
|
|
|
|
if not sline:
|
|
continue # skip blank lines
|
|
|
|
# detect a star command
|
|
mstar = self.rx_star.match(sline)
|
|
if mstar: # yes we are reading a *cmd
|
|
cmd, args = mstar.groups()
|
|
cmd = cmd.lower()
|
|
|
|
# ------------------------BEGIN
|
|
if re.match("begin$(?i)", cmd):
|
|
self.depthbegin += 1
|
|
depth = " " * self.depthbegin
|
|
blockid = args.lower()
|
|
self.stackbegin.append(blockid)
|
|
|
|
previousnlegs = self.survexlegsnumber
|
|
print("{:2}{} - Begin for :'{}'".format(self.depthbegin,depth, blockid))
|
|
pathlist = ""
|
|
for id in self.stackbegin:
|
|
if len(id) > 0:
|
|
pathlist += "." + id
|
|
newsurvexblock = models_survex.SurvexBlock(name=blockid, parent=survexblock,
|
|
survexpath=pathlist,
|
|
cave=self.currentcave, survexfile=self.currentsurvexfile,
|
|
legsall=0, legssplay=0, legssurfc=0, totalleglength=0.0)
|
|
newsurvexblock.save()
|
|
newsurvexblock.title = "("+survexblock.title+")" # copy parent inititally
|
|
survexblock = newsurvexblock
|
|
survexblock.save() # django insists on this , but we want to save at the end !
|
|
tickle()
|
|
|
|
# ---------------------------END
|
|
elif re.match("end$(?i)", cmd):
|
|
depth = " " * self.depthbegin
|
|
|
|
print("{:2}{} - End from:'{}'".format(self.depthbegin,depth,args))
|
|
legsinblock = self.survexlegsnumber - previousnlegs
|
|
print("{:2}{} - LEGS: {} (previous: {}, now:{})".format(self.depthbegin,
|
|
depth,legsinblock,previousnlegs,self.survexlegsnumber))
|
|
survexblock.legsall = legsinblock
|
|
try:
|
|
survexblock.parent.save() # django insists on this although it is already saved !?
|
|
except:
|
|
print(survexblock.parent, file=sys.stderr)
|
|
raise
|
|
try:
|
|
survexblock.save() # save to db at end of block
|
|
except:
|
|
print(survexblock, file=sys.stderr)
|
|
raise
|
|
self.currentsurvexblock = survexblock.parent
|
|
survexblock = survexblock.parent
|
|
blockid = self.stackbegin.pop()
|
|
self.depthbegin -= 1
|
|
|
|
# -----------------------------
|
|
elif re.match("(?i)title$", cmd):
|
|
survexblock.title = args # block has own title, overwrite that from parent
|
|
elif re.match("(?i)ref$", cmd):
|
|
self.LoadSurvexRef(survexblock, args)
|
|
elif re.match("(?i)flags$", cmd):
|
|
self.LoadSurvexFlags(args, cmd)
|
|
elif re.match("(?i)data$", cmd):
|
|
self.LoadSurvexDataCmd(survexblock, args)
|
|
elif re.match("(?i)date$", cmd):
|
|
self.LoadSurvexDate(survexblock, args)
|
|
elif re.match("(?i)team$", cmd):
|
|
self.LoadSurvexTeam(survexblock, args)
|
|
elif re.match("(?i)set$", cmd) and re.match("(?i)names", args):
|
|
pass
|
|
elif re.match("(?i)include$", cmd):
|
|
message = " ! -ERROR *include command not expected here {}. Re-run a full Survex import.".format(path)
|
|
print(message)
|
|
print(message,file=sys.stderr)
|
|
models.DataIssue.objects.create(parser='survex', message=message)
|
|
else:
|
|
self.LoadSurvexIgnore(survexblock, args, cmd)
|
|
else: # not a *cmd so we are reading data OR rx_comment failed
|
|
if "from" in self.stardata: # only interested in survey legs
|
|
self.LoadSurvexLineLeg(survexblock, svxline, sline, comment)
|
|
else:
|
|
pass # ignore all other sorts of data
|
|
|
|
    def RecursiveScan(self, survexblock, path, fin, flinear, fcollate):
        """Follows the *include links in all the survex files from the root file 1623.svx
        and reads only the *include and *begin and *end statements. It produces a linearised
        list of the include tree

        Writes the collated single-file output to 'fcollate' (with ;*include /
        ;*edulcni markers) and an indented tree listing to 'flinear'.
        """
        indent = " " * self.depthinclude
        sys.stderr.flush();
        # progress dots on stderr
        self.callcount +=1
        if self.callcount % 10 ==0 :
            print(".", file=sys.stderr,end='')
        if self.callcount % 500 ==0 :
            print("\n", file=sys.stderr,end='')

        # guard against include cycles: warn on duplicates, abort after 20 visits
        if path in self.svxfileslist:
            message = " * Warning. Duplicate in *include list at:{} depth:{} file:{}".format(self.callcount, self.depthinclude, path)
            print(message)
            print(message,file=flinear)
            print("\n"+message,file=sys.stderr)
            models.DataIssue.objects.create(parser='survex', message=message)
            if self.svxfileslist.count(path) > 20:
                message = " ! ERROR. Survex file already seen 20x. Probably an infinite loop so fix your *include statements that include this. Aborting. {}".format(path)
                print(message)
                print(message,file=flinear)
                print(message,file=sys.stderr)
                models.DataIssue.objects.create(parser='survex', message=message)
                return
        self.svxfileslist.append(path)

        svxlines = fin.read().splitlines()
        for svxline in svxlines:
            self.lineno += 1
            # copy every line except *include statements into the collated file
            includestmt =self.rx_include.match(svxline)
            if not includestmt:
                fcollate.write("{}\n".format(svxline.strip()))

            sline, comment = self.rx_comment.match(svxline.strip()).groups()
            mstar = self.rx_star.match(sline)
            if mstar: # yes we are reading a *cmd
                cmd, args = mstar.groups()
                cmd = cmd.lower()
                if re.match("(?i)include$", cmd):
                    # resolve the included path relative to the current file
                    includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))

                    fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                    self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath))
                    if os.path.isfile(fullpath):
                        #--------------------------------------------------------
                        self.depthinclude += 1
                        fininclude = open(fullpath,'r')
                        fcollate.write(";*include {}\n".format(includepath))
                        flinear.write("{:2} {} *include {}\n".format(self.depthinclude, indent, includepath))
                        push = includepath.lower()
                        self.stackinclude.append(push)
                        #-----------------
                        self.RecursiveScan(survexblock, includepath, fininclude, flinear, fcollate)
                        #-----------------
                        pop = self.stackinclude.pop()
                        if pop != push:
                            # NOTE(review): only the first format placeholder is
                            # filled; 'push' and the stack are passed to
                            # .format() but never displayed — TODO fix message
                            message = "!! ERROR mismatch *include pop!=push {}".format(pop, push, self.stackinclude)
                            print(message)
                            print(message,file=flinear)
                            print(message,file=sys.stderr)
                            models.DataIssue.objects.create(parser='survex', message=message)
                        flinear.write("{:2} {} *edulcni {}\n".format(self.depthinclude, indent, pop))
                        fcollate.write(";*edulcni {}\n".format(pop))
                        fininclude.close()
                        self.depthinclude -= 1
                        #--------------------------------------------------------
                    else:
                        message = " ! ERROR *include file not found for:'{}'".format(includepath)
                        print(message)
                        print(message,file=sys.stderr)
                        models.DataIssue.objects.create(parser='survex', message=message)
                elif re.match("(?i)begin$", cmd):
                    # track *begin/*end nesting so mismatched labels can be reported
                    self.depthbegin += 1
                    depth = " " * self.depthbegin
                    if args:
                        pushargs = args
                    else:
                        pushargs = " "
                    self.stackbegin.append(pushargs.lower())
                    flinear.write(" {:2} {} *begin {}\n".format(self.depthbegin, depth, args))
                    pass
                elif re.match("(?i)end$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(" {:2} {} *end {}\n".format(self.depthbegin, depth, args))
                    if not args:
                        args = " "
                    popargs = self.stackbegin.pop()
                    if popargs != args.lower():
                        message = "!! ERROR mismatch in BEGIN/END labels pop!=push '{}'!='{}'\n{}".format(popargs, args, self.stackbegin)
                        print(message)
                        print(message,file=flinear)
                        print(message,file=sys.stderr)
                        models.DataIssue.objects.create(parser='survex', message=message)

                    self.depthbegin -= 1
                    pass
                elif re.match("(?i)title$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(" {:2} {} *title {}\n".format(self.depthbegin, depth, args))
                    pass
|
|
|
    def RunSurvexIfNeeded(self,fullpath):
        """Re-run 'cavern' on fullpath.svx when its .log output is missing or
        stale: older than the .svx file, older than the cavern binary, more
        than 60 days old, or at random (ChaosMonkey) to keep output fresh."""
        cav_t = 0
        log_t = 0
        svx_t = 0
        now = time.time()

        def runcavern():
            # the three ages printed are svx-vs-log, cavern-vs-log, now-vs-log (days)
            print(" - Regenerating stale cavern .log and .3d for '{}'\n days old: {:.1f} {:.1f} {:.1f}".
                format(fullpath, (svx_t - log_t)/(24*3600), (cav_t - log_t)/(24*3600), (now - log_t)/(24*3600)))
            call([settings.CAVERN, "--log", "--output={}".format(fullpath), "{}.svx".format(fullpath)])

        svxpath = fullpath + ".svx"
        logpath = fullpath + ".log"

        if not os.path.isfile(logpath):
            # never been run: no log to compare against
            runcavern()
            return

        if not self.caverndate:
            # discover and cache the modification time of the cavern binary
            completed_process = run(["which", "{}".format(settings.CAVERN)],
                        capture_output=True, check=True, text=True)
            self.caverndate = os.path.getmtime(completed_process.stdout.strip())
        cav_t = self.caverndate
        log_t = os.path.getmtime(logpath)
        svx_t = os.path.getmtime(svxpath)
        now = time.time()

        if svx_t - log_t > 0: # stale, older than svx file
            runcavern()
            return
        if now - log_t > 60 *24*60*60: # >60 days, re-run anyway
            runcavern()
            return
        if cav_t - log_t > 0: # new version of cavern
            runcavern()
            return
        if ChaosMonkey(30):
            # randomly regenerate a fraction of files each run
            runcavern()
|
|
|
def FindAndLoadSurvex(survexblockroot):
    """Follows the *include links recursively to find files, collates them into
    one linear file, then loads all survex blocks from that collated file.

    Pass 1 (RecursiveScan) writes:
      - "_<topname>.svx": every included svx file concatenated, with
        ;*include / ;*edulcni comment markers recording the nesting, and
      - svxlinear.log: a human-readable linear trace of the include tree.
    Pass 2 (LinearLoad) re-reads the collated file and creates the database
    objects for blocks, legs, stations etc.

    While this runs, sys.stdout is redirected to svxblks.log; progress messages
    intended for the console go to sys.stderr.

    Returns (survexlegsnumber, survexlegsalllength): the total leg count and
    total leg length in metres.
    """
    print(' - redirecting stdout to svxblks.log...')
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file; restored (and closed) at the end.
    sys.stdout = open('svxblks.log', 'w')

    print(' - SCANNING All Survex Blocks...',file=sys.stderr)
    survexfileroot = survexblockroot.survexfile # i.e. SURVEX_TOPNAME only
    collatefilename = "_" + survexfileroot.path + ".svx"

    svx_scan = LoadingSurvex()
    svx_scan.callcount = 0
    svx_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, survexfileroot.path)
    print(" - RunSurvexIfNeeded cavern on '{}'".format(fullpathtotop), file=sys.stderr)
    svx_scan.RunSurvexIfNeeded(fullpathtotop)
    indent=""
    fcollate = open(collatefilename, 'w')

    mem0 = models.get_process_memory()
    print(" - MEM:{:7.2f} MB START".format(mem0),file=sys.stderr)
    flinear = open('svxlinear.log', 'w')
    flinear.write(" - MEM:{:7.2f} MB START {}\n".format(mem0,survexfileroot.path))

    finroot = survexfileroot.OpenFile()
    fcollate.write(";*include {}\n".format(survexfileroot.path))
    flinear.write("{:2} {} *include {}\n".format(svx_scan.depthinclude, indent, survexfileroot.path))
    #----------------------------------------------------------------
    svx_scan.RecursiveScan(survexblockroot, survexfileroot.path, finroot, flinear, fcollate)
    #----------------------------------------------------------------
    flinear.write("{:2} {} *edulcni {}\n".format(svx_scan.depthinclude, indent, survexfileroot.path))
    fcollate.write(";*edulcni {}\n".format(survexfileroot.path))
    mem1 = models.get_process_memory()
    flinear.write("\n - MEM:{:.2f} MB STOP {}\n".format(mem1,survexfileroot.path))
    flinear.write(" - MEM:{:.3f} MB USED\n".format(mem1-mem0))
    svxfileslist = svx_scan.svxfileslist
    flinear.write(" - {:,} survex files in linear include list \n".format(len(svxfileslist)))
    flinear.close()
    fcollate.close()
    svx_scan = None # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.?
    print("\n - {:,} survex files in linear include list \n".format(len(svxfileslist)),file=sys.stderr)

    mem1 = models.get_process_memory()
    # Fixed: this previously printed mem0 (the START figure) while labelling it END.
    print(" - MEM:{:7.2f} MB END ".format(mem1),file=sys.stderr)
    print(" - MEM:{:7.3f} MB USED".format(mem1-mem0),file=sys.stderr)
    svxfileslist = [] # free memory

    # Before doing this, it would be good to identify the *equate and *entrance we need that are relevant to the
    # entrance locations currently loaded after this by LoadPos(), but could better be done before ?
    # look in MapLocations() for how we find the entrances

    print('\n - Loading All Survex Blocks (LinearLoad)',file=sys.stderr)
    svx_load = LoadingSurvex()

    svx_load.svxdirs[""] = survexfileroot.survexdirectory
    with open(collatefilename, "r") as fcollate:
        svxlines = fcollate.read().splitlines()
    #----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot,survexfileroot.path, svxlines)
    #----------------------------------------------------------------

    # Fixed: re-measure after LinearLoad; previously the stale pre-load mem1
    # was printed here while labelled STOP.
    mem1 = models.get_process_memory()
    print("\n - MEM:{:7.2f} MB STOP".format(mem1),file=sys.stderr)
    print(" - MEM:{:7.3f} MB USED".format(mem1-mem0),file=sys.stderr)

    survexlegsnumber = svx_load.survexlegsnumber
    survexlegsalllength = svx_load.survexlegsalllength

    print(" - Number of SurvexDirectories: {}".format(len(svx_load.survexdict)))
    tf = sum(len(files) for files in svx_load.survexdict.values())
    print(" - Number of SurvexFiles: {}".format(tf))
    svx_load = None

    # Close the logging file, Restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    return (survexlegsnumber, survexlegsalllength)
|
|
|
|
def MakeSurvexFileRoot():
    """Create and return the root SurvexFile (path = SURVEX_TOPNAME), linked to the
    root SurvexDirectory (path = SURVEX_DATA).

    The two objects refer to each other, so the file is saved once to obtain a
    primary key, the directory is saved pointing at it, and then the file is
    saved a second time with its directory reference filled in.
    """
    root_file = models_survex.SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    root_file.save()

    root_dir = models_survex.SurvexDirectory(path=settings.SURVEX_DATA, cave=None,
                                             primarysurvexfile=root_file)
    root_dir.save()

    # i.e. SURVEX_DATA/SURVEX_TOPNAME; mutually dependent objects need a double-save
    root_file.survexdirectory = root_dir
    root_file.save()
    return root_file
|
|
|
|
def LoadSurvexBlocks():
    """Flush all survex-derived data from the database, then re-parse everything.

    Creates the root SurvexFile/SurvexDirectory/SurvexBlock objects, delegates
    the recursive scan and load to FindAndLoadSurvex(), and finally stores the
    grand totals (leg count and leg length) on the root block.
    """
    print(' - Flushing All Survex Blocks...')
    # Delete in this order; related objects are flushed before re-parsing.
    for stale_model in (models_survex.SurvexBlock,
                        models_survex.SurvexFile,
                        models_survex.SurvexDirectory,
                        models_survex.SurvexPersonRole,
                        models_survex.SurvexStation):
        stale_model.objects.all().delete()
    print(" - survex Data Issues flushed")
    models.DataIssue.objects.filter(parser='survex').delete()

    survexfileroot = MakeSurvexFileRoot()
    # this next makes a block_object associated with a file_object.path = SURVEX_TOPNAME
    # this is the first block created, so it gets id=1
    survexblockroot = models_survex.SurvexBlock(name=ROOTBLOCK, survexpath="", cave=None,
                                                survexfile=survexfileroot,
                                                legsall=0, legssplay=0, legssurfc=0,
                                                totalleglength=0.0)
    survexblockroot.save()

    print(' - Loading Survex Blocks...')
    mem_before = models.get_process_memory()
    #----------------------------------------------------------------
    survexlegsnumber, survexlegsalllength = FindAndLoadSurvex(survexblockroot)
    #----------------------------------------------------------------
    mem_after = models.get_process_memory()
    print(" - MEMORY start:{:.3f} MB end:{:.3f} MB increase={:.3f} MB".format(
        mem_before, mem_after, mem_after - mem_before))

    # Record the grand totals on the root block.
    survexblockroot.totalleglength = survexlegsalllength
    survexblockroot.legsall = survexlegsnumber
    survexblockroot.save()

    print(" - total number of survex legs: {}".format(survexlegsnumber))
    print(" - total leg lengths loaded: {}m".format(survexlegsalllength))
    print(' - Loaded All Survex Blocks.')
|
|
|
|
# Matches one line of a survex .pos file, e.g. "(12345.67, 81969.21, 1775.80 ) caves.204.entrance"
# Captures the x, y, z coordinates and the dot-separated station name.
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
|
|
|
|
def LoadPos():
    """First load the survex stations for entrances and fixed points (about 600) into the database.
    Run cavern to produce a complete .3d file, then run 3dtopos to produce a table of
    all survey point positions. Then lookup each position by name to see if we have it in the database
    and if we do, then save the x/y/z coordinates. This gives us coordinates of the entrances.
    If we don't have it in the database, print an error message and discard it.
    """
    svx_t = 0
    d3d_t = 0

    def runcavern3d():
        # cavern defaults to using the same cwd as the supplied input file;
        # 3dtopos is run explicitly in SURVEX_DATA to produce the .pos table.
        print(" - Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{}'\n days old: {:.1f} {:.1f} {:.1f}".
            format(topdata, (svx_t - d3d_t)/(24*3600), (cav_t - d3d_t)/(24*3600), (now - d3d_t)/(24*3600)))
        call([settings.CAVERN, "--log", "--output={}".format(topdata), "{}.svx".format(topdata)])
        call([settings.THREEDTOPOS, '{}.3d'.format(topdata)], cwd = settings.SURVEX_DATA)

    topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME
    print((' - Generating a list of Pos from %s.svx and then loading...' % (topdata)))

    found = 0
    print("\n") # extra line because cavern overwrites the text buffer somehow

    # mtime of the cavern binary itself detects a cavern upgrade.
    completed_process = run(["which", "{}".format(settings.CAVERN)],
                capture_output=True, check=True, text=True)
    cav_t = os.path.getmtime(completed_process.stdout.strip())

    svxpath = topdata + ".svx"
    d3dpath = topdata + ".3d"

    svx_t = os.path.getmtime(svxpath)

    if os.path.isfile(d3dpath):
        d3d_t = os.path.getmtime(d3dpath)

    now = time.time()
    if not os.path.isfile(d3dpath):
        runcavern3d()
    elif svx_t - d3d_t > 0: # stale, 3d older than svx file
        runcavern3d()
    elif now - d3d_t > 60 *24*60*60: # >60 days old, re-run anyway
        runcavern3d()
    elif cav_t - d3d_t > 0: # new version of cavern
        runcavern3d()

    # Only station names corresponding to entrances/fixed points are of interest.
    mappoints = {}
    for pt in MapLocations().points():
        svxid, number, point_type, label = pt
        mappoints[svxid]=True

    try:
        survexblockroot = models_survex.SurvexBlock.objects.get(name=ROOTBLOCK)
    except Exception:  # was a bare except: narrowed so Ctrl-C etc. are not swallowed
        try:
            survexblockroot = models_survex.SurvexBlock.objects.get(id=1)
        except Exception:
            message = ' ! FAILED to find root SurvexBlock'
            print(message)
            models.DataIssue.objects.create(parser='survex', message=message)
            raise

    # 'with' guarantees the .pos file is closed (it was previously left open).
    with open("%s.pos" % (topdata)) as posfile:
        posfile.readline() #Drop header
        for line in posfile:
            r = poslineregex.match(line)
            if not r:
                continue
            # 'stnid' rather than 'id': avoid shadowing the builtin.
            x, y, z, stnid = r.groups()
            for sid in mappoints:
                if stnid.endswith(sid):
                    blockpath = "." + stnid[:-len(sid)].strip(".")
                    # We used to try to match a SurvexBlock by blockpath here, but mostly
                    # these stations don't actually appear in any SVX file, so that
                    # half-arsed syntactic match almost never worked (dead code removed).
                    # Matching via the cave data would be better; for now every station
                    # is attached to the root block.
                    try:
                        ss = models_survex.SurvexStation(name=stnid, block=survexblockroot)
                        ss.x = float(x)
                        ss.y = float(y)
                        ss.z = float(z)
                        ss.save()
                        found += 1
                    except Exception:
                        message = ' ! FAIL to create SurvexStation Entrance point {} {}'.format(blockpath, sid)
                        print(message)
                        models.DataIssue.objects.create(parser='survex', message=message)
                        raise
    print(" - {} SurvexStation entrances found.".format(found))
|
|
|