import sys
import os
import re
import time
import copy
import subprocess

from pathlib import Path
from datetime import datetime, timedelta, date

from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware

import troggle.settings as settings
from troggle.core.models.caves import Entrance, QM, LogbookEntry
from troggle.core.utils import get_process_memory, chaosmonkey
from troggle.parsers.people import GetPersonExpeditionNameLookup
from troggle.parsers.logbooks import GetCaveLookup
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.models.survex import SurvexPersonRole, Wallet, SurvexDirectory, SurvexFile, SurvexBlock, SurvexStation

'''Imports the tree of survex files following from a defined root .svx file.

It does NOT also scan the Loser repo for all the svx files - though it should!
'''

todo = '''Also walk the entire tree in the :loser: repo looking for unconnected survex files
- add them to the system so that they can be reported-on
- produce a parser report and create a troggle report page (some are OK, e.g. futility series replaced by ARGE survey in 115)

- If you look at e.g. http://expo.survex.com/survexfile/161#T_caves-1623/161/lhr/alllhr
  you will see that half the team members are recognised by this parser, but not recognised by the
  wider troggle system (the name is not a hyperlink) - apparently randomly.
  GetPersonExpeditionNameLookup() needs to be fixed.

- #BUG: if *date comes after *team, the person's date is not set at all. It needs re-setting at the end of the block.

- LoadSurvexFile() creates a new current survexfile and valid .survexdirectory.
  The survexblock passed-in is not necessarily the parent. FIX THIS.

- rx_qm recognises only simple survey point ids. EXTEND to cover more naming formats and test fully for 2023.
'''

survexblockroot = None
ROOTBLOCK = "rootblock"
METRESINFEET = 3.28084

debugprint = False # Turns on debug printout for just one *include file
debugprinttrigger = "!"
# debugprinttrigger = "caves-1623/40/old/EisSVH"

class MapLocations(object):
    p = [
        ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point"),
        ("226-96", "BZkn", "Reference", "Bräuning Zinken trig point"),
        ("vd1", "VD1", "Reference", "VD1 survey point"),
        ("laser.kt114_96", "HSK", "Reference", "Hinterer Schwarzmooskogel trig point"),
        ("2000", "Nipple", "Reference", "Nipple (Weiße Warze)"),
        ("3000", "VSK", "Reference", "Vorderer Schwarzmooskogel summit"),
        ("topcamp", "OTC", "Reference", "Old Top Camp"),
        ("laser.0", "LSR0", "Reference", "Laser Point 0"),
        ("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
        ("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
        ("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
        ("225-96", "BAlm", "Reference", "Bräuning Alm trig point")
    ]

    def points(self):
        for ent in Entrance.objects.all():
            if ent.best_station():
                try:
                    k = ent.caveandentrance_set.all()[0].cave
                except:
                    message = " ! Failed to get Cave linked to Entrance:{} from:{} best:{}".format(ent.name, ent.filename, ent.best_station())
                    DataIssue.objects.create(parser='entrances', message=message)
                    print(message)
                    raise
                try:
                    areaName = k.getArea().short_name
                except:
                    message = " ! Failed to get Area on cave '{}' linked to Entrance:{} from:{} best:{}".format(k, ent.name, ent.filename, ent.best_station()) # was formatting an undefined name 'cave'
                    DataIssue.objects.create(parser='entrances', message=message)
                    print(message)
                    raise
                self.p.append((ent.best_station(), "%s-%s" % (areaName, str(ent)[5:]), ent.needs_surface_work(), str(ent)))
        message = f" - {len(self.p)} entrances linked to caves."
        print(message)
        return self.p

    def __str__(self):
        return "{} map locations".format(len(self.p))

class SurvexLeg():
    """No longer a models.Model subclass, so no longer a database table
    """
    tape = 0.0
    compass = 0.0
    clino = 0.0

class LoadingSurvex():
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scanswallet' is what we today call a "survey scans folder" or a "wallet".
    """
    rx_begin = re.compile(r'(?i)begin')
    rx_end = re.compile(r'(?i)end$')
    rx_title = re.compile(r'(?i)title$')
    rx_ref = re.compile(r'(?i)ref$')
    rx_data = re.compile(r'(?i)data$')
    rx_flags = re.compile(r'(?i)flags$')
    rx_alias = re.compile(r'(?i)alias$')
    rx_entrance = re.compile(r'(?i)entrance$')
    rx_date = re.compile(r'(?i)date$')
    rx_units = re.compile(r'(?i)units$')
    rx_team = re.compile(r'(?i)team$')
    rx_set = re.compile(r'(?i)set$')

    rx_names = re.compile(r'(?i)names')
    rx_flagsnot = re.compile(r"not\s")
    rx_linelen = re.compile(r"[\d\-+.]+$")
    instruments = "(waiting_patiently|slacker|Useless|nagging|unknown|Inst|instrument|rig|rigger|rigging|helper|something| compass|comp|clino|Notes|sketch|book|Tape|Dog|Pics|photo|drawing|Helper|GPS|Disto|Distox|Distox2|topodroid|point|Consultant|nail|polish|nail_polish_bitch|nail_polish_monkey|varnish|nail_polish|nail_varnish|bitch|monkey|PowerDrill|drill)"
    rx_teammem = re.compile(r"(?i)" + instruments + r"?(?:es|s)?\s+(.*)" + instruments + r"?(?:es|s)?$")
    rx_person = re.compile(r"(?i) and | / |, | & | \+ |^both$|^none$")
    rx_qm = re.compile(r'(?i)^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
    # does not recognise non-numeric suffix survey point ids
    rx_qm0 = re.compile(r'(?i)^\s*QM(\d)\s+(.+)$')
    # remember there is also QM_PATTERN used in views.other and set in settings.py
    rx_tapelng = re.compile(r'(?i).*(tape|length).*$')

    rx_cave = re.compile(r'(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)')
    rx_comment = re.compile(r'([^;]*?)\s*(?:;\s*(.*))?\n?$')
    rx_comminc = re.compile(r'(?i)^\*include[\s]*([-\w/]*).*$') # inserted by linear collate ;*include
    rx_commcni = re.compile(r'(?i)^\*edulcni[\s]*([-\w/]*).*$') # inserted by linear collate ;*edulcni
    rx_include = re.compile(r'(?i)^\s*(\*include[\s].*)$')
    rx_commref = re.compile(r'(?i)^\s*ref(?:erence)?[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)')
    rx_wallet = re.compile(r'(?i)^\s*wallet[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)')
    rx_implicit = re.compile(r'(?i)^[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)')
    rx_ref_text = re.compile(r'(?i)^\s*\"[^"]*\"\s*$')
    rx_star = re.compile(r'(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
    rx_starref = re.compile(r'(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')
    rx_argsref = re.compile(r'(?i)^[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')
    rx_badmerge = re.compile(r'(?i).*(\>\>\>\>\>)|(\=\=\=\=\=)|(\<\<\<\<\<).*$')
    rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')
    rx_ref3 = re.compile(r'(?i)\s*wallet[.;]?')

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
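    # Illustrative worked example (not in the original source): for the example command above,
    # LoadSurvexDataNormal() maps each recognised field name to its column index after the keyword,
    # with "length" counted as tape and "gradient" as clino, so it would produce
    #   datastar == {"type": "normal", "from": 0, "to": 1, "tape": 2, "clino": 3, "compass": 4}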
    datastardefault = {"type": "normal", "from": 0, "to": 1, "tape": 2, "compass": 3, "clino": 4}
    flagsdefault = {"duplicate": False, "surface": False, "splay": False, "skiplegs": False, "splayalias": False}

    datastar = {}
    flagsstar = {}
    units = "metres"
    unitsfactor = None
    slength = 0.0
    legsnumber = 0
    depthbegin = 0
    depthinclude = 0
    unitsstack = []
    legsnumberstack = []
    slengthstack = []
    personexpedstack = []
    stackbegin = []
    flagsstack = []
    datastack = []
    includestack = []
    stacksvxfiles = []
    svxfileslist = []
    svxdirs = {}
    uniquename = {}
    expos = {}
    survexdict = {} # each key is a directory, and its value is a list of files
    lineno = 0
    insp = ""
    callcount = 0
    caverncount = 0
    ignoreprefix = ["surface", "kataster", "fixedpts", "gpx"]
    ignorenoncave = ["caves-1623", "caves-1623/2007-neu"]
    includedfilename = ""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None
    currentpersonexped = []

    def __init__(self):
        self.caveslist = GetCaveLookup()

    def LoadSurvexFallThrough(self, survexblock, line, cmd):
        if cmd == "require":
            pass # should we check survex version available for processing?
        elif cmd in ["equate", "fix", "calibrate", "cs", "export", "case",
                     "declination", "infer", "instrument", "sd"]:
            pass # we ignore all these, which is fine.
        else:
            if cmd in ["include", "data", "flags", "title", "entrance", "set", "units", "alias", "ref"]:
                message = "! Unparsed [*{}]: '{}' {} - not an error (probably)".format(cmd, line, survexblock.survexfile.path)
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message)
            else:
                message = "! Bad svx command: [*{}] {} ({}) {}".format(cmd, line, survexblock, survexblock.survexfile.path)
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexTeam(self, survexblock, line):
        """Interpreting the *team fields means interpreting older-style survex as well as the current survex standard,
        *team Insts Anthony Day - this is how most of our files specify the team member
        *team "Anthony Day" notes pictures tape - this is how the survex documentation says it should be done
        We have a huge variety of abbreviations and misspellings. The most laconic being
        *team gb, bl

        If you look at e.g. http://expo.survex.com/survexfile/161#T_caves-1623/161/lhr/alllhr
        you will see that half the team members are recognised by this parser, but not recognised by the
        wider troggle system (the name is not a hyperlink) - apparently randomly.
        GetPersonExpeditionNameLookup() needs to be fixed.
        """
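        # Illustrative sketch (the second name below is hypothetical, not from the source): for the line
        #   "Insts Anthony Day & Becka Smith"
        # rx_teammem strips the leading instrument word ("Insts") and captures the rest in group(2);
        # rx_person then splits group(2) on " and ", " / ", ", ", " & " or " + ", giving
        #   ["Anthony Day", "Becka Smith"]
        # and each name is looked up via GetPersonExpeditionNameLookup(survexblock.expedition).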
        teammembers = []
        mteammember = self.rx_teammem.match(line)
        if mteammember:
            for tm in self.rx_person.split(mteammember.group(2)):
                if tm:
                    personexpedition = survexblock.expedition and GetPersonExpeditionNameLookup(survexblock.expedition).get(tm.lower())
                    if (personexpedition, tm) not in teammembers:
                        teammembers.append((personexpedition, tm))
                        personrole = SurvexPersonRole(survexblock=survexblock, personexpedition=personexpedition, personname=tm)
                        personrole.save()
                        personrole.expeditionday = survexblock.expeditionday # BUG: if *date comes after *team, this is NOT SET.
                        if personexpedition:
                            personrole.person = personexpedition.person
                            self.currentpersonexped.append(personexpedition)
                        personrole.save()

    def LoadSurvexEntrance(self, survexblock, line):
        # Not using this yet
        pass

    def LoadSurvexAlias(self, survexblock, line):
        # *alias station - ..
        splayalias = re.match(r"(?i)station\s*-\s*\.\.\s*$", line) # raw string added; was a non-raw string with \s escapes
        if splayalias:
            self.flagsstar["splayalias"] = True
        else:
            message = "! Bad *ALIAS: '{}' ({}) {}".format(line, survexblock, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexUnits(self, survexblock, line):
        # all for 4 survex files with measurements in feet. bugger.
        tapeunits = self.rx_tapelng.match(line) # tape|length
        if not tapeunits:
            return
        convert = re.match(r"(?i)(\w*)\s*([\.\d]+)\s*(\w*)", line)
        if convert:
            factor = convert.groups()[1]
            self.unitsfactor = float(factor)
            if debugprint:
                message = "! *UNITS NUMERICAL conversion [{}x] '{}' ({}) {}".format(factor, line, survexblock, survexblock.survexfile.path)
                print(self.insp+message)
                DataIssue.objects.create(parser='survexunits', message=message)

        feet = re.match("(?i).*feet$", line)
        metres = re.match("(?i).*(METRIC|METRES|METERS)$", line)
        if feet:
            self.units = "feet"
        elif metres:
            self.units = "metres"
        else:
            message = "! *UNITS in YARDS!? - not converted '{}' ({}) {}".format(line, survexblock, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survexunits', message=message)

    def LoadSurvexDate(self, survexblock, line):
        # we should make this a date RANGE for everything
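        # Illustrative note (not in the original source): the formats accepted below are
        #   "2018-07-20" (10 chars, a full date), "2018-07" (7 chars, set to the 1st of the month),
        #   "2018" (4 chars, set to January 1st), and a range such as "2018-07-20-2018-07-25",
        # which is truncated to its first date because only line[:10] is kept when line[10] == "-".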
        def findexpedition(year):
            return Expedition.objects.filter(year=year)

        def setdate(year):
            # caching to save a DB query on every block, and to prepare for django-less troggle in future
            if year in self.expos:
                expo = self.expos[year]
            else:
                expeditions = findexpedition(year)
                if len(expeditions) != 1:
                    message = f"! Not exactly one expedition in year {year} '{line}' ({survexblock}) {survexblock.survexfile.path}" # message said 'more than one' but the test also catches zero
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survexunits', message=message)

                expo = expeditions[0]
                self.expos[year] = expo

            survexblock.expedition = expo
            survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
            survexblock.save()

        if len(line) > 10:
            if line[10] == "-":
                line = line[0:10]
        if len(line) == 10:
            year = line[:4]
            # TO DO set to correct Austrian timezone Europe/Vienna
            # %m and %d need leading zeros. Source svx files require them.
            survexblock.date = datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m-%d')
            setdate(year)
        elif len(line) == 7:
            year = line[:4]
            survexblock.date = datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m') # sets to first of month
            setdate(year)
        elif len(line) == 4:
            year = line[:4]
            survexblock.date = datetime.strptime(line, '%Y') # sets to January 1st
            setdate(year)
        else:
            message = "! DATE unrecognised '{}' ({}) {}".format(line, survexblock, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexLeg(self, survexblock, sline, comment):
        """This reads compass, clino and tape data but only keeps the tape lengths;
        the rest is discarded after error-checking.
        Now skipping the error checking - returns as soon as the leg is not one we count.
        """
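        # Illustrative sketch (not in the original source): with the default datastar
        # {"from": 0, "to": 1, "tape": 2, "compass": 3, "clino": 4}, a line such as
        #   "1 2 5.57 034.5 -12.0"
        # splits into ls == ['1', '2', '5.57', '034.5', '-12.0'], so tape == ls[2] == '5.57',
        # which is converted to metres (if units are feet) and added to survexblock.legslength.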
        invalid_clino = 180.0
        invalid_compass = 720.0
        invalid_tape = 0.0

        if self.flagsstar["skiplegs"]:
            if debugprint:
                print("skip in ", self.flagsstar, survexblock.survexfile.path)
            return

        if debugprint:
            print("! LEG datastar type:{}++{}\n{} ".format(self.datastar["type"].upper(), survexblock.survexfile.path, sline))
        if self.datastar["type"] == "passage":
            return
        if self.datastar["type"] == "cartesian":
            return
        if self.datastar["type"] == "nosurvey":
            return
        if self.datastar["type"] == "diving":
            return
        if self.datastar["type"] == "cylpolar":
            return
        if debugprint:
            print(" !! LEG data lineno:{}\n !! sline:'{}'\n !! datastar['tape']: {}".format(self.lineno, sline, self.datastar["tape"]))

        if self.datastar["type"] != "normal":
            return

        datastar = self.datastar # shallow copy: an alias, but the things inside are the same things
        survexleg = SurvexLeg()

        ls = sline.lower().split()

        # skip all splay legs
        if ls[datastar["from"]] == ".." or ls[datastar["from"]] == ".":
            if debugprint:
                print("Splay in ", survexblock.survexfile.path)
            return
        if ls[datastar["to"]] == ".." or ls[datastar["to"]] == ".":
            if debugprint:
                print("Splay in ", survexblock.survexfile.path)
            return
        if self.flagsstar["splayalias"]:
            if ls[datastar["from"]] == "-":
                if debugprint:
                    print("Aliased splay in ", survexblock.survexfile.path)
                return
            if ls[datastar["to"]] == "-":
                if debugprint:
                    print("Aliased splay in ", survexblock.survexfile.path)
                return

        try:
            tape = ls[datastar["tape"]]
        except:
            message = ' ! datastar parsing incorrect in line %s in %s' % (ls, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message)
            survexleg.tape = invalid_tape
            return
        # e.g. '29/09' or '(06.05)' in the tape measurement
        # tape = tape.replace("(","") # edited original file (only one) instead
        # tape = tape.replace(")","") # edited original file (only one) instead
        # tape = tape.replace("/",".") # edited original file (only one) instead.
        try:
            if self.unitsfactor:
                tape = float(tape) * self.unitsfactor
                if debugprint:
                    message = " ! Units: Length scaled {}m '{}' in ({}) units:{} factor:{}x".format(tape, ls, survexblock.survexfile.path, self.units, self.unitsfactor)
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survexleg', message=message)
            if self.units == "feet":
                tape = float(tape) / METRESINFEET
                if debugprint:
                    message = " ! Units: converted to {:.3f}m from {} '{}' in ({})".format(tape, self.units, ls, survexblock.survexfile.path)
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survexleg', message=message)
            survexleg.tape = float(tape)
            self.legsnumber += 1
        except ValueError:
            message = " ! Value Error: Tape misread in line '{}' in {} units:{}".format(ls, survexblock.survexfile.path, self.units)
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message)
            survexleg.tape = invalid_tape
        try:
            survexblock.legslength += survexleg.tape
            self.slength += survexleg.tape
        except ValueError:
            message = " ! Value Error: Tape length not added '{}' in {} units:{}".format(ls, survexblock.survexfile.path, self.units)
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message)

        try:
            lcompass = ls[datastar["compass"]]
        except:
            message = ' ! Value Error: Compass not found in line %s in %s' % (ls, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message)
            lcompass = invalid_compass

        try:
            lclino = ls[datastar["clino"]]
        except:
            print("! Clino misread in", survexblock.survexfile.path)
            print("  datastar:", datastar)
            print("  Line:", ls)
            message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
            DataIssue.objects.create(parser='survexleg', message=message)
            lclino = invalid_clino

        if lclino == "up":
            survexleg.clino = 90.0
            lcompass = invalid_compass
        elif lclino == "down":
            survexleg.clino = -90.0
            lcompass = invalid_compass
        elif lclino == "-" or lclino == "level":
            survexleg.clino = 0.0 # was -90.0, which looked like a copy-paste bug: "-" and "level" mean horizontal

        try:
            survexleg.compass = float(lcompass)
        except ValueError:
            print("! Compass misread in", survexblock.survexfile.path)
            print("  datastar:", datastar)
            print("  Line:", ls)
            message = " ! Value Error: lcompass:'{}' line {} in '{}'".format(lcompass, ls, survexblock.survexfile.path)
            DataIssue.objects.create(parser='survexleg', message=message)
            survexleg.compass = invalid_compass

        # delete the object to save memory
        survexleg = None

    def LoadSurvexRef(self, survexblock, args):
        # print(self.insp + "*REF ---- '" + args + "'")
        url = f'/survexfile/{survexblock.survexfile.path}'
        # *REF, but also "; ref" comments; years from 1960 to 2039
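        # Illustrative sketch (not in the original source): for "*ref 2018#33",
        # rx_argsref captures yr='2018', letterx=None, wallet='33', giving refscan '2018#33';
        # a single-digit wallet such as "2018#7" is zero-padded below, giving '2018#07'.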
        refline = self.rx_ref_text.match(args)
        if refline:
            # a textual reference such as "1996-1999 Not-KH survey book pp 92-95"
            print(self.insp + "*REF quoted text so ignored:" + args)
            return

        if len(args) < 4:
            message = " ! Empty or BAD *REF statement '{}' in '{}'".format(args, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
            return

        argsgps = self.rx_argsref.match(args)
        if argsgps:
            yr, letterx, wallet = argsgps.groups()
        else:
            message = " ! Wallet *REF '{}' malformed id in '{}' ".format(args, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
            return

        if not letterx:
            letterx = ""
        else:
            letterx = "X"
        if len(wallet) < 2:
            wallet = "0" + wallet
        if not (int(yr) > 1960 and int(yr) < 2039):
            message = f" ! Wallet year out of bounds {yr} '{args}' {survexblock.survexfile.path}" # was missing its f-string prefix and referenced refscan before it was defined
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)

        refscan = "%s#%s%s" % (yr, letterx, wallet)
        try:
            if int(wallet) > 99:
                message = " ! Wallet *REF {} - too big in '{}'".format(refscan, survexblock.survexfile.path)
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message, url=url)
        except:
            message = " ! Wallet *REF {} - not numeric in '{}' -- parsing continues".format(refscan, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
        manywallets = Wallet.objects.filter(walletname=refscan)
        if manywallets:
            survexblock.scanswallet = manywallets[0] # this is a ForeignKey field
            print(manywallets[0])
            survexblock.save()
            if len(manywallets) > 1:
                message = " ! Wallet *REF {} - more than one found: {} scan folders in {}".format(refscan, len(manywallets), survexblock.survexfile.path)
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message, url=url)
        else:
            message = " ! Wallet *REF '{}' - NOT found '{}'".format(refscan, survexblock.survexfile.path)
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)

    def LoadSurvexQM(self, survexblock, qmline):
        insp = self.insp

        qm_no = qmline.group(1) # this may not be unique across multiple survex files

        qm_grade = qmline.group(2)
        if qmline.group(3): # usual closest survey station
            qm_nearest = qmline.group(3)
            if qmline.group(4):
                qm_nearest = qm_nearest + "." + qmline.group(4)

        if qmline.group(6) and qmline.group(6) != '-':
            qm_resolve_station = qmline.group(6)
            if qmline.group(7):
                qm_resolve_station = qm_resolve_station + "." + qmline.group(7)
        else:
            qm_resolve_station = ""
        qm_notes = qmline.group(8)
        # Spec of QM in SVX files:
        # ;Serial number  grade(A/B/C/D/X)  nearest-station  resolution-station  description
        # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
        # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
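        # Illustrative mapping (not from the original source) for the last example line above:
        # rx_qm gives group(1)='1' (serial), group(2)='a' (grade), group(3)='hobnob_hallway_2' and
        # group(4)='42' (nearest station), group(5)='-' so group(6) is None (no resolution station,
        # qm_resolve_station stays ""), and group(8)='junction of keyhole passage' (description).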
        # NB none of the SurveyStations are in the DB now, so if we want to link to a SurvexStation
        # we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one.

        # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date.
        # We don't need this anymore, so we don't need to create a placeholder logbook entry.
        qmyear = str(survexblock.date)[:4]
        blockname = survexblock.name[:7]
        logslug = f'D{int(qmyear)}_{blockname}_{int(qm_no):03d}'
        if survexblock.survexfile.cave:
            caveslug = survexblock.survexfile.cave.slug()
            place = survexblock.survexfile.cave
        else:
            caveslug = "ugh"
            place = "oops"

        # message = f' ! - logbook dummy "{logslug}" {str(survexblock.date)[:11]} for cave "{caveslug}" created.'
        # placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=qmyear,
        #                                place=place,
        #                                title="placeholder for survex QM",
        #                                text=message,
        #                                entry_type="DUMMY",
        #                                expedition_id=1,
        #                                defaults={"date": survexblock.date, "cave_slug": caveslug, "slug": logslug})
        # print(insp+message)
        # DataIssue.objects.create(parser='survex', message=message)

        try:
            qm = QM.objects.create(number=qm_no,
                                   # nearest_station=a_survex_station_object, # can be null
                                   nearest_station_description=qm_resolve_station,
                                   nearest_station_name=qm_nearest,
                                   grade=qm_grade.upper(),
                                   location_description=qm_notes,
                                   blockname=blockname, # only set for survex-imported QMs
                                   # found_by=placeholder,
                                   expoyear=str(survexblock.date.year),
                                   cave=survexblock.survexfile.cave)
            qm.save() # was 'qm.save' with no parentheses; create() has already saved it, so this is belt-and-braces
            # message = " ! QM{} '{}' CREATED in DB in '{}'".format(qm_no, qm_nearest, survexblock.survexfile.path)
            # print(insp+message)
            # DataIssue.objects.create(parser='survex', message=message)
        except:
            message = " ! QM{} FAIL to create {} in '{}'".format(qm_no, qm_nearest, survexblock.survexfile.path)
            print(insp+message)
            DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexDataNormal(self, survexblock, args):
        """Sets the order for data elements in this and following blocks, e.g.
        *data normal from to compass clino tape
        *data normal from to tape compass clino
        We are only collecting length data, so we are uninterested in from, to, LRUD etc.
        """
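        # Illustrative (not in the original source): the two example commands above produce
        # different column indices for the same fields:
        #   "*data normal from to compass clino tape" -> {"tape": 4, "compass": 2, "clino": 3}
        #   "*data normal from to tape compass clino" -> {"tape": 2, "compass": 3, "clino": 4}
        # so LoadSurvexLeg() always reads the tape via ls[datastar["tape"]], whatever the ordering.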
        # datastardefault = { # included here as reference, to help understand the code
        #     "type": "normal",
        #     "from": 0,
        #     "to": 1,
        #     "tape": 2,
        #     "compass": 3,
        #     "clino": 4}
        datastar = copy.deepcopy(self.datastardefault)
        if args == "":
            # naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
            return
        # DEFAULT | NORMAL | CARTESIAN | NOSURVEY | PASSAGE | TOPOFIL | CYLPOLAR | DIVING
        ls = args.lower().split()
        if ls[0] == "default":
            self.datastar = copy.deepcopy(self.datastardefault)
        elif ls[0] == "normal" or ls[0] == "topofil":
            if not ("from" in datastar and "to" in datastar):
                message = " ! - Unrecognised *data normal statement '{}' {}|{}".format(args, survexblock.name, survexblock.survexpath)
                print(message)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message)
                return
            else:
                datastar = self.datastardefault
                # ls = ["normal", "from", "to", "tape", "compass", "clino"]
                for i in range(1, len(ls)): # ls[0] is "normal"
                    if ls[i] in ["bearing", "compass"]:
                        datastar["compass"] = i-1
                    if ls[i] in ["clino", "gradient"]:
                        datastar["clino"] = i-1
                    if ls[i] in ["tape", "length"]:
                        datastar["tape"] = i-1
                self.datastar = copy.deepcopy(datastar)
                return
        elif ls[0] == "cartesian" or ls[0] == "nosurvey" or ls[0] == "diving" or ls[0] == "cylpolar" or ls[0] == "passage":
            # message = " ! - *data {} blocks ignored. {}|{} '{}'".format(ls[0].upper(), survexblock.name, survexblock.survexpath, args)
            # print(message)
            # print(message, file=sys.stderr)
            # DataIssue.objects.create(parser='survex', message=message)
            self.datastar["type"] = ls[0]
        else:
            message = " ! - Unrecognised *data statement '{}' {}|{}".format(args, survexblock.name, survexblock.survexpath)
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexFlags(self, args):
        # Valid flags are DUPLICATE, SPLAY, and SURFACE, and a flag may be preceded with NOT to turn it off.
        # Default values are NOT any of them.
        self.flagsstar = copy.deepcopy(self.flagsdefault)
        flags = []

        args = self.rx_flagsnot.sub("not", args)
        argslist = args.split()
        for s in argslist:
            flags.append(s)
        if debugprint:
            print(" ^ flagslist:{}".format(flags),)

        if "duplicate" in flags:
            self.flagsstar["duplicate"] = True
        if "surface" in flags:
            self.flagsstar["surface"] = True
        if "splay" in flags:
            self.flagsstar["splay"] = True

        if "notduplicate" in flags:
            self.flagsstar["duplicate"] = False
        if "notsurface" in flags:
            self.flagsstar["surface"] = False
        if "notsplay" in flags:
            self.flagsstar["splay"] = False

        # if self.flagsstar["duplicate"] == True or self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
        # actually we do want to count duplicates, as this is for "effort expended in surveying underground"
        if self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
            self.flagsstar["skiplegs"] = True
        if debugprint:
            print(" $ flagslist:{}".format(flags),)

    def IdentifyCave(self, cavepath):
        if cavepath.lower() in self.caveslist:
            return self.caveslist[cavepath.lower()]
        # TO DO - some of this is already done in generating self.caveslist, so simplify this,
        # esp. as it is in a loop.
        # TO DO recognise cave if different name, e.g. gruenstein == 281
        path_match = self.rx_cave.search(cavepath)
        if path_match:
            sluggy = '{}-{}'.format(path_match.group(1), path_match.group(2))
            guesses = [sluggy.lower(), path_match.group(2).lower()]
            for g in guesses:
                if g in self.caveslist:
                    self.caveslist[cavepath] = self.caveslist[g]
                    return self.caveslist[g]
            print(' ! Failed to find cave for {}'.format(cavepath.lower()))
        else:
            # not a cave, but that is fine.
            # print(' ! No regex (standard identifier) cave match for %s' % cavepath.lower())
            return None

    def GetSurvexDirectory(self, headpath):
        """This creates a SurvexDirectory if it has not been seen before, and on creation
        it sets the primarysurvexfile. This is correct, as it should be set on the first file
        in the directory, where 'first' is defined by the *include ordering. Which is what we
        are doing.
        """
        if not headpath:
            return self.svxdirs[""]
        if headpath.lower() not in self.svxdirs:
            self.svxdirs[headpath.lower()] = SurvexDirectory(path=headpath, primarysurvexfile=self.currentsurvexfile)
            self.svxdirs[headpath.lower()].save()
            self.survexdict[self.svxdirs[headpath.lower()]] = [] # list of the files in the directory
        return self.svxdirs[headpath.lower()]

    def ReportNonCaveIncludes(self, headpath, includelabel):
        """Ignore surface, kataster and gpx *include survex files
        """
        if headpath in self.ignorenoncave:
            # message = f" - {headpath} is <ignorenoncave> (while creating '{includelabel}' sfile & sdirectory)"
            # print("\n"+message)
            # print("\n"+message, file=sys.stderr)
            return
        for i in self.ignoreprefix:
            if headpath.startswith(i):
                message = f" - {headpath} starts with <ignoreprefix> (while creating '{includelabel}' sfile & sdirectory)"
                # print("\n"+message)
                # print("\n"+message, file=sys.stderr)
                return
        message = f" ! Error: '{headpath}' FAILURE (while creating '{includelabel}' in db - not a cave, nor in the ignore list of surface surveys)"
        # getting this triggered for gpx/2018 (cavern error) but not for gpx/2017 (no content).
        print("\n"+message)
        print("\n"+message, file=sys.stderr)
        DataIssue.objects.create(parser='survex', message=message)
        print(f' # datastack in LoadSurvexFile:{includelabel} type:', end="", file=sys.stderr)
        for dict in self.datastack:
            print(f'{dict["type"].upper()} ', end="", file=sys.stderr)

    def LoadSurvexFile(self, svxid):
        """Creates SurvexFile in the database, and SurvexDirectory if needed,
        with links to 'cave'.
        Creates a new current survexfile and valid .survexdirectory
        The survexblock passed-in is not necessarily the parent. FIX THIS.
        """
        if debugprint:
            print(" # datastack in LoadSurvexFile:{} 'type':".format(svxid), end="")
            for dict in self.datastack:
                print("'{}' ".format(dict["type"].upper()), end="")
            print("")

        depth = " " * self.depthbegin
        # print("{:2}{} - NEW survexfile:'{}'".format(self.depthbegin, depth, svxid))
        headpath = os.path.dirname(svxid)

        newfile = SurvexFile(path=svxid)
        newfile.save() # until we do this there is no internal id, so no foreign key works
        self.currentsurvexfile = newfile
        newdirectory = self.GetSurvexDirectory(headpath)
        newdirectory.save()
        newfile.survexdirectory = newdirectory
        self.survexdict[newdirectory].append(newfile)
        cave = self.IdentifyCave(headpath) # cave already exists in db

        if not newdirectory:
            message = " ! 'None' SurvexDirectory returned from GetSurvexDirectory({})".format(headpath)
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message)

        if cave:
            newdirectory.cave = cave
            newfile.cave = cave
            # print(f"\n - New directory {newdirectory} for cave {newdirectory.cave}", file=sys.stderr)
        else: # probably a surface survey
            self.ReportNonCaveIncludes(headpath, svxid)

        if not newfile.survexdirectory:
            message = " ! SurvexDirectory NOT SET in new SurvexFile {} ".format(svxid)
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message)
        self.currentsurvexfile.save() # django insists on this although it is already saved !?
        try:
            newdirectory.save()
        except:
            print(newdirectory, file=sys.stderr)
            print(newdirectory.primarysurvexfile, file=sys.stderr)
            raise

        if debugprint:
            print(" # datastack end LoadSurvexFile:{} 'type':".format(svxid), end="")
            for dict in self.datastack:
                print("'{}' ".format(dict["type"].upper()), end="")
            print("")

    def ProcessIncludeLine(self, included):
        global debugprint
        svxid = included.groups()[0]
        if svxid.lower() == debugprinttrigger.lower():
            debugprint = True
        self.LoadSurvexFile(svxid)
        self.stacksvxfiles.append(self.currentsurvexfile)

    def ProcessEdulcniLine(self, edulcni):
        """Saves the current survexfile in the db
        """
        global debugprint
        svxid = edulcni.groups()[0]
        if debugprint:
            depth = " " * self.depthbegin
            print("{:2}{} - Edulcni survexfile:'{}'".format(self.depthbegin, depth, svxid))
        if svxid.lower() == debugprinttrigger.lower():
            debugprint = False
        self.currentsurvexfile.save()
        self.currentsurvexfile = self.stacksvxfiles.pop()

    def LoadSurvexComment(self, survexblock, comment):
        # ignore all comments except ;ref, ;wallet and ;QM and ;*include (for the collated survex file)
        # rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')
        # rx_ref3 = re.compile(r'(?i)\s*wallet[.;]?')

        # This should also check that the QM survey point exists in the block
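        # Illustrative comment forms recognised below (not an exhaustive list from the original source):
        #   "; ref 2018#33"    matched by rx_commref
        #   "; wallet 2018#33" matched by rx_wallet
        #   "; 2018#33"        matched by rx_implicit
        # each is stripped to its id and passed to LoadSurvexRef().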
        refline = self.rx_commref.match(comment)
        if refline:
            # comment = re.sub('(?i)\s*ref[.;]?', "", comment.strip())
            comment = self.rx_ref2.sub("", comment.strip())
            self.LoadSurvexRef(survexblock, comment)
        walletline = self.rx_wallet.match(comment)
        if walletline:
            # comment = re.sub('(?i)\s*wallet[.;]?', "", comment.strip())
            comment = self.rx_ref3.sub("", comment.strip())
            self.LoadSurvexRef(survexblock, comment)
        implicitline = self.rx_implicit.match(comment)
        if implicitline:
            self.LoadSurvexRef(survexblock, comment)

        qml = self.rx_qm0.match(comment)
        if qml:
            qmline = self.rx_qm.match(comment)
            if qmline:
                self.LoadSurvexQM(survexblock, qmline)
            else:
                message = f' ! QM Unrecognised as a valid QM in "{survexblock.survexfile.path}" QM{qml.group(1)} {qml.group(2)}'
                print(message)
                DataIssue.objects.create(parser='survex', message=message, url=f'/survexfile/{survexblock.survexfile.path}.svx')

        included = self.rx_comminc.match(comment)
        # ;*include means 'we have been included'; whereas *include means 'proceed to include'
        if included:
            self.ProcessIncludeLine(included)

        edulcni = self.rx_commcni.match(comment)
        # ;*edulcni means we are returning from an included file
        if edulcni:
            self.ProcessEdulcniLine(edulcni)

    def LoadSurvexSetup(self, survexblock, survexfile):
        self.depthbegin = 0
        self.datastar = self.datastardefault
        blocklegs = self.legsnumber
        print(self.insp + " - MEM:{:.3f} Reading. parent:{} <> {} ".format(get_process_memory(), survexblock.survexfile.path, survexfile.path))
        self.lineno = 0
        sys.stderr.flush()
        self.callcount += 1
        if self.callcount % 10 == 0:
            print(".", file=sys.stderr, end='')
        if self.callcount % 500 == 0:
            print("\n", file=sys.stderr, end='')
        # Try to find the cave in the DB; if not, use the string as before
        path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
        if path_match:
            pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
            cave = getCaveByReference(pos_cave) # NB getCaveByReference is not imported in this file; this legacy path would fail if exercised
            if cave:
                survexfile.cave = cave

    def LinearLoad(self, survexblock, path, svxlines):
        """Loads a single survex file. Usually used to import all the survex files which have been collated
        into a single file. Loads the begin/end blocks using a stack for labels.
        """
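        # Illustrative note (not in the original source): on each *begin, the current units,
        # leg count, length, personexped list, datastar and flagsstar are pushed onto their
        # respective stacks and reset; the matching *end pops them back, so nested blocks
        # cannot leak state into their parents.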
        blkid = None
        pathlist = None
        args = None
        oldflags = None
        blockcount = 0
        self.lineno = 0
        slengthtotal = 0.0
        nlegstotal = 0
        self.relativefilename = path
        cave = self.IdentifyCave(path) # this will produce null for survex files which are geographic collections

        self.currentsurvexfile = survexblock.survexfile
        self.currentsurvexfile.save() # django insists on this although it is already saved !?

        self.datastar = copy.deepcopy(self.datastardefault)
        self.flagsstar = copy.deepcopy(self.flagsdefault)

        def tickle():
            nonlocal blockcount

            blockcount += 1
            if blockcount % 10 == 0:
                print(".", file=sys.stderr, end='')
            if blockcount % 200 == 0:
                print("\n", file=sys.stderr, end='')
                print(" - MEM:{:7.3f} MB in use".format(get_process_memory()), file=sys.stderr)
                print(" ", file=sys.stderr, end='')
                sys.stderr.flush()

        def printbegin():
            nonlocal blkid
            nonlocal pathlist

            depth = " " * self.depthbegin
            self.insp = depth
            if debugprint:
                print("{:2}{} - Begin for :'{}'".format(self.depthbegin, depth, blkid))
            pathlist = ""
            for id in self.stackbegin:
                if len(id) > 0:
                    pathlist += "." + id

        def printend():
            nonlocal args

            depth = " " * self.depthbegin
            if debugprint:
                print("{:2}{} - End from:'{}'".format(self.depthbegin, depth, args))
                print("{:2}{} - LEGS: {} (length:{} units:{})".format(self.depthbegin,
                      depth, self.legsnumber, self.slength, self.units)) # the original format arguments were scrambled (slength passed twice, legsnumber in the length slot)

        def pushblock():
            nonlocal blkid
            if debugprint:
                print(" # datastack at 1 *begin {} 'type':".format(blkid), end="")
                for dict in self.datastack:
                    print("'{}' ".format(dict["type"].upper()), end="")
                print("")
                print("'{}' self.datastar ".format(self.datastar["type"].upper()))
            # ------------ * DATA
            self.datastack.append(copy.deepcopy(self.datastar))
            # ------------ * DATA
            if debugprint:
                print(" # datastack at 2 *begin {} 'type':".format(blkid), end="")
                for dict in self.datastack:
                    print("'{}' ".format(dict["type"].upper()), end="")
                print("")
                print("'{}' self.datastar ".format(self.datastar["type"].upper()))

            # ------------ * FLAGS
            self.flagsstack.append(copy.deepcopy(self.flagsstar))
            # ------------ * FLAGS

        def popblock():
            nonlocal blkid
            nonlocal oldflags
            if debugprint:
                print(" # datastack at *end '{} 'type':".format(blkid), end="")
                for dict in self.datastack:
                    print("'{}' ".format(dict["type"].upper()), end="")
                print("")
                print("'{}' self.datastar ".format(self.datastar["type"].upper()))
            # ------------ * DATA
            self.datastar = copy.deepcopy(self.datastack.pop())
            # ------------ * DATA
            if debugprint:
                print(" # datastack after *end '{} 'type':".format(blkid), end="")
                for dict in self.datastack:
                    print("'{}' ".format(dict["type"].upper()), end="")
                print("")
                print("'{}' self.datastar ".format(self.datastar["type"].upper()))

            # ------------ * FLAGS
            self.flagsstar = copy.deepcopy(self.flagsstack.pop())
            # ------------ * FLAGS
            if debugprint:
                if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                    print(" # POP 'any' flag now:'{}' was:{} ".format(self.flagsstar["skiplegs"], oldflags["skiplegs"]))

        def starstatement(star):
            nonlocal survexblock
            nonlocal blkid
            nonlocal pathlist
            nonlocal args
            nonlocal oldflags
            nonlocal slengthtotal
            nonlocal nlegstotal

            cmd, args = star.groups()
            cmd = cmd.lower()

            # ------------------------BEGIN
            if self.rx_begin.match(cmd):
                blkid = args.lower()
                # PUSH state ++++++++++++++
                self.stackbegin.append(blkid)
                self.unitsstack.append((self.units, self.unitsfactor))
                self.legsnumberstack.append(self.legsnumber)
                self.slengthstack.append(self.slength)
                self.personexpedstack.append(self.currentpersonexped)
                pushblock()
                # PUSH state ++++++++++++++
                self.legsnumber = 0
                self.slength = 0.0
                self.units = "metres"
                self.currentpersonexped = []
                printbegin()
                newsurvexblock = SurvexBlock(name=blkid, parent=survexblock,
                                             survexpath=pathlist,
                                             cave=self.currentcave, survexfile=self.currentsurvexfile,
                                             legsall=0, legslength=0.0)
                newsurvexblock.save()
                newsurvexblock.title = "(" + survexblock.title + ")" # copy parent initially, overwrite if it has its own
                survexblock = newsurvexblock
                survexblock.save() # django insists on this, but we want to save at the end!
                tickle()

            # ---------------------------END
            elif self.rx_end.match(cmd):
                survexblock.legsall = self.legsnumber
                survexblock.legslength = self.slength
                printend()
                slengthtotal += self.slength
                nlegstotal += self.legsnumber

                try:
                    survexblock.parent.save() # django insists on this although it is already saved !?
                except:
                    print(survexblock.parent, file=sys.stderr)
                    raise
                try:
                    survexblock.save() # save to db at end of block
                except:
                    print(survexblock, file=sys.stderr)
                    raise
                # POP state ++++++++++++++
                popblock()
                self.currentpersonexped = self.personexpedstack.pop()
                self.legsnumber = self.legsnumberstack.pop()
                self.units, self.unitsfactor = self.unitsstack.pop()
                self.slength = self.slengthstack.pop()
                blkid = self.stackbegin.pop()
                self.currentsurvexblock = survexblock.parent
                survexblock = survexblock.parent
                oldflags = self.flagsstar
                self.depthbegin -= 1
                # POP state ++++++++++++++

            # -----------------------------
            elif self.rx_title.match(cmd):
                quotedtitle = re.match(r"(?i)^\"(.*)\"$", args)
                if quotedtitle:
                    survexblock.title = quotedtitle.groups()[0]
                else:
                    survexblock.title = args
            elif self.rx_ref.match(cmd):
                self.LoadSurvexRef(survexblock, args)
            elif self.rx_flags.match(cmd):
                oldflags = self.flagsstar
                self.LoadSurvexFlags(args)
                if debugprint:
                    if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                        print(" # CHANGE 'any' flag now:'{}' was:{} ".format(self.flagsstar["skiplegs"], oldflags["skiplegs"]))

            elif self.rx_data.match(cmd):
                self.LoadSurvexDataNormal(survexblock, args)
            elif self.rx_alias.match(cmd):
                self.LoadSurvexAlias(survexblock, args)
            elif self.rx_entrance.match(cmd):
                self.LoadSurvexEntrance(survexblock, args)
            elif self.rx_date.match(cmd):
                self.LoadSurvexDate(survexblock, args)
            elif self.rx_units.match(cmd):
                self.LoadSurvexUnits(survexblock, args)
            elif self.rx_team.match(cmd):
                self.LoadSurvexTeam(survexblock, args)
            elif self.rx_set.match(cmd) and self.rx_names.match(cmd):
                pass
            elif self.rx_include.match(cmd):
                message = " ! -ERROR *include command not expected here {}. Re-run a full Survex import.".format(path)
                print(message)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message)
            else:
                self.LoadSurvexFallThrough(survexblock, args, cmd)

        for svxline in svxlines:
            self.lineno += 1
            sline, comment = self.rx_comment.match(svxline).groups()
            if comment:
                # this catches the ;*include NEWFILE and ;*edulcni ENDOFFILE lines too
                self.LoadSurvexComment(survexblock, comment)

            if not sline:
                continue # skip blank lines

            # detect a merge failure inserted by version control
            mfail = self.rx_badmerge.match(sline)
            if mfail:
                message = f"\n ! - ERROR version control merge failure\n - '{sline}'\n"
                message = message + f" - line {self.lineno} in {blkid} in {survexblock}\n - NERD++ needed to fix it"
                print(message)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message)
                continue # skip this line

            # detect a star command
            star = self.rx_star.match(sline)
            if star:
                # yes, we are reading a *command
                starstatement(star)
            else: # not a *cmd, so we are reading data OR a ";" rx_comment failed. We hope.
                self.LoadSurvexLeg(survexblock, sline, comment)

        self.legsnumber = nlegstotal
        self.slength = slengthtotal

    def PushdownStackScan(self, survexblock, path, fin, flinear, fcollate):
        """Follows the *include links in all the survex files from the root file 1623.svx
        and reads only the *include, *begin and *end statements. It produces a linearised
        list of the include tree and detects blocks included more than once.
        """
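        # Illustrative sketch of the two outputs (not from the original source), based on the
        # write() formats below. For an included file caves-1623/290/290.svx, flinear gets roughly
        #    1    *include caves-1623/290/290
        #     1     *begin 290
        #     1     *end 290
        #    1    *edulcni caves-1623/290/290
        # while fcollate gets the file's full contents bracketed by
        #    ;*include caves-1623/290/290
        #    ...file contents...
        #    ;*edulcni caves-1623/290/290
        # which LoadSurvexComment() later uses to track file boundaries in the collated file.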
        thissvxline = 0
        indent = " " * self.depthinclude
        sys.stderr.flush()
        self.callcount += 1
        if self.callcount % 10 == 0:
            print(".", file=sys.stderr, end='')
        if self.callcount % 500 == 0:
            print("\n ", file=sys.stderr, end='')

        if path in self.svxfileslist:
            message = " * Warning. Duplicate detected in *include list at callcount:{} depth:{} file:{}".format(self.callcount, self.depthinclude, path)
            print(message)
            print(message, file=flinear)
            print("\n"+message, file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message)
            if self.svxfileslist.count(path) > 20:
                message = " ! ERROR. Survex file already seen 20x. Probably an infinite loop, so fix your *include statements that include this. Aborting. {}".format(path)
                print(message)
                print(message, file=flinear)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message)
                return
        self.svxfileslist.append(path)

        try:
            svxlines = fin.read().splitlines()
        except UnicodeDecodeError:
            # some bugger put an umlaut in a non-UTF survex file ?!
            message = f" ! ERROR *include file '{path}' in '{survexblock}' has UnicodeDecodeError"
            print(message)
            print(message, file=sys.stderr)
            offendingfile = "/survexfile/" + path + ".svx"
            DataIssue.objects.create(parser='survex', message=message, url=offendingfile)
            return # skip this survex file and all things *included in it

        for svxline in svxlines:
            self.lineno += 1
            thissvxline += 1
            # detect a merge failure inserted by version control
            mfail = self.rx_badmerge.match(svxline)
            if mfail:
                message = f"\n!! - ERROR version control merge failure\n - '{svxline}'\n"
                message = message + f" - in '{path}' at line {thissvxline}\n"
                message = message + f" - line {self.lineno} {survexblock}\n - Parsing aborted. NERD++ needed to fix it"
                print(message)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message)
                return # skip this survex file and all things *included in it

            includestmt = self.rx_include.match(svxline)
            if not includestmt:
                fcollate.write("{}\n".format(svxline.strip()))

            sline, comment = self.rx_comment.match(svxline.strip()).groups()
            star = self.rx_star.match(sline)
            if star: # yes, we are reading a *cmd
                cmd, args = star.groups()
                cmd = cmd.lower()
                if re.match("(?i)include$", cmd):
                    includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))

                    fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                    self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath))
                    self.checkUniqueness(os.path.join(settings.SURVEX_DATA, includepath))
                    if os.path.isfile(fullpath):
                        #--------------------------------------------------------
                        self.depthinclude += 1
                        fininclude = open(fullpath, 'r')
                        fcollate.write(";*include {}\n".format(includepath))
                        flinear.write("{:2} {} *include {}\n".format(self.depthinclude, indent, includepath))
                        push = includepath.lower()
                        self.includestack.append(push)
                        #-----------------
                        self.PushdownStackScan(survexblock, includepath, fininclude, flinear, fcollate)
                        #-----------------
                        pop = self.includestack.pop()
                        if pop != push:
                            message = "!! ERROR mismatch *include pop!=push '{}'!='{}'\n{}".format(pop, push, self.includestack) # the original format string had only one placeholder for three arguments
                            print(message)
                            print(message, file=flinear)
                            print(message, file=sys.stderr)
                            DataIssue.objects.create(parser='survex', message=message)
                        flinear.write("{:2} {} *edulcni {}\n".format(self.depthinclude, indent, pop))
                        fcollate.write(";*edulcni {}\n".format(pop))
                        fininclude.close()
                        self.depthinclude -= 1
                        #--------------------------------------------------------
                    else:
                        message = " ! ERROR *include file not found for:'{}'".format(includepath)
                        print(message)
                        print(message, file=sys.stderr)
                        DataIssue.objects.create(parser='survex', message=message)
                elif re.match("(?i)begin$", cmd):
                    self.depthbegin += 1
                    depth = " " * self.depthbegin
                    if args:
                        pushargs = args
                    else:
                        pushargs = " "
                    self.stackbegin.append(pushargs.lower())
                    flinear.write(" {:2} {} *begin {}\n".format(self.depthbegin, depth, args))
                elif re.match("(?i)end$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(" {:2} {} *end {}\n".format(self.depthbegin, depth, args))
                    if not args:
                        args = " "
                    popargs = self.stackbegin.pop()
                    if popargs != args.lower():
                        message = "!! ERROR mismatch in BEGIN/END labels pop!=push '{}'!='{}'\n{}".format(popargs, args, self.stackbegin)
                        print(message)
                        print(message, file=flinear)
                        print(message, file=sys.stderr)
                        DataIssue.objects.create(parser='survex', message=message)

                    self.depthbegin -= 1
                elif re.match("(?i)title$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(" {:2} {} *title {}\n".format(self.depthbegin, depth, args))

    def checkUniqueness(self, fullpath):
        fn = Path(fullpath).name
        if fn not in self.uniquename:
            self.uniquename[fn] = [fullpath]
        else:
            self.uniquename[fn].append(fullpath)
            # This is not an error now that we are moving .3d files to the :loser: directory tree
            # message = f" ! NON-UNIQUE survex filename, '{fn}' - '{self.uniquename[fn]}' #{len(self.uniquename[fn])}"
            # print(message)
            # DataIssue.objects.create(parser='survex', message=message)
            message = f" NOTE: non-unique survex filename, '{fn}' - '{self.uniquename[fn]}' #{len(self.uniquename[fn])}"
            print(message)

    def RunSurvexIfNeeded(self, fullpath):
        now = time.time()
        cav_t = now - 365*24*3600
        log_t = now - 365*24*3600
        svx_t = now - 365*24*3600

        def runcavern():
            '''Regenerates the .3d file from the .svx if it is older than the svx file, or older than the software,
            or randomly using chaosmonkey() just to keep things ticking over.
            '''
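            # Illustrative (not in the original source): the subprocess call below is equivalent
            # to running, at a shell prompt,
            #   cavern --log --output=<parent-dir-of-svx> <fullpath>.svx
            # where cavern is the survex compiler configured as settings.CAVERN.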
print(f" - Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{fullpath}'\n at '{logpath}'\n")
|
|
print(f"days svx old: {(svx_t - log_t)/(24*3600):.1f} cav:{(cav_t - log_t)/(24*3600):.1f} log old: { (now - log_t)/(24*3600):.1f}")
|
|
|
|
outputdir = Path(str(f'{fullpath}.svx')).parent
|
|
sp = subprocess.run([settings.CAVERN, "--log", f'--output={outputdir}', f'{fullpath}.svx'],
|
|
capture_output=True, check=False, text=True)
|
|
if sp.returncode != 0:
|
|
message = f' ! Error running {settings.CAVERN}: {fullpath}'
|
|
url = f'/survexfile{fullpath}.svx'.replace(settings.SURVEX_DATA, "")
|
|
DataIssue.objects.create(parser='xEntrances', message=message, url=url)
|
|
print(message)
|
|
print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
|
|
self.caverncount += 1
|
|
|
|
# should also collect all the .err files too and create a DataIssue for each one which
|
|
# - is nonzero in size AND
|
|
# - has Error greater than 5% anywhere, or some other more serious error
|
|
|
|
errpath = Path(fullpath + ".err")
|
|
if errpath.is_file():
|
|
if errpath.stat().st_size == 0:
|
|
errpath.unlink() # delete empty closure error file
|
|
|
|
|
|
        svxpath = Path(fullpath + ".svx")
        logpath = Path(fullpath + ".log")
        outputdir = Path(svxpath).parent

        if not logpath.is_file(): # always run if logfile not there
            runcavern()
            return

        self.caverndate = now - 2*365*24*3600 # NB this unconditional override means the 'which cavern' branch below can never be taken

        if not self.caverndate:
            sp = subprocess.run(["which", "{}".format(settings.CAVERN)],
                                capture_output=True, check=False, text=True)
            if sp.returncode != 0:
                message = f' ! Error running "which" on {settings.CAVERN}'
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)
                print('stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
            self.caverndate = os.path.getmtime(sp.stdout.strip())
        else:
            self.caverndate = now - 2*365*24*3600
        cav_t = self.caverndate
        log_t = os.path.getmtime(logpath)
        svx_t = os.path.getmtime(svxpath)
        now = time.time()

        if svx_t - log_t > 0: # stale, svx file is newer than log
            runcavern()
            return
        if now - log_t > 60*24*60*60: # >60 days, re-run anyway
            runcavern()
            return
        if cav_t - log_t > 0: # new version of cavern
            runcavern()
            return
        if chaosmonkey(350): # one in every 350 runs
            runcavern()

def FindAndLoadSurvex(survexblockroot):
    """Follows the *include links successively to find files in the whole include tree
    """
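    # Outline (not in the original source): this runs two passes over the data.
    # Pass 1: PushdownStackScan() walks the *include tree, runs cavern where .3d/.log files
    #         are stale, and writes every line into one collated file "_<topname>.svx" with
    #         ;*include / ;*edulcni markers at each file boundary.
    # Pass 2: LinearLoad() re-reads that single collated file and builds the SurvexBlock /
    #         SurvexFile database objects in one linear sweep.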
print(' - redirecting stdout to svxblks.log...')
|
|
stdout_orig = sys.stdout
|
|
# Redirect sys.stdout to the file
|
|
sys.stdout = open('svxblks.log', 'w')
|
|
|
|
print(' - Scanning Survex Blocks tree from {}.svx ...'.format(settings.SURVEX_TOPNAME),file=sys.stderr)
|
|
survexfileroot = survexblockroot.survexfile # i.e. SURVEX_TOPNAME only
|
|
collatefilename = "_" + survexfileroot.path + ".svx"
|
|
|
|
svx_scan = LoadingSurvex()
|
|
svx_scan.callcount = 0
|
|
svx_scan.depthinclude = 0
|
|
fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, survexfileroot.path)
|
|
|
|
# Rather than do this check for the presence of the .log and .3d files synchronously here,
|
|
# we should instead run this in a separate thread asynchronously.
|
|
print(" - RunSurvexIfNeeded cavern on '{}'".format(fullpathtotop), file=sys.stderr)
|
|
svx_scan.RunSurvexIfNeeded(fullpathtotop)
|
|
svx_scan.checkUniqueness(fullpathtotop)
|
|
|
|
indent=""
|
|
fcollate = open(collatefilename, 'w')
|
|
|
|
mem0 = get_process_memory()
|
|
print(" - MEM:{:7.2f} MB START".format(mem0),file=sys.stderr)
|
|
flinear = open('svxlinear.log', 'w')
|
|
flinear.write(" - MEM:{:7.2f} MB START {}\n".format(mem0,survexfileroot.path))
|
|
print(" ", file=sys.stderr,end='')
|
|
|
|
finroot = survexfileroot.OpenFile()
|
|
fcollate.write(";*include {}\n".format(survexfileroot.path))
|
|
flinear.write("{:2} {} *include {}\n".format(svx_scan.depthinclude, indent, survexfileroot.path))
|
|
|
|
import cProfile, pstats
|
|
from pstats import SortKey
|
|
pr = cProfile.Profile()
|
|
pr.enable()
|
|
#----------------------------------------------------------------
|
|
svx_scan.PushdownStackScan(survexblockroot, survexfileroot.path, finroot, flinear, fcollate)
|
|
#----------------------------------------------------------------
|
|
pr.disable()
|
|
with open('PushdownStackScan.prof', 'w') as f:
|
|
ps = pstats.Stats(pr, stream=f)
|
|
ps.sort_stats(SortKey.CUMULATIVE)
|
|
ps.print_stats()

    # '*edulcni' ('include' reversed) marks the end of an included file
    flinear.write("{:2} {} *edulcni {}\n".format(svx_scan.depthinclude, indent, survexfileroot.path))
    fcollate.write(";*edulcni {}\n".format(survexfileroot.path))
    mem1 = get_process_memory()
    flinear.write("\n - MEM:{:.2f} MB STOP {}\n".format(mem1, survexfileroot.path))
    flinear.write(" - MEM:{:.3f} MB USED\n".format(mem1-mem0))
    svxfileslist = svx_scan.svxfileslist
    flinear.write(" - {:,} survex files in linear include list \n".format(len(svxfileslist)))
    flinear.close()
    fcollate.close()
    print("\n - {:,} runs of survex 'cavern' refreshing .3d files \n".format(svx_scan.caverncount), file=sys.stderr)
    svx_scan = None # drop our only reference so the LoadingSurvex instance (and its lists, dicts etc.) can be garbage-collected
    print("\n - {:,} survex files in linear include list \n".format(len(svxfileslist)), file=sys.stderr)

    mem1 = get_process_memory()
    print(" - MEM:{:7.2f} MB END".format(mem1), file=sys.stderr)
    print(" - MEM:{:7.3f} MB USED".format(mem1-mem0), file=sys.stderr)
    svxfileslist = [] # free memory

    # Before doing this, it would be good to identify the *equate and *entrance lines that are
    # relevant to the entrance locations which LoadPositions() loads after this has run -
    # though arguably that identification could be done beforehand.
    # See MapLocations() for how the entrances are found.

    print('\n - Loading All Survex Blocks (LinearLoad)', file=sys.stderr)
    svx_load = LoadingSurvex()

    svx_load.survexdict[survexfileroot.survexdirectory] = []
    svx_load.survexdict[survexfileroot.survexdirectory].append(survexfileroot)
    svx_load.svxdirs[""] = survexfileroot.survexdirectory

    # This next should be rewritten to use a generator so that only one
    # line is held in memory at a time:
    with open(collatefilename, "r") as fcollate:
        svxlines = fcollate.read().splitlines()
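    # A minimal sketch of that generator rewrite (assuming LinearLoad only ever iterates
    # forward over svxlines and never indexes back into it):
    #   def svxline_gen(filename):
    #       with open(filename) as f:
    #           for line in f:
    #               yield line.rstrip('\n')
    #   svx_load.LinearLoad(survexblockroot, survexfileroot.path, svxline_gen(collatefilename))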
    #pr2 = cProfile.Profile()
    #pr2.enable()
    print(" ", file=sys.stderr, end='')
    #----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot, survexfileroot.path, svxlines)
    #----------------------------------------------------------------
    #pr2.disable()
    # with open('LinearLoad.prof', 'w') as f:
    #     ps = pstats.Stats(pr2, stream=f)
    #     ps.sort_stats(SortKey.CUMULATIVE)
    #     ps.print_stats()

    mem1 = get_process_memory() # refresh, so that STOP reports memory after LinearLoad, not before it
    print("\n - MEM:{:7.2f} MB STOP".format(mem1), file=sys.stderr)
    print(" - MEM:{:7.3f} MB USED".format(mem1-mem0), file=sys.stderr)

    # Close the logging file and restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    legsnumber = svx_load.legsnumber
    mem1 = get_process_memory()

    print(" - Number of SurvexDirectories: {}".format(len(svx_load.survexdict)))
    tf = 0
    for d in svx_load.survexdict:
        tf += len(svx_load.survexdict[d])
    print(" - Number of SurvexFiles: {}".format(tf))
    print(f" - Number of Survex legs: {legsnumber}")
    svx_load = None

    return legsnumber

def MakeSurvexFileRoot():
    """Returns a file_object.path = SURVEX_TOPNAME associated with directory_object.path = SURVEX_DATA
    """
    fileroot = SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    fileroot.save()
    directoryroot = SurvexDirectory(path=settings.SURVEX_DATA, cave=None, primarysurvexfile=fileroot)
    directoryroot.save()
    fileroot.survexdirectory = directoryroot # i.e. SURVEX_DATA/SURVEX_TOPNAME
    # Mutually dependent objects need a double-save like this: the file must be saved
    # before the directory can reference it, and only then can the file be updated
    # to point back at the directory.
    fileroot.save()
    return fileroot

def LoadSurvexBlocks():

    print(' - Flushing All Survex Blocks...')
    SurvexBlock.objects.all().delete()
    SurvexFile.objects.all().delete()
    SurvexDirectory.objects.all().delete()
    SurvexPersonRole.objects.all().delete()
    SurvexStation.objects.all().delete()
    print(" - Flushing survex Data Issues")
    DataIssue.objects.filter(parser='survex').delete()
    DataIssue.objects.filter(parser='survexleg').delete()
    DataIssue.objects.filter(parser='survexunits').delete()
    DataIssue.objects.filter(parser='entrances').delete()
    DataIssue.objects.filter(parser='xEntrances').delete()

    survexfileroot = MakeSurvexFileRoot()
    # this next makes a block_object associated with a file_object.path = SURVEX_TOPNAME
    survexblockroot = SurvexBlock(name=ROOTBLOCK, survexpath="", cave=None, survexfile=survexfileroot,
                                  legsall=0, legslength=0.0)
    survexblockroot.save()

    print(' - Loading Survex Blocks...')
    memstart = get_process_memory()
    #----------------------------------------------------------------
    FindAndLoadSurvex(survexblockroot)
    #----------------------------------------------------------------
    memend = get_process_memory()
    print(" - MEMORY start:{:.3f} MB end:{:.3f} MB increase={:.3f} MB".format(memstart, memend, memend-memstart))

    survexblockroot.save()

    print(' - Loaded All Survex Blocks.')


poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
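# Matches one line of a survexport .pos file, e.g. (values illustrative):
# (   12345.67,   81234.56,    1623.00 ) caves-1623.204.entrance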

def LoadPositions():
    """First load the survex stations for entrances and fixed points (about 600) into the database.
    Run cavern to produce a complete .3d file, then run 'survexport --pos' to produce a table of
    all survey point positions. Then look up each position by name to see if we have it in the
    database and, if we do, save the x/y/z coordinates. This gives us the coordinates of the
    entrances. If we don't have it in the database, print an error message and discard it.
    """
    svx_t = 0
    d3d_t = 0

    def runcavern3d():
        outputdir = Path(f'{topdata}.svx').parent

        # print(" - Regenerating stale cavern .log and .3d for '{}'\n days old: {:.1f} {:.1f} {:.1f}".
        #      format(topdata, (svx_t - d3d_t)/(24*3600), (cav_t - d3d_t)/(24*3600), (now - d3d_t)/(24*3600)))

        file3d = Path(f'{topdata}.3d')
        try:
            sp = subprocess.run([settings.CAVERN, "--log", f"--output={outputdir}", f"{topdata}.svx"],
                                capture_output=True, check=False, text=True) # check=False means exception not raised
            if sp.returncode != 0:
                message = f' ! Error: cavern: creating {file3d} in runcavern3d()'
                DataIssue.objects.create(parser='survex', message=message)
                print(message)

                # find the errors in the 1623.log file
                sp = subprocess.run(["grep", "error:", f"{topdata}.log"],
                                    capture_output=True, check=False, text=True) # check=False means exception not raised
                message = f' ! Error: cavern: {sp.stdout}'
                DataIssue.objects.create(parser='survex', message=message)
                print(message)

        except:
            message = f" ! CalledProcessError 'cavern' in runcavern3d() at {topdata}."
            DataIssue.objects.create(parser='survex', message=message)
            print(message)

            if file3d.is_file():
                message = f" ! CalledProcessError. File permissions {file3d.stat().st_mode} on {str(file3d)}"
                DataIssue.objects.create(parser='survex', message=message)
                print(message)

        if file3d.is_file(): # might be an old one though
            try:
                # print(" - Regenerating {} {}.3d in {}".format(settings.SURVEXPORT, topdata, settings.SURVEX_DATA))
                sp = subprocess.run([settings.SURVEXPORT, '--pos', f'{file3d}'], cwd=settings.SURVEX_DATA,
                                    capture_output=True, check=False, text=True)
                if sp.returncode != 0:
                    print(f' ! Error: survexport creating {topdata}.pos in runcavern3d().\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
            except:
                message = f" ! CalledProcessError 'survexport' in runcavern3d() at {file3d}."
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)
        else:
            message = f" ! Failed to find {file3d} so aborting generation of new .pos, using old one if present"
            DataIssue.objects.create(parser='entrances', message=message)
            print(message)

    topdata = os.fspath(Path(settings.SURVEX_DATA) / settings.SURVEX_TOPNAME)
    print(' - Generating a list of Pos from %s.svx and then loading...' % (topdata))

    found = 0
    skip = {}
    print("\n") # extra line because cavern overwrites the text buffer somehow
    # cavern defaults to using same cwd as supplied input file

    # the mtime of the cavern binary itself is used below as a proxy for "new version of cavern installed"
    completed_process = subprocess.run(["which", "{}".format(settings.CAVERN)],
                                       capture_output=True, check=True, text=True)
    cav_t = os.path.getmtime(completed_process.stdout.strip())

    svxpath = topdata + ".svx"
    d3dpath = topdata + ".3d"
    pospath = topdata + ".pos"

    svx_t = os.path.getmtime(svxpath)

    if os.path.isfile(d3dpath):
        # always fails to find log file if a double directory, e.g. caves-1623/B4/B4/B4.svx Why ?
        d3d_t = os.path.getmtime(d3dpath)

    now = time.time()
    if not os.path.isfile(pospath):
        runcavern3d()
    if not os.path.isfile(d3dpath):
        runcavern3d()
    elif svx_t - d3d_t > 0: # stale, 3d older than svx file
        runcavern3d()
    elif now - d3d_t > 60*24*60*60: # >60 days old, re-run anyway
        runcavern3d()
    elif cav_t - d3d_t > 0: # new version of cavern
        runcavern3d()

    mappoints = {}
    for pt in MapLocations().points():
        svxid, number, point_type, label = pt
        mappoints[svxid] = True
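    # Only the svxid part is used: it becomes a suffix key which the .pos loop below
    # matches against full station names using id.endswith(sid).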

    if not Path(pospath).is_file():
        message = f" ! Failed to find {pospath} so aborting generation of entrance locations. "
        DataIssue.objects.create(parser='entrances', message=message)
        print(message)
        return

    posfile = open(pospath)
    posfile.readline() # drop the header line
    try:
        survexblockroot = SurvexBlock.objects.get(name=ROOTBLOCK)
    except:
        try:
            survexblockroot = SurvexBlock.objects.get(id=1)
        except:
            message = ' ! FAILED to find root SurvexBlock'
            print(message)
            DataIssue.objects.create(parser='survex', message=message)
            raise
    for line in posfile.readlines():
        r = poslineregex.match(line)
        if r:
            x, y, z, id = r.groups()
            for sid in mappoints:
                if id.endswith(sid):
                    blockpath = "." + id[:-len(sid)].strip(".")
                    # But why are we doing this? Why do we need the survexblock id for each of these ?
                    # ..because mostly they don't actually appear in any SVX file. We should match them up
                    # via the cave data, not by this half-arsed syntactic match which almost never works. PMS.
                    if False:
                        try:
                            sbqs = SurvexBlock.objects.filter(survexpath=blockpath)
                            if len(sbqs) == 1:
                                sb = sbqs[0]
                            if len(sbqs) > 1:
                                message = " ! MULTIPLE SurvexBlocks {:3} matching Entrance point {} {} '{}'".format(len(sbqs), blockpath, sid, id)
                                print(message)
                                DataIssue.objects.create(parser='survex', message=message)
                                sb = sbqs[0]
                            elif len(sbqs) <= 0:
                                message = " ! ZERO SurvexBlocks matching Entrance point {} {} '{}'".format(blockpath, sid, id)
                                print(message)
                                DataIssue.objects.create(parser='survex', message=message)
                                sb = survexblockroot
                        except:
                            message = ' ! FAIL in getting SurvexBlock matching Entrance point {} {}'.format(blockpath, sid)
                            print(message)
                            DataIssue.objects.create(parser='survex', message=message)
                    try:
                        ss = SurvexStation(name=id, block=survexblockroot)
                        ss.x = float(x)
                        ss.y = float(y)
                        ss.z = float(z)
                        ss.save()
                        found += 1
                    except:
                        message = ' ! FAIL to create SurvexStation Entrance point {} {}'.format(blockpath, sid)
                        print(message)
                        DataIssue.objects.create(parser='survex', message=message)
                        raise
    print(" - {} SurvexStation entrances found.".format(found))