import copy
import os
import re
import subprocess
import sys
import time
from datetime import date, datetime, timezone
from pathlib import Path

import troggle.settings as settings
from troggle.core.models.caves import Cave, Entrance, GetCaveLookup
from troggle.core.models.logbooks import QM
from troggle.core.models.survex import SurvexBlock, SurvexFile, SurvexPersonRole, SurvexStation
from troggle.core.models.wallets import Wallet
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.utils import chaosmonkey, get_process_memory
from troggle.core.utils import write_and_commit

from troggle.parsers.caves import create_new_cave, do_ARGE_cave, AREACODES, ARGEAREAS
from troggle.parsers.people import GetPersonExpeditionNameLookup, known_foreigner

"""Imports the tree of survex files following from a defined root .svx file
|
|
It also scans the Loser repo for all the svx files, which it loads individually afterwards.
|
|
"""
|
|
|
|
todo = """
|
|
- Obscure bug in the *team inheritance and rootblock initialization needs tracking down,
|
|
probably in the team cache which should NOT be global, but should be an instance variable of
|
|
LoadingSurvex
|
|
|
|
- Lots to do to cut down on unnecessary .save() calls to avoid hitting the db so much. Should
|
|
speed it up noticably.
|
|
|
|
- Learn to use Django .select_related() and .prefetch_related() to speed things up
|
|
https://zerotobyte.com/how-to-use-django-select-related-and-prefetch-related/
|
|
|
|
- LoadSurvexFile() Creates a new current survexfile
|
|
The survexblock passed-in is not necessarily the survex parent. FIX THIS.
|
|
|
|
- When Olly implements LEG in the 'dump3d --legs' utility, then we can use that to get the length of
|
|
all the legs in a survex block instead of adding them up oursleves. Which means that we can
|
|
ignore all the Units and offset stuff, that troggle will work with survex files with backsights,
|
|
repeated readings from distox etc.. Not actually useful for pre 2022 survey data,
|
|
but good future-proofing.
|
|
Also it will be a tiny bit more accurate as these leg lengths are after loop closure fixup.
|
|
"""
|
|
survexblockroot = None
ROOTBLOCK = "rootblock"
METRESINFEET = 3.28084
UNSEENS = "_unseens.svx"

IGNOREFILES = ["dummy_file"]
IGNOREPREFIX = ["surface", "kataster", "gpx", "deprecated"]  # "fixedpts",
EXCEPTPREFIX = ["surface/terrain", "kataster/kataster-boundaries", "gpx/gpx_publish", "template", "docs", "deprecated", "subsections", "1623-and-1626-no-schoenberg-hs", "1623-and-1624-and-1626-and-1627", "1623-and-1626", "1623.svx", "1626.svx", "smk-system.svx"]
# ignorenoncave = [
#     "caves-1623",
#     "caves-1623/2007-NEU",
#     "caves-1626",
#     "caves-1624",
#     "caves-1627",
#     "fixedpts/gps/gps00raw",
#     "",
# ]

stop_dup_warning = False
dup_includes = 0
debugprint = False  # Turns on debug printout for just one *include file
debugprinttrigger = "!"

dataissues = []

class SurvexLeg:
    """No longer a models.Model subclass, so no longer a database table"""

    tape = 0.0
    compass = 0.0
    clino = 0.0

def datewallet(w, earliest):
    """Gets the date of the oldest (earliest) survexblock associated with the wallet
    REFACTOR this to do the whole date-getting task

    Currently there is only one SurvexBlock, but this is in anticipation of
    changing the schema to allow many.
    """
    first = earliest
    blocks = SurvexBlock.objects.filter(scanswallet=w)  # only ONE I think ?!
    for b in blocks:
        if b.date:
            if b.date < first:
                first = b.date
    if first == earliest:
        # no date found
        w.date = None
    else:
        w.date = first.isoformat()
    return w.date

def set_walletdate(w):
    earliest = datetime.now().date()
    if not w.date():  # sets .walletdate as a side-effect if it gets it from JSON
        d = datewallet(w, earliest)  # Not in JSON, so checks all the survex blocks
        w.walletdate = d
        w.save()

def stash_data_issue(parser=None, message=None, url=None, sb=None):
    """Avoid hitting the database for error messages until the end of the import"""
    global dataissues
    dataissues.append((parser, message, url, sb))

def store_data_issues():
    """Take the stash and store it permanently in the database instead

    use BULK creation here !"""
    global dataissues
    print(f" - Storing {len(dataissues)} Data Issues into database")

    # make a list of objects, but don't commit to database yet
    di_list = []
    for issue in dataissues:
        parser, message, url, sb = issue
        if url is None:
            if sb is not None:
                url = get_offending_filename(sb)
        di_list.append(DataIssue(parser=parser, message=message, url=url))
    # Now commit to db
    DataIssue.objects.bulk_create(di_list)
    dataissues = []  # in database now, so empty cache

def get_offending_filename(path):
    """Used to provide the URL for a line in the DataErrors page
    which reports problems on importing data into troggle
    """
    return "/survexfile/" + path + ".svx"

trip_people_cache = {}  # indexed by survexblock, so never needs cleaning out
def get_team_on_trip(survexblock):
    """Uses a cache to avoid a database query if it doesn't need to.
    Only used for complete team."""
    global trip_people_cache

    if survexblock in trip_people_cache:
        if len(trip_people_cache[survexblock]) > 0:
            return trip_people_cache[survexblock]

    qpeople = SurvexPersonRole.objects.filter(survexblock=survexblock)  # not very good Django style
    trip_people_cache[survexblock] = qpeople  # this is a query list
    return qpeople

def get_people_on_trip(survexblock):
    """Gets the displayable names of the people on a survexblock trip.
    Only used for complete team.
    Seems to be only used for error messages."""
    qpeople = get_team_on_trip(survexblock)  # qpeople is a Query List

    people = []
    for p in qpeople:
        people.append(f"{p.personname}")

    return list(set(people))

# THIS SHOULD NOT BE GLOBAL ! Should be per instance of file loader
trip_person_record = {}  # indexed by (survexblock, personexpedition) - so never needs cleaning out
trip_team_cache = {}  # indexed by survexblock, so never needs cleaning out
def put_person_on_trip(survexblock, personexpedition, tm):
    """Uses a cache to avoid a database query if it doesn't need to.
    Only used for a single person"""
    global trip_person_record
    global trip_team_cache

    if (survexblock, personexpedition) in trip_person_record:
        return True

    try:
        personrole = SurvexPersonRole(  # does not commit to db yet
            survexblock=survexblock,
            person=personexpedition.person,
            personexpedition=personexpedition,
            personname=tm
        )
    except:
        message = f"! *team '{tm}' FAIL, already created {survexblock.survexfile.path} ({survexblock}) "
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
        return True  # personrole was not created, so there is nothing to cache

    if survexblock not in trip_team_cache:
        trip_team_cache[survexblock] = []
    trip_team_cache[survexblock].append(personrole)

    trip_person_record[(survexblock, personexpedition)] = 1
    return False

def confirm_team_on_trip(survexblock):
    global trip_team_cache

    if survexblock not in trip_team_cache:
        return
    # Now commit to db
    SurvexPersonRole.objects.bulk_create(trip_team_cache[survexblock])
    trip_team_cache[survexblock] = []  # in database now, so empty cache

def check_team_cache(label=None):
    global trip_team_cache
    message = "! check_team_cache() called.."
    print(message)
    print(message, file=sys.stderr)
    for block in trip_team_cache:
        message = f"! *team CACHEFAIL, trip_team_cache {block.survexfile.path} ({block}). label:{label}"
        print(message)
        print(message, file=sys.stderr)

person_pending_cache = {}  # indexed per survexblock, so robust wrt PUSH/POP begin/end
def add_to_pending(survexblock, tm):
    """Collects team names before we have a date so cannot validate against
    expo attendance yet"""
    global person_pending_cache

    if survexblock not in person_pending_cache:
        person_pending_cache[survexblock] = set()

    person_pending_cache[survexblock].add(tm)

def get_team_pending(survexblock):
    """A set of *team names before we get to the *date line in a survexblock
    """
    global person_pending_cache

    if survexblock in person_pending_cache:
        teamnames = person_pending_cache[survexblock]  # a set of names
        person_pending_cache[survexblock] = set()  # reset to an empty set, not a tuple, so .add() still works
        return teamnames
    return

class LoadingSurvex:
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scanswallet' is what we today call a "survey scans folder" or a "wallet".
    """

    # python regex flags (?i) means case-insensitive, (?s) means . matches newline too
    # see https://docs.python.org/3/library/re.html
    rx_begin = re.compile(r"(?i)begin")
    rx_begin2 = re.compile("(?i)begin$")

    rx_end = re.compile(r"(?i)end$")
    rx_end2 = re.compile("(?i)end$")
    rx_title = re.compile(r"(?i)title$")
    rx_title2 = re.compile("(?i)title$")
    rx_fix = re.compile(r"(?i)fix$")
    rx_ref = re.compile(r"(?i)ref$")
    rx_data = re.compile(r"(?i)data$")
    rx_flags = re.compile(r"(?i)flags$")
    rx_alias = re.compile(r"(?i)alias$")
    rx_entrance = re.compile(r"(?i)entrance$")
    rx_date = re.compile(r"(?i)date$")
    rx_units = re.compile(r"(?i)units$")
    rx_team = re.compile(r"(?i)team$")
    rx_set = re.compile(r"(?i)set$")

    # rx_names = re.compile(r"(?i)names")
    rx_flagsnot = re.compile(r"not\s")
    rx_linelen = re.compile(r"[\d\-+.]+$")
    instruments = "(bitch|bodger|bolt|bolter|bolting|book|clino|comp|compass|consultant|disto|distox|distox2|dog|dogsbody|drawing|drill|gps|helper|inst|instr|instrument|monkey|nagging|nail|nail_polish|nail_polish_bitch|nail_polish_monkey|nail_varnish|nail_varnish_bitch|note|paint|photo|pic|point|polish|powerdrill|rig|rigger|rigging|shoot|sketch|slacker|something|surface|tape|topodroid|unknown|useless|varnish|waiting_patiently)"
    rx_teammem = re.compile(r"(?i)" + instruments + r"?(?:es|s)?\s+(.*)$")
    rx_teamold = re.compile(r"(?i)(.*)\s+" + instruments + r"?(?:es|s)?$")
    rx_teamabs = re.compile(r"(?i)^\s*(" + instruments + r")?(?:es|s)?\s*$")
    rx_person = re.compile(r"(?i) and |/| / |, | , |&| & | \+ |^both$|^none$")
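    # Illustrative *team argument forms these regexes are written for
    # (hypothetical examples, not quoted from any particular survex file):
    #   "Insts Anthony Day"     - role first, matched by rx_teammem
    #   "Anthony Day Insts"     - role last, old style, matched by rx_teamold
    #   "tape"                  - a role with no name, matched by rx_teamabs and ignored
    #   "gb, bl" / "PS & DL"    - multiple names, split apart by rx_person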
    rx_qm = re.compile(
        # r"(?i)^\s*QM(\d+)\s+(.+)\s+([\w\-\_]+)\.([\w\.\-]+)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$"
        r"(?i)^\s*QM(\d+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+(.+)$"
    )
    # does not recognise non numeric suffix survey point ids
    rx_qm0 = re.compile(r"(?i)^\s*QM(\d+)\s+(.+)$")
    rx_qm_tick = re.compile(r"(?i)^\s*QM(\d+)\s+TICK\s([\d\-]+)\s(.*)$")
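    # Hypothetical examples of the QM comment formats these are meant to match:
    #   "QM1 A surveyname.5 - Continues, needs bolting"   - rx_qm
    #   "QM2 B Tight rift at the end"                     - rx_qm0 (no survey point ids)
    #   "QM1 TICK 2023-07-20 Dug out and surveyed"        - rx_qm_tick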
    # remember there is also QM_PATTERN used in views.other and set in settings.py
    rx_tapelng = re.compile(r"(?i).*(tape|length).*$")

    rx_cave = re.compile(r"(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)")
    rx_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
    rx_comminc = re.compile(r"(?i)^\|\*include[\s]*([-\w/]*).*$")  # inserted by linear collate ;|*include
    rx_commcni = re.compile(r"(?i)^\|\*edulcni[\s]*([-\w/]*).*$")  # inserted by linear collate ;|*edulcni
    rx_include = re.compile(r"(?i)^\s*(\*include[\s].*)$")
    rx_include2 = re.compile("(?i)include$")
    rx_commref = re.compile(r"(?i)^\s*ref(?:erence)?[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)")
    rx_ref_text = re.compile(r'(?i)^\s*\"[^"]*\"\s*$')
    rx_star = re.compile(r"(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$")
    rx_starref = re.compile(r"(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
    rx_argsref = re.compile(r"(?i)^[\s.:]*((?:19[6789]\d)|(?:20[012345]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
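    # Illustrative *ref / ";Ref" arguments (hypothetical): "2017#56" or
    # "2017 # X 23" - a year, an optional letter X, and a wallet number.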
    rx_badmerge = re.compile(r"(?i).*(\>\>\>\>\>)|(\=\=\=\=\=)|(\<\<\<\<\<).*$")
    rx_ref2 = re.compile(r"(?i)\s*ref[.;]?")
    rx_commteam = re.compile(r"(?i)\s*(Messteam|Zeichner)\s*[:]?(.*)")
    rx_quotedtitle = re.compile(r'(?i)^"(.*)"$')

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
    datastardefault = {"type": "normal", "from": 0, "to": 1, "tape": 2, "compass": 3, "clino": 4}
    flagsdefault = {"duplicate": False, "surface": False, "splay": False, "skiplegs": False, "splayalias": False}
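    # Illustrative *flags lines: "*flags splay", "*flags not surface".
    # "skiplegs" and "splayalias" are internal troggle flags, not survex flags:
    # they are set in LoadSurvexFlags() and LoadSurvexAlias() respectively.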
    datastar = {}
    flagsstar = {}
    fixes = {}
    units = "metres"
    unitsfactor = None
    slength = 0.0
    legsnumber = 0
    depthbegin = 0
    depthinclude = 0
    unitsstack = []
    legsnumberstack = []
    slengthstack = []
    teaminheritstack = []
    teamcurrentstack = []
    dateinheritstack = []
    datecurrentstack = []
    stackbegin = []
    flagsstack = []
    datastack = []
    includestack = []
    stacksvxfiles = []
    svxfileslist = []
    svxprim = {}
    uniquefile = {}  # each key is a survex path, and its value is a list of parent files
    expos = {}
    lineno = 0
    insp = ""
    callcount = 0
    caverncount = 0

    TREE = "tree"
    ODDS = "oddments"
    svxpass = TREE
    includedfilename = ""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None
    currentteam = set()
    inheritteam = set()
    currentdate = None
    inheritdate = None
    pending = []
    adhocload = False

    def __init__(self):
        self.caveslist = GetCaveLookup()
        pass

    def LoadSurvexFallThrough(self, survexblock, line, cmd):
        if cmd == "require":
            pass  # should we check survex version available for processing?
        elif cmd in ["equate", "calibrate", "cs", "export", "case", "declination", "infer", "instrument", "sd"]:
            pass  # we ignore all these, which is fine.
        else:
            if cmd in ["include", "data", "flags", "title", "entrance", "set", "units", "alias", "ref"]:
                message = (
                    f"! Warning. Unparsed [*{cmd}]: '{line}' {survexblock.survexfile.path} - not an error (probably)"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )
            else:
                message = (
                    f"! Bad unrecognised svx command: [*{cmd}] {line} ({survexblock}) {survexblock.survexfile.path}"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )

    def get_team_inherited(self, survexblock):  # survexblock only used for debug mesgs
        """See get_team_pending(survexblock) which gets called at the same time,
        when we see a *date line"""
        global person_pending_cache

        if self.inheritteam:
            message = (
                f"- no *team INHERITING ({survexblock.parent})>({survexblock}) {survexblock.survexfile.path} '{self.inheritteam}'"
            )
            print(self.insp + message)
            # stash_data_issue(
            #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            # )
            return self.inheritteam

    def fix_undated(self, survexblock):
        """Called when we reach *end of a block OR when a QM is seen.
        Checks to see if the block has no *date, in which case it uses the
        inherited date.
        This is fine if the inherited date is from the same SurvexFile,
        but inheriting dates across *include files is almost certainly NOT
        expected behaviour, even though it is syntactically "correct",
        so triggers a Warning.

        In fact, rather than give a warning, I think this is where troggle should diverge from
        a strict interpretation of how survex works. So I will change this so that *date
        is NOT inherited between different files.
        """
        if survexblock.parent.name == "troggle_unseens":
            # Bolluxed up if we try to inherit from this random junk, so don't.
            return

        if self.currentdate:
            # already set
            if not survexblock.date:
                # error
                message = (
                    f"! no survexblock.date but currentdate is set. ({survexblock})-{survexblock.survexfile.path} {self.currentdate=}"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )
            return

        if self.inheritdate:
            survexblock.date = self.inheritdate
            self.currentdate = self.inheritdate  # unnecessary duplication
            # Not an error, so not put in DataIssues, but is printed to debug output
            message = (
                f"- No *date. INHERITING date '{self.inheritdate:%Y-%m-%d}' from ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}"
            )
            print(self.insp + message)
            # stash_data_issue(
            #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)  # child
            # )
            if survexblock.survexfile != survexblock.parent.survexfile:
                # This is noteworthy, however. FORBID inheriting dates between files. NOT documented survex behaviour !!
                survexblock.date = None
                self.currentdate = None  # unnecessary duplication
                return None

            # if survexblock.parent.name == "rootblock":
            #     # Not a sensible thing to inherit a date from, even if a date exists, which it shouldn't...
            #     message = (
            #         f"- No *date. But not sensible to inherit from rootblock. From ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}"
            #     )
            #     print(self.insp + message)
            #     # stash_data_issue(
            #     #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            #     # )
            #     return
            # else:
            #     message = (
            #         f"- Warning *date '{self.inheritdate:%Y-%m-%d}' INHERITED from DIFFERENT file:\n ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}\n {self.stackbegin} {self.inheritdate:%Y-%m-%d}"
            #     )
            #     print(self.insp + message)
            #     stash_data_issue(
            #         parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)  # not the parent
            #     )
            #     return self.inheritdate
        else:
            # This is not an error in the Expo dataset.
            # Many files just holding *include lines do not have dates.
            # Hardly _any_ of the ARGE survex files have dates !
            pass
            # message = f" ! No survexblock.date inheritable in '{survexblock}' in '{survexblock.survexfile.path}', setting to 1976"
            # print(self.insp + message)
            # stash_data_issue(
            #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            # )
            # expoyear = "1976"
        return

    def fix_anonymous(self, survexblock):
        """Called when we reach *end of a block.
        Checks to see if the block has no team attached, in which case it uses the
        inherited team.
        This is fine if the inherited team is from the same SurvexFile,
        but inheriting team across *include files is almost certainly NOT
        expected behaviour, even though it is syntactically "correct".

        If the block has no date, then it is assumed to be an abstract container,
        with no relevant team, and anyway we can't attach a PersonExpedition without
        knowing the year. Unless its parent has an identified expo"""

        if survexblock.parent.name == "troggle_unseens":
            # Bolluxed up if we try to inherit from this random junk, so don't.
            return

        expo = survexblock.expedition  # may be None if no *date yet
        if not expo:
            expo = survexblock.parent.expedition  # immediate parent works mostly
            if not expo:
                return

        if not self.currentteam:  # i.e. if it is a dated block and has no team
            if teamnames := self.get_team_inherited(survexblock):  # WALRUS
                for tm in teamnames:
                    personexpedition = GetPersonExpeditionNameLookup(expo).get(tm.lower())
                    if personexpedition:
                        put_person_on_trip(survexblock, personexpedition, tm)
        return

    def LoadSurvexTeam(self, survexblock, line):
        """Interpreting the *team fields means interpreting older style survex as well as current survex standard,
        *team Insts Anthony Day - this is how most of our files specify the team member
        *team "Anthony Day" notes pictures tape - this is how the survex documentation says it should be done
        We have a huge variety of abbreviations and misspellings. The most laconic being
        *team gb, bl

        personrole is used to record that a person was on a survex trip, NOT the role they played.
        (NB PersonLogEntry is a logbook thing, not a survex thing.)
        """

        def record_team_member(tm, survexblock):
            tm = tm.strip("\"'").strip()
            # Refactor. The dict GetPersonExpeditionNameLookup(expo) indexes by name and has values of personexpedition
            # This is convoluted

            # we need the current expedition, but if there has been no date yet in the survex file, we don't know which one it is.
            # so we can't validate whether the person was on expo or not.
            # we will have to attach them to the survexblock anyway, and then do a
            # later check on whether they are valid when we get the date.

            self.currentteam.add(tm)  # used in push/pop block code
            expo = survexblock.expedition  # may be None if no *date yet

            if expo:
                personexpedition = GetPersonExpeditionNameLookup(expo).get(tm.lower())
                if personexpedition:
                    put_person_on_trip(survexblock, personexpedition, tm)

                elif known_foreigner(tm):  # note, not using .lower()
                    message = f"- *team {expo.year} '{tm}' known foreigner on *team {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                    print(self.insp + message)
                    # stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
                else:
                    # we know the date and expo, but can't find the person
                    message = f"! *team {expo.year} '{tm}' FAIL personexpedition lookup on *team {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                    print(self.insp + message)
                    stash_data_issue(
                        parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                    )
            else:
                add_to_pending(survexblock, tm)
                # don't know the date yet, so cannot query the table about validity.
                # assume the person is valid. It will get picked up when the *date appears.
                # There are hundreds of these..
                message = (
                    f"- Team before Date: {line} ({survexblock}) {survexblock.survexfile.path}"
                )
                # print(self.insp + message)
                # stash_data_issue(
                #     parser="survex team", message=message, url=None, sb=(survexblock.survexfile.path)
                # )

        mteammember = self.rx_teammem.match(line)  # matches the role at the beginning
        if not mteammember:
            moldstyle = self.rx_teamold.match(line)  # matches the role at the end of the string
            if moldstyle:
                for tm in self.rx_person.split(moldstyle.group(1)):
                    if tm:
                        record_team_member(tm, survexblock)
                        # seems to be working
                        # msg = "! OLD tm='{}' line: '{}' ({}) {}".format(tm, line, survexblock, survexblock.survexfile.path)
                        # print(msg, file=sys.stderr)
                    else:
                        message = f"! *team {survexblock.survexfile.path} ({survexblock}) Weird '{moldstyle.group(1)}' oldstyle line: '{line}'"
                        print(self.insp + message)
                        stash_data_issue(
                            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                        )
            else:
                nullmember = self.rx_teamabs.match(line)  # matches empty role line. Ignore these.
                if not nullmember:
                    message = f"! *team {survexblock.survexfile.path} ({survexblock}) Bad line: '{line}'"
                    print(self.insp + message)
                    stash_data_issue(
                        parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                    )
        else:
            for tm in self.rx_person.split(mteammember.group(2)):
                if tm:
                    record_team_member(tm, survexblock)
                else:
                    if mteammember.group(2).lower() not in ("none", "both"):
                        message = f"! Weird *team '{mteammember.group(2)}' newstyle line: '{line}' ({survexblock}) {survexblock.survexfile.path}"
                        print(self.insp + message)
                        stash_data_issue(
                            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                        )

    def LoadSurvexSet(self, survexblock, line):
        """survex *set can reset the character for space, decimal point, field separator
        and lots of other stuff which would stuff this parser completely. The '*set names ...' is
        innocuous, so we ignore that. All the others need swift attention."""
        item, *_ = line.strip().split(" ")  # unpack tuples idiom in python 3
        if item.lower() == "names":
            # we don't care as we treat all chars as names anyway.
            # print(f"*set names - do not care: '{line}' {survexblock.survexfile.path}", file=sys.stderr)
            return
        else:
            message = (
                f"! SERIOUS Warning. Unparsed [*set]: '{line}' {survexblock.survexfile.path} "
            )
            print(self.insp + message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )

    def LoadSurvexFix(self, survexblock, line):
        """*fix is a station geolocation, units depend on a previous *cs setting
        NOTE 'line' is not the full line, it is 'arg' and the comments have been stripped !
        SO we have to recognise the '*fix' too
        """
        # *fix|36|reference|36359.40|82216.08|2000.00\n
        rx_fixline = re.compile(r"(?i)^\s*[*]fix\s+([\w\d_\.\-]+)\s+(?:reference)?\s*([\d\.]*)\s+([\d\.]*)\s+([\d\.]*)\s*;?(.*)$")

        line = line.replace("\n", "")
        # fixline = self.rx_fixline.match(line)
        fixline = rx_fixline.match(line)
        if not fixline:
            display = line.replace(" ", "|")
            message = f'BAD fix regex {display}++{survexblock.parent}:{survexblock}@{survexblock.survexfile}'
            print(self.insp + message)
            stash_data_issue(parser="survex", message=message)
        else:
            fixdata = fixline.groups()
            # print(fixline.group(1), fixline.group(5))
            # print(f"'{line}'")
            name = fixdata[0]
            if (survexblock, name) in self.fixes:
                message = f"! Duplicate *FIX: id '{line}' ({survexblock}) {survexblock.survexfile.path}"
                print(self.insp + message)
                stash_data_issue(parser="survex", message=message)

                # do not store duplicates, they will be gpx/publish stuff
                return

            try:
                # _, _, alt, *rest = (fixdata + [None]*5)[:5]
                name, _, _, alt, comment = (list(fixdata) + [None] * 5)[:5]
                fixid = str(survexblock.id) + ":" + name
                self.fixes[fixid] = (survexblock, name, alt, comment)
                message = f"{name}, {fixdata=}, last:{fixline.groups()[-1]}"
            except Exception as e:
                print(f'BAD fix comment {e}', file=sys.stderr)
                print(f'BAD fix comment {name}, {fixdata=}\n{line.replace(" ","|")}\n{survexblock.survexfile.path}:{survexblock}', file=sys.stderr)

    def LoadSurvexEntrance(self, survexblock, line):
        # Not using this
        pass

    def LoadSurvexAlias(self, survexblock, line):
        # *alias station - ..
        splayalias = re.match(r"(?i)\s*station\s*\-\s*\.\.\s*$", line)
        if splayalias:
            self.flagsstar["splayalias"] = True
            print(line)
        else:
            message = f"! Bad *ALIAS: '{line}' ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(parser="survex", message=message)

    def LoadSurvexUnits(self, survexblock, line):
        # all for 4 survex files with measurements in feet. bugger.
        # Won't need this once we move to using cavern or dump3d output for lengths
        tapeunits = self.rx_tapelng.match(line)  # tape|length
        if not tapeunits:
            return
        convert = re.match(r"(?i)(\w*)\s*([\.\d]+)\s*(\w*)", line)
        if convert:
            factor = convert.groups()[1]
            self.unitsfactor = float(factor)
            if debugprint:
                message = (
                    f"! *UNITS NUMERICAL conversion [{factor}x] '{line}' ({survexblock}) {survexblock.survexfile.path}"
                )
                print(self.insp + message)
                stash_data_issue(parser="survexunits", message=message)

        feet = re.match(r"(?i).*feet$", line)
        metres = re.match(r"(?i).*(METRIC|METRES|METERS)$", line)
        if feet:
            self.units = "feet"
        elif metres:
            self.units = "metres"
        else:
            message = f"! *UNITS in YARDS!? - not converted '{line}' ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(parser="survexunits", message=message)

    def get_expo_from_year(self, year, line, survexblock):
        # caching to save DB query on every block
        if year in self.expos:
            expo = self.expos[year]
        else:
            expeditions = Expedition.objects.filter(year=year)
            if len(expeditions) > 1:
                message = (
                    f"! More than one expedition in year {year} '{line}' ({survexblock}) {survexblock.survexfile.path}"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )
            if expeditions:
                expo = expeditions[0]
                self.expos[year] = expo
            else:
                expo = Expedition.objects.get(year="1976")
                message = f"! DATE INCORRECT. There is no expedition for the year {year}. {survexblock.survexfile.path} ({survexblock}) - set to 1976."
                print(self.insp + message)
                stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
        return expo

    def LoadSurvexDate(self, survexblock, line):
        """We now have a valid date for this survexblock, so we now know the expo
        it relates to and can use GetPersonExpeditionNameLookup(expo) to check whether
        the people are correct.

        Note that a *team line can come before AND after a *date line"""

        def setdate_on_survexblock(year):
            """Either *date comes before any *team, in which case there are no prior
            PersonRoles attached, or
            *team came before this *date, in which case the names are only in 'pending'"""
            global trip_person_record

            expo = self.get_expo_from_year(year, line, survexblock)
            survexblock.expedition = expo

            team = get_team_on_trip(survexblock)  # should be empty, should only be in 'pending'
            # team = SurvexPersonRole.objects.filter(survexblock=survexblock)
            if len(team) > 0:
                message = f"! *team {expo.year} Multiple *date in one block? Already someone on team when *date seen. {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                print(self.insp + message)
                stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))

            if teamnames := get_team_pending(survexblock):  # WALRUS https://docs.python.org/3/whatsnew/3.8.html#assignment-expressions
                for tm in teamnames:
                    if known_foreigner(tm):
                        message = f"- *team {expo.year} '{tm}' known foreigner *date (misordered) {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                        print(self.insp + message)
                        # stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
                    else:
                        pe = GetPersonExpeditionNameLookup(expo).get(tm.lower())
                        if pe:
                            put_person_on_trip(survexblock, pe, tm)

                        else:
                            message = f"! *team {year} '{tm}' FAIL personexpedition lookup on *date {survexblock.survexfile.path} ({survexblock}) "
                            print(self.insp + message)
                            stash_data_issue(
                                parser="survex",
                                message=message,
                                url=None, sb=(survexblock.survexfile.path),
                            )

        oline = line
        perps = get_people_on_trip(survexblock)  # What, you don't know Judge Dredd slang ?
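
        # Date formats handled below (illustrative examples):
        #   "2018-07-12" / "2018.07.12"  - full date, 10 chars
        #   "2018-07"                    - month only, set to the 1st, 7 chars
        #   "2018"                       - year only, set to 1 January, 4 chars
        #   "2018-7-12" / "2018-07-1"    - single-digit month or day, 8 or 9 chars
        #   "2018-07-12-2018-07-14"      - a range; only the first date is used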
        if len(line) > 10:
            if line[10] == "-":  # ie a range, just look at first date
                line = line[0:10]
            else:
                message = f"! DATE Warning LONG DATE '{oline}' ({survexblock}) {survexblock.survexfile.path}"
                print(self.insp + message)
                stash_data_issue(parser='xSvxDate', message=message, url=None, sb=(survexblock.survexfile.path))

        if len(line) == 10:
            year = line[:4]
            # TO DO set to correct Austrian timezone Europe/Vienna ?
            # %m and %d need leading zeros. Source svx files require them.
            survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
        elif len(line) == 7:
            year = line[:4]
            message = f"! DATE Warning only accurate to the month, setting to 1st '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp + message)
            stash_data_issue(
                parser="xSvxDate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m")  # sets to first of month
        elif len(line) == 4:
            year = line[:4]
            message = f"! DATE WARNING only accurate to the YEAR, setting to 1st January '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp + message)
            stash_data_issue(
                parser="xSvxDate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexblock.date = datetime.strptime(line, "%Y")  # sets to January 1st
        elif len(line) == 9 or len(line) == 8:
            year = line[:4]
            message = f"! DATE format WARNING, single digit day or month number,'{oline}' [{line[-5]}][{line[-2]}] ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(
                parser="xSvxDate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            if line[-2] == "-" or line[-2] == ".":
                line = line[:-1] + '0' + line[-1]
                survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
                print(f"! DATE -2 '{line}' '{survexblock.date}'")
            elif line[-5] == "-" or line[-5] == ".":
                line = line[:-4] + '0' + line[-4:]
                survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
                print(f"! DATE -5 '{line}' '{survexblock.date}'")
            else:
                year = line[:4]
                message = (
                    f"! DATE Error SHORT LINE '{line}' '{oline}-{survexblock}' ({type(survexblock)}) {survexblock.survexfile.path}"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="xSvxDate", message=message, url=None, sb=(survexblock.survexfile.path)
                )
        else:
            # these errors are reporting the wrong survexblock, which is actually a SurvexFile (!)
            # see To Do notes on how to trigger this. Still needs investigating..
            message = (
                f"! DATE Error unrecognised '{oline}-{survexblock}' ({type(survexblock)}) {survexblock.survexfile.path}"
            )
            print(self.insp + message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            print(f" {type(survexblock)=}")  # survexblock.parent fails as a SurvexFile has no .parent ...ugh.
            print(f" {survexblock.survexfile=}")
            # Not setting 'year' crashes entire import on databaseReset.
            year = line[:4]
            perps = get_people_on_trip(survexblock)
            # raise

        try:
            setdate_on_survexblock(year)
        except NameError:
            print(f">> why is year not set ?! {survexblock.survexfile.path}")
            setdate_on_survexblock("1976")
        if survexblock.date:
            # do not actually need a distinct variable 'currentdate' but it makes the code clearer
            self.currentdate = survexblock.date
            survexblock.save()

    def LoadSurvexLeg(self, survexblock, sline, comment, svxline):
        """This reads compass, clino and tape data but only keeps the tape lengths,
        the rest is discarded after error-checking.
        Now skipping the error checking - returns as soon as the leg is not one we count.

        REPLACE ALL THIS by reading the .log output of cavern for the file.
        But we need the lengths per Block, not by File. dump3d will do lengths per block.
        """

        # catch bug when saving a new single survex file
        if 'survexfile' not in locals():
            survexfile = survexblock.survexfile
            print(f"LoadSurvexLeg() UNSET 'survexfile' variable, setting to {survexblock.survexfile}")

        invalid_clino = 180.0
        invalid_compass = 720.0
        invalid_tape = 0.0

        if self.flagsstar["skiplegs"]:
            if debugprint:
                print("skip in ", self.flagsstar, survexblock.survexfile.path)
            return

        if debugprint:
            print(f"! LEG datastar type:{self.datastar['type'].upper()}++{survexblock.survexfile.path}\n{sline} ")
        if self.datastar["type"] == "passage":
            return
        if self.datastar["type"] == "cartesian":
            return
        if self.datastar["type"] == "nosurvey":
            return
        if self.datastar["type"] == "diving":
            return
        if self.datastar["type"] == "cylpolar":
            return
        if debugprint:
            print(
                f" !! LEG data lineno:{self.lineno}\n !! sline:'{sline}'\n !! datastar['tape']: {self.datastar['tape']}"
            )

        if self.datastar["type"] != "normal":
            return

        ls = sline.lower().split()
        # NORMAL, so there should be 5 fields
        # from the content, this is clearly reading fixedpts/gps/gps00raw.svx, but not reporting it by that name
        if len(ls) < 5:
            print("! Fewer than 5 fields in NORMAL in ", survexblock.survexfile.path, survexfile, survexfile.primary)
            print(" datastar NORMAL:", self.datastar)
            print(f" Line (split): {ls}, comment: {comment}")
            print(f" Line: {sline}\nsvxline: {svxline}")
            message = f" ! Not 5 fields in line '{sline.lower()}' {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}"
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
        datastar = self.datastar  # shallow copy: alias but the things inside are the same things
        survexleg = SurvexLeg()

        # skip all splay legs
        try:
            if "splayalias" in self.flagsstar:
                if ls[datastar["from"]] == "-" or ls[datastar["to"]] == "-":
                    if debugprint:
                        print("Aliased splay in ", survexblock.survexfile.path)
                    return

            if ls[datastar["from"]] == ".." or ls[datastar["from"]] == ".":
                if debugprint:
                    print("Splay in ", survexblock.survexfile.path)
                return
            if ls[datastar["to"]] == ".." or ls[datastar["to"]] == ".":
                if debugprint:
                    print("Splay in ", survexblock.survexfile.path)
                return

            if ls[datastar["to"]] == "-":
                message = f" ! Suspected splay, not declared, in line {ls} in {survexblock.survexfile.path}"
                print(self.insp + message)
                stash_data_issue(
                    parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
                )
                return
        except:
            message = f" ! datastar parsing from/to incorrect in line {ls} in {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            return

        try:
            tape = ls[datastar["tape"]]
        except:
            message = f" ! datastar parsing incorrect in line {ls} in {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexleg.tape = invalid_tape
            return
        # e.g. '29/09' or '(06.05)' in the tape measurement
        # tape = tape.replace("(","")  # edited original file (only one) instead
        # tape = tape.replace(")","")  # edited original file (only one) instead
        # tape = tape.replace("/",".")  # edited original file (only one) instead.
        try:
            if self.unitsfactor:
                tape = float(tape) * self.unitsfactor
                if debugprint:
                    message = f" ! Units: Length scaled {tape}m '{ls}' in ({survexblock.survexfile.path}) units:{self.units} factor:{self.unitsfactor}x"
                    print(self.insp + message)
                    stash_data_issue(
                        parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
                    )
            if self.units == "feet":
                tape = float(tape) / METRESINFEET
                if debugprint:
                    message = f" ! Units: converted to {tape:.3f}m from {self.units} '{ls}' in ({survexblock.survexfile.path})"
                    print(self.insp + message)
                    stash_data_issue(
                        parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
                    )
            survexleg.tape = float(tape)
            self.legsnumber += 1
        except ValueError:
            message = f" ! Value Error: Tape misread in line'{ls}' in {survexblock.survexfile.path} units:{self.units}"
            print(self.insp + message)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexleg.tape = invalid_tape
        try:
            survexblock.legslength += survexleg.tape
            self.slength += survexleg.tape
        except ValueError:
            message = (
                f" ! Value Error: Tape length not added '{ls}' in {survexblock.survexfile.path} units:{self.units}"
            )
            print(self.insp + message)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )

        try:
            lcompass = ls[datastar["compass"]]
        except:
            message = f" ! Value Error: Compass not found in line {ls} in {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            lcompass = invalid_compass

        try:
            lclino = ls[datastar["clino"]]
        except:
            print(("! Clino misread in", survexblock.survexfile.path))
            print((" datastar:", datastar))
            print((" Line:", ls))
            message = f" ! Value Error: Clino misread in line '{sline.lower()}' {datastar=} {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}"
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            lclino = invalid_clino

        if lclino == "up":
            survexleg.clino = 90.0
            lcompass = invalid_compass
        elif lclino == "down":
            survexleg.clino = -90.0
            lcompass = invalid_compass
        elif lclino == "-" or lclino == "level":
            survexleg.clino = 0.0  # a level or omitted clino reading is horizontal, not -90

        try:
            survexleg.compass = float(lcompass)
        except ValueError:
            print(("! Compass misread in", survexblock.survexfile.path))
            print((" datastar:", datastar))
            print((" Line:", ls))
            message = " ! Value Error: lcompass:'{}' line {} in '{}'".format(lcompass, ls, survexblock.survexfile.path)
            stash_data_issue(
                parser="survexleg", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexleg.compass = invalid_compass

        # delete the object to save memory
        survexleg = None

    def LoadSurvexRef(self, survexblock, args):
        """Interpret the *ref record, and all the many variants

        todo: check the year, ie that the *ref 2017#56 wallet is for a survexblock dated in 2017.
        """
        def check_reused_wallet():
            if walletnum == "00":
                # we don't mind if lots of survex files refer to the index wallet.
                # this is a way of saying that there is no wallet - someone has investigated.
                message = f" ! Wallet *REF {refscan} in {survexblock.survexfile.path} - re-uses #00 '{survexblock.scanswallet.walletname}'"
                print(self.insp + message)
                print(self.insp + message, file=sys.stderr)
            else:
                message = f" ! Wallet *REF {refscan} in {survexblock.survexfile.path} - Already a DIFFERENT wallet is set for this block '{survexblock.scanswallet.walletname}'"
                print(self.insp + message)
                stash_data_issue(parser="ref", message=message, url=url)
            return

        url = get_offending_filename(survexblock.survexfile.path)
        # *REF but also ; Ref years from 1960 to 2039
        refline = self.rx_ref_text.match(args)
        if refline:
            # a textual reference such as "1996-1999 Not-KH survey book pp 92-95"
            print(f"{self.insp} *REF quoted text so ignored:{args} in {survexblock.survexfile.path}")
            return

        if len(args) < 4:
            message = f" ! Empty or BAD *REF statement '{args}' in '{survexblock.survexfile.path}'"
            print(self.insp + message)
            stash_data_issue(parser="survex", message=message, url=url)
            return

        argsgps = self.rx_argsref.match(args)
        if argsgps:
            yr, letterx, walletnum = argsgps.groups()
        else:
            perps = get_people_on_trip(survexblock)
            message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' malformed id '{args}' {perps}"
            print(self.insp + message)
            stash_data_issue(parser="ref", message=message, url=url)
            return

        if letterx:
            perps = get_people_on_trip(survexblock)
            message = f" ! Wallet *REF has LETTER in '{survexblock.survexfile.path}' malformed id '{args}' {perps}"
            print(self.insp + message)
            stash_data_issue(parser="ref", message=message, url=url)
        if len(walletnum) < 2:
            walletnum = "0" + walletnum
        if not (int(yr) > 1960 and int(yr) < 2050):
            message = f" ! Wallet year out of bounds {yr} '{args}' {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(parser="ref", message=message, url=url)

        refscan = f"{yr}#{walletnum}"
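        # e.g. refscan == "2017#56", the year#number format used for Wallet.walletname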
        try:
            if int(walletnum) > 99:
                message = f" ! Wallet *REF {refscan} - very big (more than 99) so probably wrong in '{survexblock.survexfile.path}'"
                print(self.insp + message)
                stash_data_issue(parser="ref", message=message, url=url)
        except:
            message = f" ! Wallet *REF {refscan} - not numeric in '{survexblock.survexfile.path}'"
            print(self.insp + message)
            stash_data_issue(parser="ref", message=message, url=url)

        # Look to see if we have a record of this wallet already - which would be unexpected
        manywallets = Wallet.objects.filter(
            walletname=refscan
        )  # assumes all wallets found in earlier pass of data import
        if manywallets:
            if len(manywallets) > 1:
                message = f" ! Wallet *REF {refscan} - more than one found {len(manywallets)} wallets in db with same id {survexblock.survexfile.path}"
                print(self.insp + message)
                stash_data_issue(parser="ref", message=message, url=url)

            if survexblock.scanswallet:
                if survexblock.scanswallet.walletname == refscan:
                    # this might happen when a survex file is re-parsed after editing, but if it is the same thing then OK.
                    pass
                else:
                    check_reused_wallet()

            else:
                survexblock.scanswallet = manywallets[0]  # this is a ForeignKey field
                survexblock.save()
                # This is where we should check that the wallet JSON contains a link to the survexfile
                # and that the JSON date and walletdate are set correctly to the survexblock date.
                set_walletdate(survexblock.scanswallet)
        else:
            perps = get_people_on_trip(survexblock)
            message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' '{refscan}' NOT in database i.e. wallet does not exist {perps}."
            print(self.insp + message)
            stash_data_issue(parser="ref", message=message, url=url)

    def LoadSurvexDataNormal(self, survexblock, args):
        """Sets the order for data elements in this and following blocks, e.g.
        *data normal from to compass clino tape
        *data normal from to tape compass clino
        We are only collecting length data so we are not interested in from, to, LRUD etc.
        """
        # datastardefault = {  # included here as reference to help understand the code
        #     "type": "normal",
        #     "t": "leg",
        #     "from": 0,
        #     "to": 1,
        #     "tape": 2,
        #     "compass": 3,
        #     "clino": 4}
        datastar = copy.deepcopy(self.datastardefault)
        if args == "":
            # naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
            return
        # DEFAULT | NORMAL | CARTESIAN | NOSURVEY | PASSAGE | TOPOFIL | CYLPOLAR | DIVING
        ls = args.lower().split()
        if ls[0] == "default":
            self.datastar = copy.deepcopy(self.datastardefault)
        elif ls[0] == "normal" or ls[0] == "topofil":
            if not ("from" in datastar and "to" in datastar):
                message = (
                    f" ! - Unrecognised *data normal statement '{args}' {survexblock.name}"
                )
                print(message)
                print(message, file=sys.stderr)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )
                return
            else:
                datastar = copy.deepcopy(self.datastardefault)  # a copy, so we do not mutate the class-level default dict
                # ls = ["normal", "from", "to", "tape", "compass", "clino"]
                for i in range(1, len(ls)):  # ls[0] is "normal"
                    if ls[i].lower() == "newline":
                        message = f" ! - ABORT *data statement has NEWLINE in it in {survexblock.survexfile.path}. Not parsed by troggle. '{args}'"
                        print(message)
                        print(message, file=sys.stderr)
                        stash_data_issue(
                            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                        )
                        return False

                    if ls[i] in ["bearing", "compass"]:
                        datastar["compass"] = i - 1
                    if ls[i] in ["clino", "gradient"]:
                        datastar["clino"] = i - 1
                    if ls[i] in ["tape", "length"]:
                        datastar["tape"] = i - 1
                self.datastar = copy.deepcopy(datastar)
                return
        elif ls[0] == "passage" or ls[0] == "nosurvey" or ls[0] == "diving" or ls[0] == "cylpolar":
            # message = " ! - *data {} blocks ignored. {}|{}" '{}' .format(ls[0].upper(), survexblock.name, args)
            # print(message)
            # print(message,file=sys.stderr)
            # stash_data_issue(parser='survex', message=message)
            self.datastar["type"] = ls[0]
        elif ls[0] == "cartesian":  # We should not ignore this ?! Default for Germans ?
            # message = " ! - *data {} blocks ignored. {}|{}" '{}' .format(ls[0].upper(), survexblock.name, args)
            # print(message)
            # print(message,file=sys.stderr)
            # stash_data_issue(parser='survex', message=message)
            self.datastar["type"] = ls[0]
        else:
            message = f" ! - Unrecognised *data statement '{args}' {survexblock.name}"
            print(message)
            print(message, file=sys.stderr)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            self.datastar["type"] = ls[0]

    def LoadSurvexFlags(self, args):
        # Valid flags are DUPLICATE, SPLAY, and SURFACE, and a flag may be preceded with NOT to turn it off.
        # Default values are NOT any of them
        self.flagsstar = copy.deepcopy(self.flagsdefault)
        flags = []

        args = self.rx_flagsnot.sub("not", args)
        argslist = args.split()
        for s in argslist:
            flags.append(s)
        if debugprint:
            print(
                f" ^ flagslist:{flags}",
            )

        if "duplicate" in flags:
            self.flagsstar["duplicate"] = True
        if "surface" in flags:
            self.flagsstar["surface"] = True
        if "splay" in flags:
            self.flagsstar["splay"] = True

        if "notduplicate" in flags:
            self.flagsstar["duplicate"] = False
        if "notsurface" in flags:
            self.flagsstar["surface"] = False
        if "notsplay" in flags:
            self.flagsstar["splay"] = False

        # if self.flagsstar["duplicate"] == True or self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
        # actually we do want to count duplicates as this is for "effort expended in surveying underground"
        if self.flagsstar["surface"] is True or self.flagsstar["splay"] is True:
            self.flagsstar["skiplegs"] = True
        if debugprint:
            print(
                f" $ flagslist:{flags}",
            )

    def set_primary(self, headpath):
        """This sets the primarysurvexfile. This is correct as it should be set on the first file
        in the directory, where first is defined by the *include ordering. Which is what we
        are doing.
        """

        if not headpath:
            # This is normal for a .svx file in the root of the :loser: repo
            return self.currentsurvexfile

        if headpath.lower() not in self.svxprim:
            primary = self.currentsurvexfile
            self.svxprim[headpath.lower()] = primary
        return self.svxprim[headpath.lower()]

    def IdentifyCave(self, cavepath, svxid, depth):
        """Given a file path for a survex file, e.g. /1626/107/107.svx, or a survex-block path,
        return the cave object
        kataster
        fixedpts/gps
        and everything at top level, directly in caves-1623/ not in a subdir
        NOTE self.caveslist is a superset of GetCaveLookup, which already contains both uppercase and lowercase aliases

        why is this called with cavepath="caves-1623/2023-kt-02" when this is a cave where the files are in "caves-1623/2023-kt-02/"
        cavepath = 'surface/1623' when svxid is 'surface/1623/2004-18to298.svx'
        """
        if cavepath == "caves-1623/99ob02":  # nothing special about this cave, just used as a marker to dump the cavelist to file
            for key in self.caveslist:
                cave = self.caveslist[key]
                if type(cave) != Cave:
                    print(f"BAD CAVE TYPE '{type(cave)}'{cave}' -- {key}'")
            for key in self.caveslist:
                cave = self.caveslist[key]
                print(f"Cave<{cave}> -- {key}")

        for f in IGNOREFILES:
            if svxid.lower().startswith(f):
                return False
        for i in IGNOREPREFIX:
            if cavepath.lower().startswith(i) or cavepath[11:].lower().startswith(i):
                # message = (f" - {cavepath} is an <IGNOREPREFIX> (while looking at '{svxid}.svx' )")
                # print(message, file=sys.stderr)
                return False

        if cavepath.lower() in self.caveslist:  # primed with GetCaveLookup
            return self.caveslist[cavepath.lower()]

        rx_svxcollection = re.compile(r"(?i)caves-(\d\d\d\d)/(.*)$")
        # rx_cave = re.compile(r"(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)")
        path_match = self.rx_cave.search(cavepath)
        if path_match:
            area = path_match.group(1)
            caveid = path_match.group(2)
            sluggy = f"{area}-{caveid}".lower()  # GetCaveLookup is all UPPER() and all lower() but not mixed
            # if this comes from editing a survex file, we may already have loaded 3-digit aliases for 1623- from old wallets,
            # so be careful here..
            seek = {sluggy, sluggy.replace("1623-", "")}  # {} is a set
            for s in seek:
                if s in self.caveslist:
                    self.caveslist[cavepath] = self.caveslist[s]  # set "caves-1626/107/107" as index to cave 1626-107
                    return self.caveslist[s]

            if cavepath[6:10] in ARGEAREAS:
                # print(f"ARGE {area=} {caveid=} {cavepath} - {cavepath[11:]}", file=sys.stderr)
                return do_ARGE_cave(sluggy, caveid, area, svxid)

            cave = create_new_cave(cavepath, svxid, f"Cave mentioned only in a survex file {svxid=}")  # uses the pending code
            self.caveslist[cavepath.lower()] = cave
            return cave
        else:
            path_match = rx_svxcollection.search(svxid)
            if path_match:
                # message = f" ! Recognised survex file in area {path_match.group(1)} which is not a cave at {svxid=}"
                # stash_data_issue(parser="survex", message=message, url=None, sb=(svxid))
                # print(message, file=sys.stderr)
                return False
            else:  # probably a top level file immediately in the loser directory. No worries.
                message = f" ! Warning: no cave identifiable for '{svxid}.svx' {cavepath=} "
                print("\n" + message)
                stash_data_issue(parser="survex", message=message, url=f"{svxid}.svx", sb=(svxid))
                return False

    def LoadSurvexFile(self, svxid):
        """Creates SurvexFile in the database, and SurvexDirectory if needed
        Creates a new current survexfile and valid .survexdirectory
        Inspects the parent folder of the survexfile and uses that to decide if this is
        a cave we know.

        If we see a duplicate cave, this is TOO LATE. It has already been included into the
        long linear file. We prevent duplication when the long linear file is created, so
        if we see a duplicate here, it is a serious error.

        The survexblock passed-in is not necessarily the parent. FIX THIS.
        """
        global dup_includes

        if debugprint:
            print(f" # datastack in LoadSurvexFile:{svxid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}' ", end="")
            print("")

        depth = " " * self.depthbegin
        print("{:2}{} - NEW survexfile:'{}'".format(self.depthbegin, depth, svxid))
        headpath = os.path.dirname(svxid)

        newfile, created = SurvexFile.objects.update_or_create(path=svxid)
        if not created:
            dup_includes += 1
            message = f" ! DUPLICATE SurvexFile '{svxid}' create attempt in LoadSurvexFile()"
            print(message)
            # print(message, file=sys.stderr)
            stash_data_issue(parser="survex", message=message, url=f"/survexfile/{svxid}")

            self.currentsurvexfile = newfile
            return  # abort as everything already done for object creation

        newfile.save()  # until we do this there is no internal id so no foreign key works
        self.currentsurvexfile = newfile
        newfile.primary = self.set_primary(headpath)

        # refactor this !
        cave = self.IdentifyCave(headpath, svxid, depth)  # cave already exists in db?
        if cave:
            newfile.cave = cave
            # print(f"\n - New directory '{newdirectory}' for cave '{cave}'",file=sys.stderr)
            if not cave.survex_file:
                cave.survex_file = svxid + ".svx"
                cave.save()
                # message = f" - '{cave}' had no survex_file set - setting '{svxid}.svx' writing to {cave.filename})"
                message = f" - '{cave}' has no survex_file set - need to set to '{svxid}.svx' in {cave.filename}"
                print("\n", message, file=sys.stderr)
                stash_data_issue(parser="survex", message=message)

                # try:
                #     cave_file = cave.file_output()
                #     write_and_commit([cave_file], f"{cave} Update of cave.survex_file when parsing {svxid}.svx")
                # except
                #     raise

        if not newfile.primary:
            message = f" ! .primary NOT SET in new SurvexFile {svxid} "
            print(message)
            print(message, file=sys.stderr)
            stash_data_issue(parser="survex", message=message)
        self.currentsurvexfile.save()  # django insists on this although it is already saved !?

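    # Sketch of the duplicate test above (illustrative, with an assumed path):
    # Django's update_or_create() returns (object, created), so a second call
    # with the same key yields created == False and the same primary key.
    #
    #   sf1, created1 = SurvexFile.objects.update_or_create(path="caves-1623/999/example")
    #   sf2, created2 = SurvexFile.objects.update_or_create(path="caves-1623/999/example")
    #   assert created1 and not created2 and sf1.pk == sf2.pk
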
    def ProcessIncludeLine(self, included):
        """As we read the long linear file, we come across lines telling us that the
        content from this point on is from a particular included file
        """
        global debugprint
        svxid = included.groups()[0]
        if svxid.lower() == debugprinttrigger.lower():
            debugprint = True
        self.LoadSurvexFile(svxid)
        self.stacksvxfiles.append(self.currentsurvexfile)

    def ProcessEdulcniLine(self, edulcni):
        """As we read the long linear file, we come across lines telling us that
        we are about to pop back out of the contents of an included file.
        Saves the current survexfile object in the db to include the data parsed from it"""
        global debugprint
        svxid = edulcni.groups()[0]
        if debugprint:
            depth = " " * self.depthbegin
            print(f"{self.depthbegin:2}{depth} - Edulcni survexfile:'{svxid}'")
        if svxid.lower() == debugprinttrigger.lower():
            debugprint = False
        self.currentsurvexfile.save()
        self.currentsurvexfile = self.stacksvxfiles.pop()

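    # For illustration: the collated linear file brackets each included file
    # with marker comments written by PushdownStackScan(), which the two
    # methods above consume symmetrically (cave path assumed):
    #
    #   ;|*include caves-1623/290/walk     <- ProcessIncludeLine() pushes
    #   *begin walk
    #   ...survey data...
    #   *end walk
    #   ;|*edulcni caves-1623/290/walk     <- ProcessEdulcniLine() pops
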
    def TickSurvexQM(self, survexblock, qmtick):
        """Interpret the specially formatted comment which is a QM TICKED statement"""
        # Now we need to find the correct QM object. It will be in the same block and have the same number.

        try:
            # could try to search on blockname instead?
            # but the QMn TICK has to be in the same block anyway
            qm = QM.objects.filter(block=survexblock, number=int(qmtick.group(1)))
        except:
            # raise
            # NB 'qm' is unbound if the filter itself failed, so it cannot appear in this message
            message = f' ! QM TICK find FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + completion_description:"{qmtick.group(3)}" '
            print(message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            return
        if len(qm) > 1:
            message = f' ! QM TICK MULTIPLE found FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + completion_description:"{qmtick.group(3)}" '
            print(message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
        if not qm:
            return  # no matching QM in this block: nothing to tick
        qm[0].ticked = True
        # qm[0].ticked_date = qmtick.group(2)  # not in data model yet
        qm[0].completion_description = qmtick.group(3)
        qm[0].save()

    def LoadSurvexQM(self, survexblock, qmline):
        """Interpret the specially formatted comment which is a QM definition"""
        # r"(?i)^\s*QM(\d+)\s+(.+)\s+([\w\-\_]+)\.([\w\.\-]+)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$"
        # r"(?i)^\s*QM(\d+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+(.+)$"
        # rx_qm_tick QMnn TICK date comment
        # (r"(?i)^\s*QM(\d+)\s+TICK\s([\d\-]+)\s(.*)$")

        insp = self.insp
        # create a short, hopefully-unique name for this block to be used in the QM id
        if len(survexblock.name) < 7:
            blockname = survexblock.name
        else:
            blockname = survexblock.name[:6] + survexblock.name[-1:]
        # logslug = f'D{int(qmyear)}_{blockname}_{int(qm_no):03d}'
        qm_ticked = False  # default

        qm_no = qmline.group(1)  # this is NOT unique across multiple survex files
        qm_grade = qmline.group(2).strip().upper()  # TICK or [a-dA-DvVxX?]
        if qm_grade == "TICK":
            self.TickSurvexQM(survexblock, qmline)
            return

        if qm_grade not in ["A", "B", "C", "D", "X"]:  # "V", "?" not allowed in survex file QMs
            message = f" ! QM{qm_no} INVALID code '{qm_grade}' [{blockname}] '{survexblock.survexfile.path}'"
            print(insp + message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
        qm_nearest = qmline.group(3)
        # if qmline.group(3):  # usual closest survey station
        #     qm_nearest = qmline.group(3)
        #     if qmline.group(4):
        #         qm_nearest = qm_nearest + "." + qmline.group(4)

        resolution_station_name = qmline.group(4)
        if resolution_station_name == "-":
            pass
        else:
            qm_ticked = True
            # print(f"{survexblock.survexfile.cave} {survexblock}:{qm_no}{qm_grade} {qmline.group(4)}", file=sys.stderr)

        qm_notes = qmline.group(5)
        # qm_notes = qmline.group(8)

        # Spec of QM in SVX files:
        # ;Serial number  grade(A/B/C/D/V/X)  nearest-station  resolution-station  description
        # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
        # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage

        # ;QM1 A B6 - see plan drawing there is definitely a QM

        # NB none of the SurveyStations are in the DB now, so if we want to link to a SurvexStation
        # we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one

        # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date.
        # We don't need this anymore so we don't need to create a placeholder logbook entry.

        if survexblock.survexfile.cave:
            survexblock.survexfile.cave.slug()

        self.fix_undated(survexblock)  # null-op if already set
        try:
            expoyear = str(survexblock.date.year)
        except:
            print(f">> why is survexblock.date not set ?! in LoadSurvexQM()\n {survexblock.survexfile.path}")
            expoyear = settings.EPOCH.year  # 1970

        try:
            qm = QM.objects.create(
                number=qm_no,
                # nearest_station=a_survex_station_object,  # can be null
                resolution_station_name=resolution_station_name,
                nearest_station_name=qm_nearest,
                ticked=qm_ticked,
                grade=qm_grade.upper(),
                location_description=qm_notes,
                block=survexblock,  # only set for survex-imported QMs
                blockname=blockname,  # only set for survex-imported QMs
                expoyear=expoyear,
                cave=survexblock.survexfile.cave,
            )
            # QM.objects.create() has already saved the object; no extra save() is needed
        except:
            qms = QM.objects.filter(
                number=qm_no,
                # nearest_station=a_survex_station_object,  # can be null
                resolution_station_name=resolution_station_name,
                nearest_station_name=qm_nearest,
                ticked=qm_ticked,
                grade=qm_grade.upper(),
                location_description=qm_notes,
                block=survexblock,  # only set for survex-imported QMs
                blockname=blockname,  # only set for survex-imported QMs
                expoyear=expoyear,
                cave=survexblock.survexfile.cave,
            )
            message = f" ! QM{qm_no} FAIL to create {qm_nearest} in '{survexblock.survexfile.path}' found {len(qms)}:{qms}"
            print(insp + message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )

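    # Illustrative only - a sketch of the grouping LoadSurvexQM() expects,
    # using a guessed regex of the same shape as rx_qm (not the authoritative
    # pattern defined elsewhere in this parser):
    #
    #   import re
    #   rx = re.compile(r"(?i)^\s*QM(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.+)$")
    #   m = rx.match("QM1 A hobnob_hallway_2.42 - junction of keyhole passage")
    #   # groups: ('1', 'A', 'hobnob_hallway_2.42', '-', 'junction of keyhole passage')
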
    def ProcessQM(self, survexblock, qml, comment):
        """Process the line beginning
        ;QM
        which is a QM new declaration or a QM TICK closing declaration.

        It _should_ recognise a non-numeric survey station ID, but currently doesn't.
        Valid QM grades are [a-dA-DvVxX?]: A-D, V for vertical, X for horrible and ? for unknown
        """
        # rx_qm : r"(?i)^\s*QM(\d+)\s+?(.+)\s+([\w\-\_]+)(\.([\w\.\-]+)?)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$)
        qmline = self.rx_qm.match(comment)
        if qmline:
            self.LoadSurvexQM(survexblock, qmline)
        else:
            message = f' ! QM Unrecognised as valid in "{survexblock.survexfile.path}" QM{qml.group(1)} "{qml.group(2)}" : regex failure, typo?'
            print(message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )

    def LoadSurvexComment(self, survexblock, comment):
        # ignore all comments except ;ref, ;wallet, ;QM and ;*include (for collated survex file)
        # rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')

        # This _should_ also check that the QM survey point exists in the block
        depth = " " * self.depthbegin

        refline = self.rx_commref.match(comment)
        if refline:
            comment = self.rx_ref2.sub("", comment.strip())
            print(f"{self.depthbegin:2}{depth} - rx_ref2 -- {comment=} in {survexblock.survexfile.path} :: {survexblock}")
            self.LoadSurvexRef(survexblock, comment)

        # handle
        # ; Messteam: Jörg Haussmann, Robert Eckardt, Thilo Müller
        # ; Zeichner: Thilo Müller
        # But none of these will be valid team members because they are not actually on our expo

        team = self.rx_commteam.match(comment)
        if team:
            # print(f'rx_commteam -- {comment=} in {survexblock.survexfile.path} :: {survexblock}')
            pass

        # rx_qm0 = re.compile(r"(?i)^\s*QM(\d+)\s+(.+)$")
        qml = self.rx_qm0.match(comment)
        if qml:
            self.ProcessQM(survexblock, qml, comment)

        included = self.rx_comminc.match(comment)
        # ;|*include means 'we have been included'; whereas *include means 'proceed to include'
        # No test here to check that this file has not already been included. Ouch.
        if included:
            self.ProcessIncludeLine(included)

        edulcni = self.rx_commcni.match(comment)
        # ;*edulcni means we are returning from an included file
        if edulcni:
            self.ProcessEdulcniLine(edulcni)

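    # For illustration (example values assumed), the special comment forms this
    # dispatcher recognises in the collated file:
    #
    #   ;ref 2018#23                           -> LoadSurvexRef()
    #   ;QM2 B topcamp_3.7 - dig in the floor  -> rx_qm0 match -> ProcessQM()
    #   ;|*include caves-1623/290/walk         -> ProcessIncludeLine()
    #   ;|*edulcni caves-1623/290/walk         -> ProcessEdulcniLine()
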
    def get_cave(self, path):
        """Reads the file path of a survex file and guesses the cave"""
        path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", path)
        if path_match:
            pos_cave = f"{path_match.group(1)}-{path_match.group(2)}"
            cave = getCaveByReference(pos_cave)
            return cave
        return None

    def LinearLoad(self, survexblock, path, collatefilename):
        """Loads a single survex file. Usually used to import all the survex files which have been collated
        into a single file: either the tree or the _unseens.
        Also used for loading a single file which has been edited online.
        Loads the begin/end blocks using a stack for labels.
        Uses the python generator idiom to avoid loading the whole file (21MB) into memory.
        """
        blkid = None
        pathlist = None
        args = None
        oldflags = None
        blockcount = 0
        self.lineno = 0
        slengthtotal = 0.0
        nlegstotal = 0
        self.relativefilename = path
        # self.IdentifyCave(path, svxid, depth)  # this will produce null for survex files which are geographic collections

        self.currentsurvexfile = survexblock.survexfile
        self.currentsurvexfile.save()  # django insists on this although it is already saved !?

        self.datastar = copy.deepcopy(self.datastardefault)
        self.flagsstar = copy.deepcopy(self.flagsdefault)

        def tickle():
            nonlocal blockcount

            blockcount += 1
            if blockcount % 40 == 0:
                print(".", file=sys.stderr, end="")
            if blockcount % 1600 == 0:
                print("\n", file=sys.stderr, end="")
                mem = get_process_memory()
                print(f" - MEM: {mem:7.2f} MB in use", file=sys.stderr)
                print(" ", file=sys.stderr, end="")
                sys.stderr.flush()

        def printbegin():
            nonlocal blkid
            nonlocal pathlist

            depth = " " * self.depthbegin
            self.insp = depth
            if debugprint:
                print(f"{self.depthbegin:2}{depth} - Begin for :'{blkid}'")
            pathlist = ""
            for id in self.stackbegin:
                if len(id) > 0:
                    pathlist += "." + id

        def printend():
            nonlocal args

            depth = " " * self.depthbegin
            if debugprint:
                print(f"{self.depthbegin:2}{depth} - End from:'{args}'")
                print(
                    "{:2}{} - LEGS: n:{} length:{} units:{}".format(
                        self.depthbegin, depth, self.legsnumber, self.slength, self.units
                    )
                )

        def pushblock():
            nonlocal blkid
            if debugprint:
                print(f" # datastack at 1 *begin {blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}' ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar ")
            # ------------ * DATA
            self.datastack.append(copy.deepcopy(self.datastar))
            # ------------ * DATA
            if debugprint:
                print(f" # datastack at 2 *begin {blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}' ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar ")

            # ------------ * FLAGS
            self.flagsstack.append(copy.deepcopy(self.flagsstar))
            # ------------ * FLAGS
            pass

        def popblock():
            nonlocal blkid
            nonlocal oldflags
            if debugprint:
                print(f" # datastack at *end '{blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}' ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar ")
            # ------------ * DATA
            self.datastar = copy.deepcopy(self.datastack.pop())
            # ------------ * DATA
            if debugprint:
                print(f" # datastack after *end '{blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}' ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar ")

            # ------------ * FLAGS
            self.flagsstar = copy.deepcopy(self.flagsstack.pop())
            # ------------ * FLAGS
            if debugprint:
                if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                    print(f" # POP 'any' flag now:'{self.flagsstar['skiplegs']}' was:{oldflags['skiplegs']} ")

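        # Sketch of the invariant pushblock()/popblock() maintain (illustrative
        # values): every *begin deep-copies the current *data and *flags state
        # onto a stack and the matching *end restores it, so settings made
        # inside a block cannot leak out of it.
        #
        #   datastack = []
        #   datastar = {"type": "normal"}
        #   datastack.append(copy.deepcopy(datastar))  # at *begin
        #   datastar["type"] = "passage"               # *data passage ... inside the block
        #   datastar = copy.deepcopy(datastack.pop())  # at *end
        #   assert datastar["type"] == "normal"
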
        def starstatement(star, fullline):
            """Interprets a survex command where * is the first character on the line, e.g. *begin"""
            nonlocal survexblock
            nonlocal blkid
            nonlocal pathlist
            nonlocal args
            nonlocal oldflags
            nonlocal slengthtotal
            nonlocal nlegstotal

            cmd, args = star.groups()
            cmd = cmd.lower()

            # ------------------------BEGIN
            if self.rx_begin.match(cmd):
                blkid = args.lower()
                # PUSH state ++++++++++++++
                self.depthbegin += 1
                self.stackbegin.append(blkid)
                self.unitsstack.append((self.units, self.unitsfactor))
                self.legsnumberstack.append(self.legsnumber)
                self.slengthstack.append(self.slength)
                self.teaminheritstack.append(self.inheritteam)
                self.teamcurrentstack.append(self.currentteam)
                self.dateinheritstack.append(self.inheritdate)
                self.datecurrentstack.append(self.currentdate)
                pushblock()
                # PUSH state ++++++++++++++
                self.legsnumber = 0
                self.slength = 0.0
                self.units = "metres"
                self.inheritteam = self.currentteam
                self.currentteam = set()  # zero the current team when we start a new block
                self.inheritdate = self.currentdate
                self.currentdate = None  # zero the current date when we start a new block
                printbegin()
                newsurvexblock = SurvexBlock(
                    name=blkid,
                    parent=survexblock,
                    survexfile=self.currentsurvexfile,
                    legsall=0,
                    legslength=0.0,
                )
                newsurvexblock.save()
                print(f"SB: #{newsurvexblock.id} '{newsurvexblock}' parent:{newsurvexblock.parent} f:{newsurvexblock.survexfile}")
                newsurvexblock.title = (
                    "(" + survexblock.title + ")"
                )  # copy parent initially, overwrite if it has its own
                survexblock = newsurvexblock
                survexblock.save()  # django insists on this, but we want to save at the end !
                tickle()

            # ---------------------------END
            elif self.rx_end.match(cmd):
                survexblock.legsall = self.legsnumber
                survexblock.legslength = self.slength
                printend()
                slengthtotal += self.slength
                nlegstotal += self.legsnumber

                self.fix_undated(survexblock)
                self.fix_anonymous(survexblock)
                try:
                    survexblock.parent.save()  # django insists on this although it is already saved !?
                except:
                    print(survexblock.parent, file=sys.stderr)
                    raise
                try:
                    survexblock.save()  # save to db at end of block
                except:
                    print(survexblock, file=sys.stderr)
                    raise
                confirm_team_on_trip(survexblock)
                # POP state ++++++++++++++
                popblock()
                self.inheritteam = self.teaminheritstack.pop()
                self.currentteam = self.teamcurrentstack.pop()
                self.inheritdate = self.dateinheritstack.pop()
                self.currentdate = self.datecurrentstack.pop()
                self.legsnumber = self.legsnumberstack.pop()
                self.units, self.unitsfactor = self.unitsstack.pop()
                self.slength = self.slengthstack.pop()
                blkid = self.stackbegin.pop()
                self.currentsurvexblock = survexblock.parent
                survexblock = survexblock.parent
                oldflags = self.flagsstar
                self.depthbegin -= 1
                # POP state ++++++++++++++

            # -----------------------------
            elif self.rx_title.match(cmd):
                quotedtitle = self.rx_quotedtitle.match(args)
                if quotedtitle:
                    survexblock.title = quotedtitle.groups()[0]
                else:
                    survexblock.title = args
            elif self.rx_ref.match(cmd):
                self.LoadSurvexRef(survexblock, args)
            elif self.rx_flags.match(cmd):
                oldflags = self.flagsstar
                self.LoadSurvexFlags(args)
                if debugprint:
                    if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                        print(f" # CHANGE 'any' flag now:'{self.flagsstar['skiplegs']}' was:{oldflags['skiplegs']} ")

            elif self.rx_data.match(cmd):
                if self.LoadSurvexDataNormal(survexblock, args):
                    pass
                else:
                    # Abort, we do not cope with this *data format
                    return
            elif self.rx_alias.match(cmd):
                self.LoadSurvexAlias(survexblock, args)
            elif self.rx_entrance.match(cmd):
                self.LoadSurvexEntrance(survexblock, args)
            elif self.rx_date.match(cmd):
                self.LoadSurvexDate(survexblock, args)
            elif self.rx_fix.match(cmd):
                self.LoadSurvexFix(survexblock, fullline)  # but we want the comment on this line
            elif self.rx_units.match(cmd):
                self.LoadSurvexUnits(survexblock, args)
            elif self.rx_team.match(cmd):
                self.LoadSurvexTeam(survexblock, args)
            elif self.rx_set.match(cmd):  # and self.rx_names.match(cmd):
                self.LoadSurvexSet(survexblock, args)
            elif self.rx_include.match(cmd):
                message = f" ! -ERROR *include command not expected here {path}. Re-run a full Survex import."
                print(message)
                print(message, file=sys.stderr)
                stash_data_issue(
                    parser="survex",
                    message=message,
                )
            else:
                self.LoadSurvexFallThrough(survexblock, args, cmd)

        # this is a python generator idiom.
        # see https://realpython.com/introduction-to-python-generators/
        # this is the first use of generators in troggle (Oct.2022) and saves 21 MB of memory
        with open(collatefilename, "r") as fcollate:
            for svxline in fcollate:
                self.lineno += 1
                sline, comment = self.rx_comment.match(svxline).groups()
                if comment:
                    # ; at beginning of line
                    # this catches the ;|*include NEWFILE and ;|*edulcni ENDOFFILE lines too
                    self.LoadSurvexComment(survexblock, comment)
                else:
                    # detect a merge failure inserted by version control
                    mfail = self.rx_badmerge.match(sline)
                    if mfail:
                        message = f"\n ! - ERROR version control merge failure\n - '{sline}'\n"
                        message = (
                            message + f" - line {self.lineno} in {blkid} in {survexblock}\n - NERD++ needed to fix it"
                        )
                        print(message)
                        print(message, file=sys.stderr)
                        stash_data_issue(parser="survex", message=message)
                        continue  # skip this line

                if not sline:
                    continue  # skip blank lines

                # detect a star command
                star = self.rx_star.match(sline)
                if star:
                    # yes we are reading a *command
                    starstatement(star, svxline)
                else:  # not a *cmd so we are reading data OR a ";" rx_comment failed. We hope.
                    self.LoadSurvexLeg(survexblock, sline, comment, svxline)

        self.legsnumber = nlegstotal
        self.slength = slengthtotal

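    # A minimal sketch of the generator idiom used in LinearLoad() above
    # (filename and handler are assumed examples): iterating over a file
    # object yields one line at a time, so the ~21 MB collated file is never
    # held in memory all at once.
    #
    #   with open("_1623.svx", "r") as f:  # assumed example name
    #       for line in f:                 # lazy: one line at a time
    #           handle(line)               # handle() is hypothetical
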
    def PushdownStackScan(self, survexblock, path, finname, flinear, fcollate):
        """Follows the *include links in all the survex files from the root file (usually 1623.svx)
        and reads only the *include and *begin and *end statements. It produces a linearised
        list of the include tree and detects blocks included more than once.
        """
        global stop_dup_warning

        def process_line(svxline):
            self.lineno += 1
            # detect a merge failure inserted by version control
            mfail = self.rx_badmerge.match(svxline)
            if mfail:
                message = f"\n!! - ERROR version control merge failure\n - '{svxline}'\n"
                message = message + f" - in '{path}' at line {self.lineno}\n"
                message = (
                    message + f" - line {self.lineno} {survexblock}\n - Parsing aborted. NERD++ needed to fix it"
                )
                print(message)
                print(message, file=sys.stderr)
                stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                return  # skip this survex file and all things *included in it

            includestmt = self.rx_include.match(svxline)
            if not includestmt:
                fcollate.write(f"{svxline.strip()}\n")

            sline, comment = self.rx_comment.match(svxline.strip()).groups()
            star = self.rx_star.match(sline)
            if star:  # yes we are reading a *cmd
                cmd, args = star.groups()
                cmd = cmd.lower()
                if self.rx_include2.match(cmd):
                    # rx_include2 = re.compile(r"(?i)include$")
                    # if re.match(r"(?i)include$", cmd):
                    includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))  # normalises path syntax
                    if self.never_seen(includepath, path):
                        fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                        self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath), path)
                        if os.path.isfile(fullpath):
                            # --------------------------------------------------------
                            self.depthinclude += 1
                            # fininclude = open(fullpath,'r')
                            finincludename = fullpath
                            fcollate.write(f";|*include {includepath}\n")
                            flinear.write(f"{self.depthinclude:2} {indent} *include {includepath}\n")
                            push = includepath.lower()
                            self.includestack.append(push)
                            # -----------------
                            self.PushdownStackScan(survexblock, includepath, finincludename, flinear, fcollate)
                            # -----------------
                            pop = self.includestack.pop()
                            if pop != push:
                                message = f"!! ERROR mismatch *include pop!=push '{pop}'!='{push}'\n{self.includestack}"
                                print(message)
                                print(message, file=flinear)
                                print(message, file=sys.stderr)
                                stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                            flinear.write(f"{self.depthinclude:2} {indent} *edulcni {pop}\n")
                            fcollate.write(f";|*edulcni {pop}\n")
                            # fininclude.close()
                            self.depthinclude -= 1
                            # --------------------------------------------------------
                        else:
                            message = f" ! ERROR *include file '{includepath}' not found, listed in '{fin.name}'"
                            print(message)
                            print(message, file=sys.stderr)
                            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                elif self.rx_begin2.match(cmd):
                    # elif re.match(r"(?i)begin$", cmd):
                    self.depthbegin += 1
                    depth = " " * self.depthbegin
                    if args:
                        pushargs = args
                    else:
                        pushargs = " "
                    self.stackbegin.append(pushargs.lower())
                    flinear.write(f" {self.depthbegin:2} {depth} *begin {args}\n")
                    pass
                elif self.rx_end2.match(cmd):
                    # elif re.match(r"(?i)end$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(f" {self.depthbegin:2} {depth} *end {args}\n")
                    if not args:
                        args = " "
                    popargs = self.stackbegin.pop()
                    if popargs != args.lower():
                        message = (
                            f"!! ERROR mismatch in BEGIN/END labels pop!=push '{popargs}'!='{args}'\n{self.stackbegin}"
                        )
                        print(message)
                        print(message, file=flinear)
                        print(message, file=sys.stderr)
                        stash_data_issue(parser="survex", message=message, url=None, sb=(path))

                    self.depthbegin -= 1
                    pass
                elif self.rx_title2.match(cmd):
                    # elif re.match(r"(?i)title$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(f" {self.depthbegin:2} {depth} *title {args}\n")
                    pass

        indent = " " * self.depthinclude
        sys.stderr.flush()
        self.callcount += 1

        if self.callcount % 10 == 0:
            print(".", file=sys.stderr, end="")
        if self.callcount % 500 == 0:
            print("\n ", file=sys.stderr, end="")

        if path in self.svxfileslist:
            # We have already used os.normpath() so this is OK. "/../" and "//" have been simplified already.
            message = f" * Warning. Duplicate detected. We have already seen this *include '{path}' from another survex file. Detected at callcount:{self.callcount} depth:{self.depthinclude}"
            print(message)
            print(message, file=flinear)
            # print(message,file=sys.stderr)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            if self.svxfileslist.count(path) > 2:
                message = f" ! ERROR. Should have been caught before this. Survex file already *included 2x. Probably an infinite loop so fix your *include statements that include this. Aborting. {path}"
                print(message)
                print(message, file=flinear)
                print(message, file=sys.stderr)
                stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                return
            return
        try:
            # python generator idiom again. Not important here as these are small files
            with open(finname, "r") as fin:
                for svxline in fin:
                    process_line(svxline)

            self.svxfileslist.append(path)

        except UnicodeDecodeError:
            # some bugger put an umlaut in a non-UTF survex file ?!
            message = f" ! ERROR *include file '{path}' in '{survexblock}' has UnicodeDecodeError. Omitted."
            print(message)
            print(message, file=sys.stderr)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            return  # skip this survex file and all things *included in it
        except:
            message = f" ! ERROR *include file '{path}' in '{survexblock}' has unexpected error on opening or reading file. OMITTED!"
            print(message)
            print(message, file=sys.stderr)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            raise
            return  # unreachable after raise; kept to mirror the other branch

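    # Illustrative only: for an assumed tree where root.svx *includes a.svx,
    # and a.svx *includes b.svx, PushdownStackScan() emits one linear stream:
    #
    #   ;|*include a
    #   ...contents of a before its *include line...
    #   ;|*include b
    #   ...contents of b...
    #   ;|*edulcni b
    #   ...rest of a...
    #   ;|*edulcni a
    #
    # which LinearLoad() later consumes in a single pass.
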
    def never_seen(self, incpath, parent):
        """The _unseen files may include survex files we have already seen, and we do not
        want to process them again. For the _unseens this is not an error, but for the main
        *include tree it is an error.
        """
        if incpath in self.uniquefile:
            self.uniquefile[incpath].append(parent)

            if self.svxpass == self.TREE:
                message = (
                    f" DUP: skipping non-unique survex filepath, '{incpath}' - #{len(self.uniquefile[incpath])} '{self.uniquefile[incpath]}'"
                )
                print(message)
                stash_data_issue(parser='survex', message=message)
                for p in self.uniquefile[incpath]:
                    if p in self.uniquefile:
                        print(f"{p} <- {self.uniquefile[p]}")
            return False
        else:
            self.uniquefile[incpath] = [parent]
            return True

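    # Sketch of the bookkeeping in never_seen() (illustrative values): the
    # first sighting of a path records its parent and returns True; later
    # sightings extend the parent list and return False.
    #
    #   uniquefile = {}
    #   # 1st call: uniquefile["caves-1623/290/walk"] = ["1623"]          -> True
    #   # 2nd call: uniquefile["caves-1623/290/walk"].append("_unseens")  -> False
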
    def RunSurvexIfNeeded(self, fullpath, calledpath):
        now = time.time()
        cav_t = now - 365 * 24 * 3600
        log_t = now - 365 * 24 * 3600
        svx_t = now - 365 * 24 * 3600

        def runcavern():
            """regenerates the .3d file from the .svx if it is older than the svx file, or older than the software,
            or randomly using chaosmonkey() just to keep things ticking over.
            """
            try:
                print(
                    f" - Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{fullpath}'\n at '{logpath}'\n"
                )
                print(
                    f"days svx old: {(svx_t - log_t)/(24*3600):.1f} cav:{(cav_t - log_t)/(24*3600):.1f} log old: {(now - log_t)/(24*3600):.1f}"
                )

                outputdir = Path(str(f"{fullpath}.svx")).parent
                sp = subprocess.run(
                    [settings.CAVERN, "--log", f"--output={outputdir}", f"{fullpath}.svx"],
                    capture_output=True,
                    check=False,
                    text=True,
                )
                if sp.returncode != 0:
                    message = f" ! Error when running {settings.CAVERN}: {fullpath}"
                    url = f"/survexfile{fullpath}.svx".replace(str(settings.SURVEX_DATA), "")
                    stash_data_issue(parser="survex", message=message, url=url)
                    print(message)
                    print(
                        "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
                    )
                self.caverncount += 1

                # should also collect all the .err files too and create a DataIssue for each one which
                # - is nonzero in size AND
                # - has Error greater than 5% anywhere, or some other more serious error

                errpath = Path(fullpath + ".err")
                if errpath.is_file():
                    if errpath.stat().st_size == 0:
                        errpath.unlink()  # delete empty closure error file
            except:
                message = f' ! FAIL running cavern on survex file "{fullpath}" specified in *include in {calledpath} '
                stash_data_issue(parser="survex", message=message)
                print(message)

        svxpath = Path(fullpath + ".svx")
        logpath = Path(fullpath + ".log")
        Path(svxpath).parent

        if not svxpath.is_file():
            message = f' ! BAD. "{fullpath}" is not a file, specified in *include in {calledpath} '
            stash_data_issue(parser="survex", message=message)
            print(message)
            return

        if not logpath.is_file():  # always run if logfile not there
            runcavern()
            return

        # NB this unconditional assignment means the "which cavern" probe below never runs
        self.caverndate = now - 2 * 365 * 24 * 3600

        if not self.caverndate:
            sp = subprocess.run(["which", f"{settings.CAVERN}"], capture_output=True, check=False, text=True)
            if sp.returncode != 0:
                message = f' ! Error running "which" on {settings.CAVERN}'
                stash_data_issue(parser="survex", message=message)
                print(message)
                print(
                    "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
                )
            self.caverndate = os.path.getmtime(sp.stdout.strip())
        else:
            self.caverndate = now - 2 * 365 * 24 * 3600
        cav_t = self.caverndate
        log_t = os.path.getmtime(logpath)
        svx_t = os.path.getmtime(svxpath)
        now = time.time()

        if svx_t - log_t > 0:  # stale, svx file is newer than log
            runcavern()
            return
        if now - log_t > 60 * 24 * 60 * 60:  # >60 days, re-run anyway
            runcavern()
            return
        if cav_t - log_t > 0:  # new version of cavern
            runcavern()
            return
        if chaosmonkey(350):  # one in every 350 runs
            runcavern()

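# A minimal sketch (not called by troggle) of the staleness rule implemented
# in RunSurvexIfNeeded() above; the arguments are plain epoch timestamps.
def _example_needs_cavern_rerun(svx_t, log_t, cav_t, now):
    """True when the cavern .log/.3d output should be regenerated."""
    if svx_t > log_t:  # survex source edited since the last cavern run
        return True
    if now - log_t > 60 * 24 * 60 * 60:  # log older than 60 days: re-run anyway
        return True
    if cav_t > log_t:  # the cavern binary is newer than the log
        return True
    return False
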
def FindAndLoadSurvex():
    """Follows the *include links successively to find survex files
    This proceeds in 3 phases:
    1. The root survex file is read and all the *include files are found, using PushdownStackScan()
    2. All the other survex files in the :loser: repo are found, and their *includes found,
       using another PushdownStackScan() [duplicates omitted]
    3. The combined expanded file containing all the survex data is parsed as a single file,
       using LinearLoad()
    """

    def make_survexblockroot():
        survexfileroot = SurvexFile(
            path=settings.SURVEX_TOPNAME, cave=None)
        survexfileroot.save()
        survexblockroot = SurvexBlock(
            name=ROOTBLOCK, survexfile=survexfileroot, legsall=0, legslength=0.0)
        # crashes here sometimes on MariaDB complaining that cave_id should not be null. But it should be.
        # django.db.utils.IntegrityError: (1048, "Column 'cave_id' cannot be null")
        # fix by restarting db on server
        # sudo service mariadb stop
        # sudo service mariadb start
        survexblockroot.save()
        return survexblockroot

    print(" - redirecting stdout to svxblks.log...")
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file
    sys.stdout = open("svxblks.log", "w")

    print(f" - Scanning Survex Blocks tree from {settings.SURVEX_TOPNAME}.svx ...", file=sys.stderr)
    survexblockroot = make_survexblockroot()
    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only
    collatefilename = "_" + survexfileroot.path + ".svx"

    svx_scan = LoadingSurvex()
    svx_scan.callcount = 0
    svx_scan.depthinclude = 0
    fullpathtotop = str(Path(survexfileroot.path).parent / survexfileroot.path)

    print(f" - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    svx_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    svx_scan.uniquefile[str(survexfileroot)] = ["0"]

    indent = ""
    fcollate = open(collatefilename, "w")

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START '{survexfileroot}'", file=sys.stderr)
    flinear = open("svxlinear.log", "w")
    flinear.write(f" - MEM:{mem0:7.2f} MB START '{survexfileroot.path}'\n")
    print(" ", file=sys.stderr, end="")

    finrootname = Path(settings.SURVEX_DATA, survexfileroot.path + ".svx")
    fcollate.write(f";*include {survexfileroot.path}\n")
    flinear.write(f"{svx_scan.depthinclude:2} {indent} *include {survexfileroot.path}\n")

    import cProfile
    import pstats
    from pstats import SortKey

    pr = cProfile.Profile()
    pr.enable()
    svx_scan.svxpass = svx_scan.TREE
    # ----------------------------------------------------------------
    svx_scan.PushdownStackScan(survexblockroot, survexfileroot.path, finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    svx_scan.svxpass = ""
    pr.disable()
    with open("PushdownStackScan.prof", "w") as f:
        ps = pstats.Stats(pr, stream=f)
        ps.sort_stats(SortKey.CUMULATIVE)
        ps.print_stats()

    flinear.write(f"{svx_scan.depthinclude:2} {indent} *edulcni {survexfileroot.path}\n")
    fcollate.write(f";*edulcni {survexfileroot.path}\n")
    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP {survexfileroot.path}\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED\n")
    flinear.write(f" - {len(svx_scan.svxfileslist):,} survex files in linear include list \n")
    flinear.write(f" - {len(svx_scan.uniquefile):,} unique survex files in linear include list \n")
    for j in svx_scan.svxfileslist:
        if j not in svx_scan.uniquefile:
            flinear.write(f" - '{j}' {type(j)} not in unique list \n")
    for f in svx_scan.uniquefile:
        # flinear.write(f" - '{f}' {type(f)} {svx_scan.uniquefile[f]} \n")
        if len(svx_scan.uniquefile[f]) > 1:
            flinear.write(f" - '{f}' {type(f)} {svx_scan.uniquefile[f]} dup survex files \n")

    print(f"\n - {svx_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files", file=sys.stderr)
    print(f" - {len(svx_scan.svxfileslist):,} survex files from tree in linear include list", file=sys.stderr)
    print(f" - {len(svx_scan.uniquefile):,} unique survex files from tree in linear include list", file=sys.stderr)
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END ", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    #
    # Process all the omitted files in :loser: with some exceptions
    unseens = set()
    b = []

    for p in Path(settings.SURVEX_DATA).rglob("*.svx"):
        if p.is_file():
            po = p.relative_to(Path(settings.SURVEX_DATA))
            pox = po.with_suffix("")
            if str(pox) not in svx_scan.svxfileslist:
                # print(f"[{pox}]", file=sys.stderr)
                unseens.add(pox)
            else:
                b.append(pox)

    if len(b) != len(svx_scan.svxfileslist):
        print(
            f" ! Mismatch. {len(b)} survex files found which should be {len(svx_scan.svxfileslist)} in main tree",
            file=sys.stderr,
        )

    # These exceptions WILL nevertheless be parsed if they are *included by any file which is not excepted
    unseensroot = re.sub(r"\.svx$", "", UNSEENS)
    excpts = EXCEPTPREFIX
    excpts.append(unseensroot)
    removals = set()
    for x in unseens:
        for o in excpts:
            if str(x).strip().startswith(o):
                removals.add(x)
    # special fix for .svx file not actually in survex format
    unseens.remove(Path("fixedpts/gps/gps00raw"))

    for x in removals:
        unseens.remove(x)
        print(f" x NOT parsing {x}")
    print(
        f"\n - {len(unseens)} survex files found which were not included in main tree. ({len(svx_scan.svxfileslist)} in main tree)",
        file=sys.stderr,
    )
    print(f" -- (but ignoring {len(removals)} of them)", file=sys.stderr)

    s_date = date.today().isoformat().replace('-', '.')
    print(f" -- Now loading the previously-omitted survex files as {UNSEENS} *date {s_date}", file=sys.stderr)
    print(f" - (except: {excpts})", file=sys.stderr)

    with open(Path(settings.SURVEX_DATA, UNSEENS), "w") as u:
        u.write(
            f"; {len(unseens):,} survex files not *included by {settings.SURVEX_TOPNAME} (which are {len(svx_scan.svxfileslist):,} files)\n"
        )
        u.write(f"; autogenerated by parser/survex.py from databasereset.py on '{datetime.now(timezone.utc)}'\n")
        u.write(f"; omitting any file beginning with {excpts}\n\n")
        u.write("*begin troggle_unseens\n")
        u.write("*team something Nobody\n")
        u.write(f"*date {s_date}\n")
        u.write("*title \"Collated unseen and unlinked survex files\"\n")
        for x in sorted(unseens):
            u.write(f" *include {x}\n")
        u.write("*end troggle_unseens\n")

    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only

    omit_scan = LoadingSurvex()
    omit_scan.callcount = 0
    omit_scan.depthinclude = 0
    fullpathtotop = str(Path(settings.SURVEX_DATA, UNSEENS))

    # copy the lists to prime the next pass through the files
    omit_scan.svxfileslist = svx_scan.svxfileslist[:]

    svx_scan.svxfileslist = []  # free memory
    svx_scan = None  # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.?

    omit_scan.uniquefile[unseensroot] = ["0"]

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START '{unseensroot}'", file=sys.stderr)
    # flinear = open('svxlinear.log', 'w')
    flinear.write(f" - MEM:{mem0:7.2f} MB START '{unseensroot}'\n")
    print(" ", file=sys.stderr, end="")

    # this is a bit tricky as some unseen files will *include files we have already seen, which
    # we should not process again.
    finrootname = fullpathtotop
    fcollate.write(f";*include {UNSEENS}\n")
    flinear.write(f"{omit_scan.depthinclude:2} {indent} *include {unseensroot}\n")
    omit_scan.svxpass = omit_scan.ODDS
    # ----------------------------------------------------------------
    omit_scan.PushdownStackScan(survexblockroot, unseensroot, finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    omit_scan.svxpass = ""

    flinear.write(f"{omit_scan.depthinclude:2} {indent} *edulcni {unseensroot}\n")
    fcollate.write(f";*edulcni {UNSEENS}\n")

    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP {UNSEENS} Unseen Oddments\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED Unseen Oddments\n")
    flinear.write(f" - {len(omit_scan.svxfileslist):,} survex files in linear include list Unseen Oddments \n")

    flinear.close()
    fcollate.close()

    print(
        f"\n - {omit_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files in the unseen list",
        file=sys.stderr,
    )

    print(
        f" - {len(omit_scan.svxfileslist):,} survex files in linear include list including previously unseen ones \n",
        file=sys.stderr,
    )
    omit_scan = None  # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.?

    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END ", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # Before doing this, it would be good to identify the *equate and *entrance we need that are relevant to the
    # entrance locations currently loaded after this by LoadPos(), but could better be done before ?
    # look in MapLocations() for how we find the entrances

    print("\n - Loading All Survex Blocks (LinearLoad)", file=sys.stderr)
    svx_load = LoadingSurvex()

    # pr2 = cProfile.Profile()
    # pr2.enable()
    print(" ", file=sys.stderr, end="")
    # ----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot, survexfileroot.path, collatefilename)
    # ----------------------------------------------------------------
    # pr2.disable()
    # with open('LinearLoad.prof', 'w') as f:
    #     ps = pstats.Stats(pr2, stream=f)
    #     ps.sort_stats(SortKey.CUMULATIVE)
    #     ps.print_stats()

    mem1 = get_process_memory()
    print(f"\n - MEM:{mem1:7.2f} MB STOP", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # Close the logging file, Restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    legsnumber = svx_load.legsnumber
    fixnumber = len(svx_load.fixes)
    mem1 = get_process_memory()

    print(f" - Number of SurvexDirectories: {len(svx_load.svxprim):,}")
    tf = SurvexFile.objects.all().count() - len(removals)
    print(f" - Number of SurvexFiles: {tf:,}")
    print(f" - Number of Survex *fix: {fixnumber:,}")
    print(f" - Number of Survex legs: {legsnumber:,}")

    for f in svx_load.fixes:
        survexblock, name, altitude, comment = svx_load.fixes[f]
        s = survexblock
        spath = s.parent.survexfile
        # sprevious = None
        # while s.parent != sprevious:
        #     spath += str(s.parent) + ":" + spath
        #     sprevious = s
        #     if not s.parent:
        #         break
        #     s = s.parent
        ff = survexblock.survexfile
        if comment:
            # print(f"FIX {survexblock} {altitude} {comment}")
            if re.match(r"(?i)[^s]*srtm[\s\S]*", comment.lower()):
                print(f"SRTM {ff}.svx::{survexblock} - {spath}.svx - alt={altitude} '{comment}'")
            if re.match(r"(?i)[^s]*radost[\s\S]*", comment.lower()):
                print(f"RDST {ff}.svx::{survexblock} - {spath}.svx - alt={altitude} '{comment}'")

    svx_load = None

    return legsnumber

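# A minimal sketch (illustrative, not called by troggle) of the stdout
# redirection pattern used in FindAndLoadSurvex(): swap sys.stdout for a file
# handle, do the noisy work, then restore the saved handle. The log name is
# an assumed example.
def _example_redirect_stdout(logname="example.log"):
    stdout_orig = sys.stdout
    sys.stdout = open(logname, "w")
    try:
        print("this line goes to the log file, not the console")
    finally:
        sys.stdout.close()
        sys.stdout = stdout_orig
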
def display_contents(blocks):
    for b in blocks:
        print(f"B {b} {b.parent=} {b.expedition=}")
        sfs = SurvexFile.objects.filter(survexblock=b)
        for sf in sfs:
            print(f" SF {sf}")
            # print(f" SD {sf.survexdirectory} {sf.survexdirectory.cave}")
            # print(f" SD {sf.survexdirectory} {sf.survexdirectory.path}")

        ws = Wallet.objects.filter(survexblock=b)
        for w in ws:
            print(f" W {w}")
        sfs = QM.objects.filter(block=b)
        for sf in sfs:
            print(f" QM {sf}")
        sfs = SurvexStation.objects.filter(block=b)
        for sf in sfs:
            print(f" SS {sf}")

def parse_one_file(fpath):  # --------------------------------------in progress-------------------
    """Parse just one file. Use when re-loading after editing.

    NOTE: *include lines are ignored.
    But this is fine, they will already be in the system, UNLESS a new *include line is edited in
    without also opening that file in the online editor.

    In the initial file parsing in databaseReset, the *include expansion is done
    in an earlier stage than LinearLoad(). By the time LinearLoad() is called,
    all the *include expansion has happened.

    There are two cases:
    1. the path is for an existing cave 1626-359:
        svxpath = 'caves-1626/359/new_passage'
       or
        svxpath = 'caves-1623/161/triassic/new_passage'
       or possibly
        svxpath = 'caves-1623/161/new_series/new_passage'

    2. the path is for an entirely new cave which does not exist '2030-BL-99'
        svxpath = 'caves-1626/2030-BL-99/first_explore'

    This creates its own LoadingSurvex() class instance called svx_load
    """
    debugprint = True

    def find_cave_from_path(svxpath):
        """Seems simple enough.. but needs refactoring with Class method IdentifyCave()
        This will normally be called from MakeRoot only when creating a new survex file
        from inside the survexfile editor page by overwriting the URL in the
        browser bar.
        """
        cavelist = GetCaveLookup()
        rx_cavepath = re.compile(r"(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)/?.*")
        print(f"find_cave_from_path({svxpath})")
        path_match = rx_cavepath.search(svxpath)
        if path_match:
            area = path_match.group(1)
            caveid = path_match.group(2)
            caveslug = f"{area}-{caveid}".lower()  # GetCaveLookup is all UPPER() and all lower() but not mixed

            print(f"find_cave_from_path({svxpath}): {caveslug=}")
            if caveslug.lower() in cavelist:
                print(f"find_cave_from_path({svxpath}): {caveslug=} YES {cavelist[caveslug.lower()]=}")
                return cavelist[caveslug.lower()]
        else:
            print(f"find_cave_from_path({svxpath}) FAIL with correct format, trying with short-form...")
            rx_alias = re.compile(r"(?i)([-\d\w]+|\d\d\d\d-?\w+-\d+)/?.*")
            print(f"find_cave_from_path({svxpath}) attempting short form alias")
            path_match = rx_alias.search(svxpath)
            if path_match:
                caveid = path_match.group(1)
                print(f"find_cave_from_path({svxpath}): {caveid=}")
                if caveid.lower() in cavelist:
                    print(f"find_cave_from_path({svxpath}): {caveid=} YES {cavelist[caveid.lower()]=}")
                    return cavelist[caveid.lower()]

    def make_fileroot(svxpath):
        """Returns a SurvexFile, not a file_object.path
        Used by the online survex file editor when re-parsing
        or tries to find the primary survex file for this cave
        """
        cave = find_cave_from_path(svxpath)
        if cave:
            cave_svxpath = cave.survex_file[:-4]  # remove .svx
            fileroot = SurvexFile.objects.get(path=cave_svxpath)
            print(f" - Setting the root survexfile for this import: {svxpath} to be that for cave {cave}")
            return fileroot

        # make a dummy SurvexFile object, which will be removed later
        dummyroot = SurvexFile(path=svxpath)
        dummyroot.save()
        print(f" - Making/finding a new dummy root survexfile for this import: {svxpath}")
        print(f" - new fileroot {type(dummyroot)} for {svxpath} with cave {cave}\n - {dummyroot.primary=} {dummyroot.path=} {dummyroot.cave=} ")
        return dummyroot

    def parse_new_svx(fpath, svx_load, svxfileroot=None):
        """We need a dummy survex block which has the survexfile being parsed
        as its .survexfile field. But it is used in two ways, it is also
        set as the parent block for the new blocks being created. This has to be fixed
        later.
        This all needs refactoring.

        We also need to re-plumb the fileroot after importing, so that
        the new survexfile appears in the survexdirectory lists?

        Rather than just arbitrarily creating something,
        we should see if this is a known cave first. If it isn't, don't bother
        as it might be a fixedpts survex file not a cave survex file
        """
        if svxfileroot is None:
            # Not seen this survexfile before, so it does not exist
            # but we don't create it yet..
            svxfileroot = make_fileroot(fpath)

        print(f"## parse_new_svx(): {svxfileroot=} from {fpath}")
        # It is vital that the block has attached the survexfile object which is being parsed.
        block_dummy = SurvexBlock(
            name="", survexfile=svxfileroot, legsall=0, legslength=0.0
        )

        block_dummy.name = f"#{block_dummy.id}_{str(Path(str(svxfileroot)))}"
        # svxfileroot.save()
        block_dummy.save()
        print(f" - block_dummy now '{block_dummy}' {type(block_dummy)} id={block_dummy.id} f:{block_dummy.survexfile}\n -- {block_dummy.name=}")

        # ----------------------------------------------------------------
        svx_load.LoadSurvexFile(fpath)  # otherwise only called for *include files
        svx_load.LinearLoad(block_dummy, svxfileroot.path, fname)
        # ----------------------------------------------------------------

        # Now we don't need or want the dummy any more

        block_dummy.delete()

    global svx_load
    print(f"\n - Loading One Survex file '{fpath}'", file=sys.stderr)
    svx_load = LoadingSurvex()

    fname = Path(settings.SURVEX_DATA, (fpath + ".svx"))

    svxs = SurvexFile.objects.filter(path=fpath)
    if svxs:
        if len(svxs) > 1:
            print(f" ! Mistake? More than one survex file object in database with the same file-path {svxs}")
            print(" - Aborting file parsing & import into database.")
            return False
        print(f" - Pre-existing survexfile {svxs}.")
        existingsvx = SurvexFile.objects.get(path=fpath)
        existingcave = existingsvx.cave
        print(f" - survexfile id={existingsvx.id} {existingsvx} {existingcave}")

        sbs = existingsvx.survexblock_set.all()
        existingparent = None
        parents = set()
        if sbs:
            for sb in sbs:
                # print(f" - {sb.id} checking survex block {sb=}")
                try:
                    if sb.parent:
                        parents.add(sb.parent)
                        # print(f" - adding {sb.parent=}")
                except:
                    print(f" ! FAILURE to access sb.parent {sb=}\n ! {sb.parent_id=} ")  # \n{dir(sb)}
                    # even though the parent_id exists.. hmm.
            for sb in sbs:
                # print(f" - {sb.id} {sb.pk} {sb}")
                sb_keep = sb
                if sb not in parents:
                    # print(f" - {sb.id} Deleting survex block {sb=}")
                    sb.delete()

        if parents:
            # print(f" - parents get {parents}")
            if len(parents) > 1:
                print(" - WARNING more than one parent survex block!")
            existingparent = parents.pop()  # removes it
            parents.add(existingparent)  # restores it

        print(f" - Reloading and parsing this survexfile '{fpath}' Loading...")
        # Logic is that we need an SB which links to the survexfile we are parsing for the parser
        # to work, but we delete all those before we start parsing. Urk.
        # ===========
        parse_new_svx(fpath, svx_load, svxfileroot=existingsvx)
        # ===========

        print(f" - survexfile id={existingsvx.id} update ")

        if parents:
            print(f" - parents set {parents}")
            sbs = existingsvx.survexblock_set.all()
            if len(sbs) < 1:
                print(" ! No survex blocks found. Parser failure...")
            for sb in sbs:
                print(f" - {sb.id} re-setting survex block parent {sb=}")
                sb.parent = existingparent  # should be all the same
                sb.save()

    else:
        print(f" - Not seen this survexfile before '{fpath}' Loading...")
        # ===========
        parse_new_svx(fpath, svx_load)
        # ===========

    svx_load = None
    return True

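# Example usage (illustrative; the path is an assumed example, not a real
# cave): returns True on success, False if the path matches more than one
# SurvexFile object in the database.
#
#   parse_one_file("caves-1623/290/new_passage")
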
def set_survexblocks():
    """This sets the links directly to each survexblock.
    NB ARGE inserts survexblocks from several different expeditions into one survexfile
    Also a survex block e.g. "1623" will include lots of different expeditions

    from models/survex.py:

    name = models.CharField(max_length=100)  ..often blank, no #NoName..
    date = models.DateField(  ..inherited
    title = models.CharField(max_length=200)  ..inherited
    parent = models.ForeignKey("SurvexBlock"

    expedition = models.ForeignKey("Expedition",
    survexfile = models.ForeignKey("SurvexFile",
    scanswallet = models.ForeignKey("Wallet",

    legsall = models.IntegerField(null=True)
    legslength = models.FloatField(null=True)
    """

    # Need to find the optimal Django way of doing this query.
    # It's a mess now
    # for b in SurvexBlock.objects.all():
    #     if not b.date:
    #         print(f" Block {b} on {b.survexfile} HAS NULL DATE ")

    cache = {}
    allsvx = SurvexFile.objects.all()
    for s in allsvx:
        if s.path:
            cache[s.path] = s

    wallets = Wallet.objects.all()
    for wallet in wallets:

        if svxfiles := wallet.survexfiles():  # reads from JSON, should be cached already
            for svx in svxfiles:
                if svx:
                    if svx.endswith(".svx"):
                        svx = svx.replace(".svx", "")
                    if svx in cache:
                        sfile = cache[svx]
                        # try:
                        #     # there are survex files we ignore when troggle parses, and some of these are referred to in wallets
                        #     sfile = SurvexFile.objects.get(path=svx)  # .select_related("survexblocks")
                        #     # print(sfile)
                        # except:
                        #     continue
                        blocks = SurvexBlock.objects.filter(survexfile=sfile)
                        for b in blocks:
                            if b.date:  # many are NULL, e.g. ARGE, so have no wallet
                                if b.scanswallet == wallet:
                                    pass
                                elif b.scanswallet:
                                    if b.date > date(2024, 1, 1) and b.date < date(2025, 1, 1):
                                        print(f"2024-check: not set {wallet} on {b.survexfile} for block {b} as set explicitly to {b.scanswallet}")
                                else:
                                    b.scanswallet = wallet
                                    b.save()
                                    if b.date > date(2024, 1, 1) and b.date < date(2025, 1, 1):
                                        print(f"2024-check: setting {wallet} on {b.survexfile} for block {b}")

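# A minimal sketch (not called by troggle) of the caching pattern used in
# set_survexblocks() above: one queryset iteration builds a path->object
# dict, so the per-wallet loop does dictionary lookups instead of one
# database query per survex file name.
def _example_build_path_cache():
    cache = {}
    for s in SurvexFile.objects.all():  # a single queryset iteration
        if s.path:
            cache[s.path] = s
    return cache
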
def survexifywallets():
    """Gets the caves from the list of survexblocks

    We seem to have a LOT of blocks with no attached scanswallet. Is this because we are
    not inheriting *ref properly in the survexfile ?
    """
    print(" - Update wallets with survex data")

    start = time.time()
    # if there is a wallet for a block, add the people to the wallet
    sprsall = SurvexPersonRole.objects.all().select_related("person").select_related("survexblock")
    for spr in sprsall:
        w = spr.survexblock.scanswallet
        if w:
            w.persons.add(spr.person)

    duration = time.time() - start
    print(f" - {duration:7.2f} s to add people to wallets ", file=sys.stderr)
    start = time.time()

    set_survexblocks()  # reads JSON, sets survexblocks if survexfiles specified on wallet JSON

    duration = time.time() - start
    print(f" - {duration:7.2f} s to set survexblock:wallet using JSON survexfiles ", file=sys.stderr)
    start = time.time()

    for w in Wallet.objects.all():
        blocks = SurvexBlock.objects.filter(scanswallet=w).select_related("survexfile")
        for b in blocks:
            if b.survexfile.cave:
                w.caves.add(b.survexfile.cave)
                w.save()

    duration = time.time() - start
    print(f" - {duration:7.2f} s to add caves to wallets ", file=sys.stderr)
    start = time.time()

    # Find the survex blocks which are 'ours' i.e. ignore all those (ARGE etc) without expo people attached.
    cuccblocks = set()
    for spr in SurvexPersonRole.objects.all():
        cuccblocks.add(spr.survexblock)

    # Because we have just run set_survexblocks(), this should only complain if there is no *ref and no wallet that links to its parent file
    sentinelbad = Wallet.objects.get(walletname="1983#00")
    for b in cuccblocks:
        if b.date > date(2001, 1, 1):  # do we care about older ones? 1999 certainly has different wallet system
            if not b.scanswallet:
                if b.parent.scanswallet:
                    if b.parent.scanswallet != sentinelbad:
                        b.scanswallet = b.parent.scanswallet
                        continue
                message = f" ! *REF missing {b.date} {b.survexfile}.svx : '{b}'"  # msg appears if a *ref "something in quotes" actually does exist.
                if b.date > date(2019, 1, 1) and b.date < date(2020, 1, 1):
                    print(message, file=sys.stderr)
                url = get_offending_filename(b.survexfile.path)
                DataIssue.objects.update_or_create(parser="ref", message=message, url=url)

    duration = time.time() - start
    print(f" - {duration:7.2f} s to check missing *ref on survexblocks ", file=sys.stderr)
    start = time.time()

def LoadSurvexBlocks():
    global dup_includes
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)
    start = time.time()

    print(" - Flushing All Survex Blocks...")
    # why does this increase memory use by 20 MB ?!
    # We have foreign keys, Django needs to load the related objects
    # in order to resolve how the relation should handle the deletion:
    # https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.ForeignKey.on_delete
    SurvexBlock.objects.all().delete()
    SurvexFile.objects.all().delete()
    SurvexPersonRole.objects.all().delete()
    SurvexStation.objects.all().delete()
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now. Foreign key objects loaded on deletion. ", file=sys.stderr)

    print(" - Flushing survex Data Issues ")
    global dataissues
    dataissues = []
    DataIssue.objects.filter(parser="survex").delete()
    DataIssue.objects.filter(parser="xSvxDate").delete()
    DataIssue.objects.filter(parser="survexleg").delete()
    DataIssue.objects.filter(parser="survexunits").delete()
    DataIssue.objects.filter(parser="survex team").delete()
    DataIssue.objects.filter(parser="ref").delete()
    # DataIssue.objects.filter(parser="xEntrances").delete()
    print(" - survex Data Issues flushed")
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)
    print(" - Loading Survex Blocks...")
    memstart = get_process_memory()
    # ----------------------------------------------------------------
    FindAndLoadSurvex()
    # ----------------------------------------------------------------
    memend = get_process_memory()
    print(f" - MEMORY start:{memstart:.3f} MB end:{memend:.3f} MB increase={memend - memstart:.3f} MB")

    global person_pending_cache
    for sb in person_pending_cache:
        if len(person_pending_cache[sb]) > 0:
            print(" ")
            message = f" ! PENDING team list not emptied {sb.survexfile.path} {len(person_pending_cache[sb])} people: {person_pending_cache[sb]}"
            stash_data_issue(parser="survex", message=message, url=None, sb=(sb.survexfile.path))
            print(message)
    # duration = time.time() - start
    # print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
    store_data_issues()
    # duration = time.time() - start
    # print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
    if dup_includes > 0:
        print(f" - ERROR: There are {dup_includes} duplicate *includes in the final list. See DataIssues report.")
    print(" - Loaded All Survex Blocks.")