import sys
import os
import re
import time
import copy
import subprocess

from pathlib import Path
from datetime import datetime, timedelta, date, timezone

from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware

import troggle.settings as settings
from troggle.core.models.caves import Cave, Entrance, QM, LogbookEntry
from troggle.core.utils import get_process_memory, chaosmonkey
from troggle.parsers.people import GetPersonExpeditionNameLookup, known_foreigner
from troggle.parsers.logbooks import GetCaveLookup
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.models.survex import SurvexPersonRole, Wallet, SurvexDirectory, SurvexFile, SurvexBlock, SurvexStation

'''Imports the tree of survex files following from a defined root .svx file
It also scans the Loser repo for all the svx files, which it loads individually afterwards.
'''

todo = '''
        
-#BUG, if *date comes after *team, the person's date is not set at all. 
It needs re-setting at the end of the block.

- LoadSurvexFile() Creates a new current survexfile and valid .survexdirectory
        The survexblock passed-in is not necessarily the parent. FIX THIS.
        
- When Olly implements LEG in the  'dump3d --legs' utility, then we can use that to get the length of
all the legs in a survex block instead of adding them up ourselves. Which means that we can
ignore all the  Units and offset stuff, that troggle will work with survex files with backsights,
repeated readings from distox etc.. Not actually useful for pre 2022 survey data, but good future-proofing.
Also it will be a tiny bit more accurate as these leg lengths are after loop closure fixup.
        
'''
# Module-level parse state: roots of the two survex block trees, set during import.
survexblockroot = None
survexomitsroot = None
# Names given to the two synthetic top-level survex blocks.
ROOTBLOCK = "rootblock"
OMITBLOCK = "omitblock"
METRESINFEET = 3.28084  # feet per metre; used to convert '*units feet' tape lengths

stop_dup_warning = False
debugprint = False # Turns on debug printout for just one *include file
debugprinttrigger = "!"
# debugprinttrigger = "caves-1623/40/old/EisSVH"

class MapLocations(object):
    """Class used only for identifying the entrance locations.

    Starts from a fixed list of surface reference points and, in points(),
    appends one tuple per located Entrance, of the form
    (station, label, status, description).
    """
    # Fixed surface reference points: (station id, short label, type, description).
    p = [
        ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point"),
        ("226-96", "BZkn", "Reference", "Bräuning Zinken trig point"),
        ("vd1","VD1","Reference", "VD1 survey point"),
        ("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point"), 
        ("2000","Nipple","Reference", "Nipple (Weiße Warze)"),
        ("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit"),
        ("topcamp", "OTC", "Reference", "Old Top Camp"),
        ("laser.0", "LSR0", "Reference", "Laser Point 0"),
        ("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
        ("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
        ("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
        ("225-96", "BAlm", "Reference", "Bräuning Alm trig point")
    ]

    def __init__(self):
        # BUG FIX: points() used to append onto the shared class-level list,
        # so every MapLocations() instantiation accumulated duplicates.
        # Copy the template into an instance attribute instead.
        self.p = list(MapLocations.p)

    def points(self):
        """Return the reference points plus one point for each Entrance with a best station."""
        for ent in Entrance.objects.all():
            if ent.best_station():
                # print(f"{ent.filename}", end=", ")
                try:
                    k = ent.caveandentrance_set.all()[0].cave
                except:  # typically IndexError when no cave is linked to the entrance
                    message = f" ! Failed to get Cave linked to Entrance:{ent.name} from:{ent.filename} best:{ent.best_station()} {ent.caveandentrance_set.all()}"
                    DataIssue.objects.create(parser='entrances', message=message)
                    print(message)
                    continue # skip this entrance
                try:
                    areaName = k.getArea().short_name
                except:
                    message = f" ! Failed to get Area on cave '{k}' linked to Entrance:{ent.name} from:{ent.filename} best:{ent.best_station()}"
                    DataIssue.objects.create(parser='entrances', message=message)
                    print(message)
                    raise
                self.p.append((ent.best_station(), f"{areaName}-{str(ent)[5:]}", ent.needs_surface_work(), str(ent)))
        message = f" -  {len(self.p)} entrances linked to caves."
        print(message)
        return self.p

    def __str__(self):
        return f"{len(self.p)} map locations"
        
def get_offending_filename(path):
    """Return the troggle URL of the .svx file at *path*.

    Used to provide the link for a line in the DataErrors page
    which reports problems on importing data into troggle.
    """
    return f"/survexfile/{path}.svx"

class SurvexLeg():
    """No longer a models.Model subclass, so no longer a database table
    """
    # Class-level defaults; LoadSurvexLeg() overwrites these as instance
    # attributes on each throwaway SurvexLeg it creates.
    tape        = 0.0  # leg length (metres, after any *units conversion)
    compass     = 0.0  # bearing; set to an 'invalid' sentinel when unreadable
    clino       = 0.0  # inclination; +/-90.0 used for 'up'/'down' legs

def get_people_on_trip(survexblock):
    """Return the distinct person-names recorded against this survexblock."""
    roles = SurvexPersonRole.objects.filter(survexblock=survexblock)
    return list({f'{role.personname}' for role in roles})

    
class LoadingSurvex():
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scanswallet' is what we today call a "survey scans folder" or a "wallet".

    Holds all the parser state (regexes, stacks, caches, current-file pointers)
    used while walking the *include tree of survex files.
    """
    # python regex flags (?i) means case-insensitive, (?s) means . matches newline too
    # see https://docs.python.org/3/library/re.html
    # -- star-command keyword recognisers --
    rx_begin   = re.compile(r'(?i)begin')
    rx_end     = re.compile(r'(?i)end$')
    rx_title   = re.compile(r'(?i)title$')
    rx_ref     = re.compile(r'(?i)ref$')
    rx_data    = re.compile(r'(?i)data$')
    rx_flags   = re.compile(r'(?i)flags$')
    rx_alias   = re.compile(r'(?i)alias$')
    rx_entrance   = re.compile(r'(?i)entrance$')
    rx_date    = re.compile(r'(?i)date$')
    rx_units   = re.compile(r'(?i)units$')
    rx_team    = re.compile(r'(?i)team$')
    rx_set     = re.compile(r'(?i)set$')

    rx_names   = re.compile(r'(?i)names')
    rx_flagsnot= re.compile(r"not\s")
    rx_linelen = re.compile(r"[\d\-+.]+$")
    # every spelling/abbreviation of a survey role ever seen in a *team line
    instruments = "(bitch|bodger|bolt|bolter|bolting|book|clino|comp|compass|consultant|disto|distox|distox2|dog|dogsbody|drawing|drill|gps|helper|inst|instr|instrument|monkey|nagging|nail|nail_polish|nail_polish_bitch|nail_polish_monkey|nail_varnish|nail_varnish_bitch|note|paint|photo|pic|point|polish|powerdrill|rig|rigger|rigging|sketch|slacker|something|tape|topodroid|unknown|useless|varnish|waiting_patiently)"
    # NOTE(review): the non-raw "...\s+..." fragments below rely on Python passing
    # unrecognised string escapes through to re; raw strings would be cleaner.
    rx_teammem = re.compile(r"(?i)"+instruments+"?(?:es|s)?\s+(.*)$")
    rx_teamold = re.compile(r"(?i)(.*)\s+"+instruments+"?(?:es|s)?$")
    rx_teamabs = re.compile(r"(?i)^\s*("+instruments+")?(?:es|s)?\s*$")
    # separators between names in a *team people list
    rx_person  = re.compile(r"(?i) and |/| / |, | , |&| & | \+ |^both$|^none$")
    rx_qm      = re.compile(r'(?i)^\s*QM(\d+)\s+?([a-dA-DxX])\s+([\w\-\_]+)\.([\w\.\-]+)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$')
    # does not recognise non numeric suffix survey point ids
    rx_qm0     = re.compile(r'(?i)^\s*QM(\d+)\s+(.+)$')
    rx_qm_tick = re.compile(r'(?i)^\s*QM(\d+)\s+TICK\s([\d\-]+)\s(.*)$')
#   remember there is also QM_PATTERN used in views.other and set in settings.py
    rx_tapelng = re.compile(r'(?i).*(tape|length).*$')

    # -- recognisers for comments, includes, refs and merge debris --
    rx_cave    = re.compile(r'(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)')
    rx_comment = re.compile(r'([^;]*?)\s*(?:;\s*(.*))?\n?$')
    rx_comminc = re.compile(r'(?i)^\|\*include[\s]*([-\w/]*).*$') # inserted by linear collate ;*include
    rx_commcni = re.compile(r'(?i)^\|\*edulcni[\s]*([-\w/]*).*$') # inserted by linear collate ;*edulcni
    rx_include = re.compile(r'(?i)^\s*(\*include[\s].*)$')
    rx_commref = re.compile(r'(?i)^\s*ref(?:erence)?[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)')
    rx_ref_text= re.compile(r'(?i)^\s*\"[^"]*\"\s*$')
    rx_star    = re.compile(r'(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
    rx_starref = re.compile(r'(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')
    rx_argsref = re.compile(r'(?i)^[\s.:]*((?:19[6789]\d)|(?:20[012345]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$')
    rx_badmerge= re.compile(r'(?i).*(\>\>\>\>\>)|(\=\=\=\=\=)|(\<\<\<\<\<).*$') # git merge-conflict markers
    rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')
    rx_commteam = re.compile(r'(?i)\s*(Messteam|Zeichner)\s*[:]?(.*)')
 

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
    datastardefault = {"type":"normal", "from":0, "to":1, "tape":2, "compass":3, "clino":4}
    flagsdefault = {"duplicate":False, "surface":False, "splay":False, "skiplegs":False, "splayalias":False}

    # NOTE(review): the attributes below are class-level mutables shared by all
    # instances; this parser appears to be instantiated once per import run - confirm.
    datastar ={}                # current *data field ordering
    flagsstar = {}              # current *flags state
    units = "metres"            # current *units for tape
    unitsfactor = None          # numeric *units scale factor, if any
    slength = 0.0               # accumulated tape length
    legsnumber = 0              # accumulated leg count
    depthbegin = 0              # *begin/*end nesting depth
    depthinclude = 0            # *include nesting depth
    # stacks pushed/popped on *begin/*end and *include/*edulcni
    unitsstack = []
    legsnumberstack = []
    slengthstack = []
    personexpedstack = []
    stackbegin =[]
    flagsstack =[]
    datastack =[]
    includestack = []
    stacksvxfiles = []
    svxfileslist = []
    svxdirs = {} 
    uniquename = {}
    expos = {}                  # year -> Expedition cache, see get_expo_from_year()
    survexdict = {} # each key is a directory, and its value is a list of files
    lineno = 0
    insp = ""                   # indent prefix for progress printout
    callcount = 0
    caverncount = 0
    ignoreprefix = ["surface", "kataster", "fixedpts", "gpx"]
    ignorenoncave = ["caves-1623", "caves-1623/2007-NEU","caves-1626", "caves-1624", "caves-1627", "fixedpts/gps/gps00raw", ""]
    includedfilename =""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None
    currentpersonexped = []
    pending = []

    def __init__(self):
        # Cache the cave-identifier lookup table once per parser instance.
        self.caveslist = GetCaveLookup()
        pass
        
    def LoadSurvexFallThrough(self, survexblock, line, cmd):
        """Handle any *command not processed elsewhere: silently accept the
        harmless ones, warn on known-but-unparsed ones, and report anything
        unrecognised as a DataIssue.
        """
        if cmd == "require":
            # should we check survex version available for processing?
            return
        harmless = ("equate", "fix", "calibrate", "cs", "export", "case",
                    "declination", "infer", "instrument", "sd")
        if cmd in harmless:
            # we ignore all these, which is fine.
            return
        known_unparsed = ("include", "data", "flags", "title", "entrance", "set", "units", "alias", "ref")
        if cmd in known_unparsed:
            message = f"! Warning. Unparsed [*{cmd}]: '{line}' {survexblock.survexfile.path} - not an error (probably)"
        else:
            message = f"! Bad unrecognised svx command: [*{cmd}] {line} ({survexblock}) {survexblock.survexfile.path}"
        print(self.insp+message)
        DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(survexblock.survexfile.path))

    def LoadSurvexTeam(self, survexblock, line):
        """Interpreting the *team fields means interpreting older style survex as well as current survex standard,
        *team Insts Anthony Day   - this is how most of our files specify the team member
        *team "Anthony Day" notes pictures tape    - this is how the survex documentation says it should be done
        We have a huge variety of abbreviations and mispellings. The most laconic being 
        *team gb, bl
        
        personrole is used to record that a person was on a survex trip, NOT the role they played.
        (NB PersonTrip is a logbook thing, not a survex thing. Yes they could be merged, maybe.)

        BUG FIX: the 'Weird ... oldstyle line' warning used to interpolate
        mteammember.group(1), but mteammember is None on that branch (which is
        why we are parsing old-style at all) so it raised AttributeError; it
        now reports moldstyle.group(1).
        """
        def record_team_member(tm, survexblock):
            # Attach one named person to this survexblock, resolving them to a
            # PersonExpedition when the block's *date (hence expedition) is already known.
            tm = tm.strip('\"\'').strip()
            # Refactor. The dict GetPersonExpeditionNameLookup(expo) indexes by name and has values of personexpedition
            # This is convoluted, the whole personexpedition concept is unnecessary.
            
            # we need the current expedition, but if there has been no date yet in the survex file, we don't know which one it is.
            # so we can't validate whether the person was on expo or not.
            # we will have to attach them to the survexblock anyway, and then do a 
            # later check on whether they are valid when we get the date.
            
             
            expo = survexblock.expedition # may be None if no *date yet
            # this syntax was bizarre.. made more obvious
            if expo:
                if  not survexblock.expeditionday: # *date has been set
                    # should not happen
                    message = f"! *team {expo.year} expo ok, expedition day not in  *team {survexblock.survexfile.path} ({survexblock}) "
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                    
                personexpedition =  GetPersonExpeditionNameLookup(expo).get(tm.lower())
                if personexpedition: 
                    personrole, created = SurvexPersonRole.objects.update_or_create(survexblock=survexblock, personexpedition=personexpedition, personname=tm)
                    personrole.person=personexpedition.person
                    personrole.expeditionday = survexblock.expeditionday 
                    self.currentpersonexped.append(personexpedition) # used in push/pop block code
                    personrole.save()
                elif known_foreigner(tm): # note, not using .lower()
                    message = f"- *team {expo.year} '{tm}' known foreigner on *team {survexblock.survexfile.path} ({survexblock})  in '{line}'"
                    print(self.insp+message)
                    # DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                else:
                    # we know the date and expo, but can't find the person
                    message = f"! *team {expo.year} '{tm}' FAIL personexpedition lookup on *team {survexblock.survexfile.path} ({survexblock})  in '{line}'"
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
            else:
                personexpedition = None
                personrole, created = SurvexPersonRole.objects.update_or_create(survexblock=survexblock, personexpedition=personexpedition, personname=tm)
                # don't know the date yet, so cannot query the table about validity. 
                # assume the person is valid. It will get picked up with the *date appears
                personrole.save()

                   
    
        mteammember = self.rx_teammem.match(line) # matches the role  at the beginning 
        if not mteammember:
            moldstyle = self.rx_teamold.match(line) # matches the role at the the end of the string
            if moldstyle:
                for tm in self.rx_person.split(moldstyle.group(1)):
                    if tm:
                        record_team_member(tm, survexblock)
                        # seems to be working
                        # msg = "! OLD tm='{}' line: '{}' ({}) {}".format(tm, line, survexblock, survexblock.survexfile.path)
                        # print(msg,  file=sys.stderr)
                    else:
                        # BUG FIX: was mteammember.group(1), which is None here
                        message = f"! *team {survexblock.survexfile.path} ({survexblock}) Weird '{moldstyle.group(1)}' oldstyle line: '{line}'"
                        print(self.insp+message)
                        DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
            else:
                nullmember = self.rx_teamabs.match(line) # matches empty role line. Ignore these.
                if not nullmember:
                    message = f"! *team {survexblock.survexfile.path} ({survexblock}) Bad line: '{line}'"
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
        else:
            for tm in self.rx_person.split(mteammember.group(2)):
                if tm:
                    record_team_member(tm, survexblock)
                else:
                    if not mteammember.group(2).lower() in ('none', 'both'):
                        message = f"! Weird *team '{mteammember.group(2)}' newstyle line: '{line}' ({survexblock}) {survexblock.survexfile.path}"
                        print(self.insp+message)
                        DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))

    def LoadSurvexEntrance(self, survexblock, line):
        """Placeholder for *entrance handling - not using this yet."""
        return
        
    def LoadSurvexAlias(self, survexblock, line):
        """Interpret '*alias station - ..' which makes '-' a synonym for an
        anonymous splay station. Only that exact form is recognised; anything
        else is reported as a DataIssue.
        """
        # BUG FIX: raw string, so \s \- \. are regex escapes, not (invalid) string escapes
        splayalias = re.match(r"(?i)station\s*\-\s*\.\.\s*$", line)
        if splayalias:
            self.flagsstar["splayalias"] = True
        else:
            message = f"! Bad *ALIAS: '{line}' ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message)

    def LoadSurvexUnits(self, survexblock, line):
        """Interpret a *units line, but only when it mentions tape/length.

        Records a numeric conversion factor in self.unitsfactor if present,
        and sets self.units to 'feet' or 'metres'; anything else is reported.
        """
        # all for 4 survex files with measurements in feet. bugger.
        # Won't need this once we move to using cavern or d3dump output for lengths
        tapeunits = self.rx_tapelng.match(line) # tape|length
        if not tapeunits:
            return
        # BUG FIX: raw strings, so \w \s \d are regex escapes, not (invalid) string escapes
        convert = re.match(r"(?i)(\w*)\s*([\.\d]+)\s*(\w*)", line)
        if convert:
            factor = convert.groups()[1]
            self.unitsfactor = float(factor)
            if debugprint:
                message = f"! *UNITS NUMERICAL conversion [{factor}x] '{line}' ({survexblock}) {survexblock.survexfile.path}"
                print(self.insp+message)
                DataIssue.objects.create(parser='survexunits', message=message)

        feet = re.match(r"(?i).*feet$", line)
        metres = re.match(r"(?i).*(METRIC|METRES|METERS)$", line)
        if feet:
            self.units = "feet"
        elif metres:
            self.units = "metres"
        else:
            message = f"! *UNITS in YARDS!? - not converted '{line}' ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survexunits', message=message)
    
    def get_expo_from_year(self, year):
        """Return the Expedition for *year*, caching results in self.expos
        to save a DB query on every survex block.

        BUG FIX: the anomaly message formerly interpolated 'line' and
        'survexblock', neither of which is in scope here, so any year with
        an unexpected number of expeditions raised NameError instead of
        logging a DataIssue.
        """
        if year in self.expos:
            return self.expos[year]
        expeditions = Expedition.objects.filter(year=year)
        if len(expeditions) != 1:
            # only 'year' is available here, so report just that
            message = f"! Unexpected number of expeditions ({len(expeditions)}) for year '{year}'"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message)
        # NOTE(review): still raises IndexError if there are zero expeditions
        # for this year, as the original did - TODO confirm desired behaviour
        expo = expeditions[0]
        self.expos[year] = expo
        return expo
    
    def LoadSurvexDate(self, survexblock, line):
        """Parse a *date value ('YYYY.MM.DD', 'YYYY.MM' or 'YYYY'; a range uses
        only its first date), set survexblock.date/.expedition/.expeditionday,
        and retro-fit expedition data onto any *team people that were recorded
        before the date was known.
        """
        # we should make this a date RANGE for everything?
            
        def setdate_on_survexblock(year):
            # We are assuming that deferred *team people are in the same block. Otherwise, ouch.
            expo = self.get_expo_from_year(year)
            survexblock.expedition = expo
            survexblock.expeditionday = expo.get_expedition_day(survexblock.date)
            survexblock.save()
            
            # back-fill people recorded by *team before this *date was seen
            team = SurvexPersonRole.objects.filter(survexblock=survexblock)
            for pr in team:
                if not pr.expeditionday: # *date and *team in 'wrong' order. All working now.
                    
                    pr.expeditionday = survexblock.expeditionday 
                    pr.save()
                    
                    if not pr.personexpedition: # again, we didn't know the date until now
                        pe = GetPersonExpeditionNameLookup(expo).get(pr.personname.lower())
                        if pe:
                            # message = "!  {} ({}) Fixing undated personexpedition '{}'".format(survexblock.survexfile.path, survexblock, p.personname)
                            # print(self.insp+message)
                            # DataIssue.objects.create(parser='survex', message=message)
                            pr.personexpedition = pe
                            pr.person = pr.personexpedition.person
                            pr.save()
                            self.currentpersonexped.append(pe) # used in push/pop block code
                        elif known_foreigner(pr.personname): # note, not using .lower()
                            message = f"- *team {expo.year} '{pr.personname}' known foreigner on *date {survexblock.survexfile.path} ({survexblock})  in '{line}'"
                            print(self.insp+message)
                            # DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                        else:
                            message = f"! *team {year} '{pr.personname}' FAIL personexpedition lookup on *date {survexblock.survexfile.path} ({survexblock})  '{pr.personname}'"
                            print(self.insp+message)
                            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                       
        oline = line
        if len(line) > 10: 
            # message = "! DATE Warning LONG DATE '{}' ({}) {}".format(oline, survexblock, survexblock.survexfile.path)
            # print(self.insp+message)
            # DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(survexblock.survexfile.path))
            if line[10] == "-": # ie a range, just look at first date
                line = line[0:10]
        if len(line) == 10: 
            # full 'YYYY.MM.DD' date
            year = line[:4]
            # TO DO set to correct Austrian timezone Europe/Vienna ?
            # %m and %d need leading zeros. Source svx files require them.
            survexblock.date = datetime.strptime(line.replace('.','-'), '%Y-%m-%d')
            setdate_on_survexblock(year)
        elif len(line) == 7: 
            # month-only 'YYYY.MM' date: warn and use the 1st of the month
            year = line[:4]
            perps = get_people_on_trip(survexblock) # What, you don't know Judge Dredd slang ?
            message = f"! DATE Warning only accurate to the month, setting to 1st '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp+message)
            DataIssue.objects.create(parser='svxdate', message=message,  url=get_offending_filename(survexblock.survexfile.path))
            survexblock.date = datetime.strptime(line.replace('.','-'), '%Y-%m') # sets to first of month
            setdate_on_survexblock(year)
        elif len(line) == 4: 
            # year-only date: warn and use January 1st
            year = line[:4]
            perps = get_people_on_trip(survexblock)
            message = f"! DATE WARNING only accurate to the YEAR, setting to 1st January '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp+message)
            DataIssue.objects.create(parser='svxdate', message=message,  url=get_offending_filename(survexblock.survexfile.path))
            survexblock.date = datetime.strptime(line, '%Y') # sets to January 1st
            setdate_on_survexblock(year)
        else:
            # these errors are reporting the wrong survexblock, which is actually a SurvexFile (!)
            message = f"! DATE Error unrecognised '{oline}-{survexblock}' ({type(survexblock)}) {survexblock.survexfile.path}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(survexblock.survexfile.path))
            print(f"  {type(survexblock)=}") # survexblock.parent fails as a SurvexFile has no .parent ...ugh.
            print(f"  {survexblock.survexpath=}")
            print(f"  {survexblock.survexfile=}")
            #raise

    def LoadSurvexLeg(self, survexblock, sline, comment, svxline):
        """This reads compass, clino and tape data but only keeps the tape lengths,
        the rest is discarded after error-checking.
        Now skipping the error checking - returns as soon as the leg is not one we count.
        
        REPLACE ALL THIS by reading the .log output of cavern for the file. 
        But we need the lengths per Block, not by File.  dump3d will do lengths per block.

        BUG FIX: the 'Fewer than 5 fields' diagnostic referenced an undefined
        name 'survexfile' and raised NameError; it now uses survexblock.survexfile.
        """
        # sentinel values recorded when a field is unreadable
        invalid_clino = 180.0
        invalid_compass = 720.0
        invalid_tape = 0.0

        if self.flagsstar["skiplegs"]:
            if debugprint:
                print("skip in ", self.flagsstar, survexblock.survexfile.path)
            return

        if debugprint:
            print(f"! LEG datastar type:{self.datastar['type'].upper()}++{survexblock.survexfile.path}\n{sline} ")
        # only legs in "normal" data style contribute to cave length
        if self.datastar["type"] in ("passage", "cartesian", "nosurvey", "diving", "cylpolar"):
            return
        if debugprint:
            print(f" !! LEG data lineno:{self.lineno}\n !! sline:'{sline}'\n !! datastar['tape']: {self.datastar['tape']}")
           
        if self.datastar["type"] != "normal": 
            return
 
        ls = sline.lower().split()
        # NORMAL, so there should be 5 fields
        # from the content, this is clearly reading fixedpts/gps/gps00raw.svx, but not reporting it by that name
        if len(ls) < 5:
            # BUG FIX: bare 'survexfile' was undefined here (NameError)
            print("! Fewer than 5 fields in NORMAL in ", survexblock.survexfile.path, survexblock.survexfile, survexblock.survexfile.parent)
            print("  datastar NORMAL:", self.datastar)
            print(f"  Line (split): {ls}, comment: {comment}")
            print(f"  Line: {sline}\nsvxline: {svxline}")
            message = f' ! Not 5 fields in line \'{sline.lower()}\' {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}'
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))

        datastar = self.datastar # shallow copy: alias but the things inside are the same things
        survexleg = SurvexLeg()
 
        # skip all splay legs
        try:
            if ls[datastar["from"]] == ".." or ls[datastar["from"]] == ".":
                if debugprint:
                    print("Splay in ", survexblock.survexfile.path)
                return
            if ls[datastar["to"]] == ".." or ls[datastar["to"]] == ".":
                if debugprint:
                    print("Splay in ", survexblock.survexfile.path)
                return
            if self.flagsstar["splayalias"]:
                # '*alias station - ..' is in force: '-' marks an anonymous splay
                if ls[datastar["from"]] == "-":
                    if debugprint:
                        print("Aliased splay in ", survexblock.survexfile.path)
                    return
                if ls[datastar["to"]] == "-":
                    if debugprint:
                        print("Aliased splay in ", survexblock.survexfile.path)
                    return
        except:
            message = f' ! datastar parsing from/to incorrect in line {ls} in {survexblock.survexfile.path}'
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            return

        try:
            tape = ls[datastar["tape"]]
        except:
            message = f' ! datastar parsing incorrect in line {ls} in {survexblock.survexfile.path}'
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message,  url=get_offending_filename(survexblock.survexfile.path))
            survexleg.tape = invalid_tape
            return
        # e.g. '29/09' or '(06.05)' in the tape measurement
        # tape = tape.replace("(","")  # edited original file (only one) instead
        # tape = tape.replace(")","")  # edited original file (only one) instead
        # tape = tape.replace("/",".") # edited original file (only one) instead.
        try:
            # apply any *units scale factor / feet conversion before accumulating
            if self.unitsfactor:
                tape = float(tape) * self.unitsfactor
                if debugprint:
                    message = f" ! Units: Length scaled {tape}m '{ls}' in ({survexblock.survexfile.path})  units:{self.units} factor:{self.unitsfactor}x"
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            if self.units =="feet":
                tape = float(tape) / METRESINFEET
                if debugprint:
                    message = f" ! Units: converted  to {tape:.3f}m from {self.units} '{ls}' in ({survexblock.survexfile.path})"
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            survexleg.tape = float(tape)
            self.legsnumber += 1
        except ValueError:
            message = f" ! Value Error: Tape misread in line'{ls}' in {survexblock.survexfile.path} units:{self.units}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            survexleg.tape = invalid_tape
        try:
            survexblock.legslength += survexleg.tape
            self.slength   += survexleg.tape
        except ValueError:
            message = f" ! Value Error: Tape length not added '{ls}' in {survexblock.survexfile.path} units:{self.units}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))

        try:
            lcompass = ls[datastar["compass"]]
        except:
            message = f' ! Value Error: Compass not found in line {ls} in {survexblock.survexfile.path}'
            print(self.insp+message)
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            lcompass = invalid_compass

        try:
            lclino = ls[datastar["clino"]]
        except:
            print(("! Clino misread in", survexblock.survexfile.path))
            print(("  datastar:", datastar))
            print(("  Line:", ls))
            message = f' ! Value Error: Clino misread in line \'{sline.lower()}\' {datastar=} {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}'
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            lclino = invalid_clino

        if lclino == "up":
            survexleg.clino = 90.0
            lcompass = invalid_compass
        elif lclino == "down":
            survexleg.clino = -90.0
            lcompass = invalid_compass
        elif lclino == "-" or lclino == "level":
            # NOTE(review): -90.0 for a 'level' leg looks wrong (0.0 expected);
            # left unchanged because only the tape length is used downstream - TODO confirm
            survexleg.clino = -90.0

        try:
            survexleg.compass = float(lcompass)
        except ValueError:
            print(("! Compass misread in", survexblock.survexfile.path))
            print(("  datastar:", datastar))
            print(("  Line:", ls))
            message = " ! Value Error: lcompass:'{}' line {} in '{}'".format(lcompass, 
                    ls, survexblock.survexfile.path)
            DataIssue.objects.create(parser='survexleg', message=message, url=get_offending_filename(survexblock.survexfile.path))
            survexleg.compass = invalid_compass

        # drop the reference so the temporary leg object can be collected
        survexleg = None
        
       
    def LoadSurvexRef(self, survexblock, args):
        """Interpret the *ref record (or ;ref comment), and all the many variants.

        Quoted free-text refs are ignored; otherwise the arg is parsed as
        <year> [X] <wallet-number>, normalised to the canonical '<year>#[X]<nn>'
        wallet id, validated, and the matching Wallet object is attached to the
        survexblock. All malformed variants are reported as DataIssues.
        """
        #print(self.insp+ "*REF ---- '"+ args +"'")
        url= get_offending_filename(survexblock.survexfile.path)
        # *REF but also ; Ref      years from 1960 to 2039
        refline = self.rx_ref_text.match(args)
        if refline:
            # a textual reference such as  "1996-1999 Not-KH survey book pp 92-95"
            print(f'{self.insp} *REF quoted text so ignored:{args} in {survexblock.survexfile.path}')
            return
        
        if len(args)< 4:
            message = f" ! Empty or BAD *REF statement '{args}' in '{survexblock.survexfile.path}'"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
            return

        argsgps = self.rx_argsref.match(args)
        if argsgps:
            yr, letterx, wallet = argsgps.groups()
        else:
            perps = get_people_on_trip(survexblock)
            message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' malformed id '{args}' {perps}"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
            return

        # normalise to the canonical wallet id form '<year>#[X]<2-digit-number>'
        if not letterx:
            letterx = ""
        else:
            letterx = "X"
        if len(wallet)<2:
            wallet = "0" + wallet
        refscan = f"{yr}#{letterx}{wallet}"
        if not (int(yr)>1960 and int(yr)<2050):
                # BUG FIX: this message was not an f-string (placeholders printed
                # literally) and referenced refscan before it was defined; refscan
                # is now built above this check.
                message = f" ! Wallet year out of bounds {yr} '{refscan}' {survexblock.survexfile.path}"
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message, url=url)
            
        try:
            if int(wallet)>99:
                message = f" ! Wallet *REF {refscan} - very big (more than 99) so probably wrong in '{survexblock.survexfile.path}'"
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message, url=url)
        except ValueError:  # narrowed from bare except: only a non-numeric wallet id can fail here
            message = f" ! Wallet *REF {refscan} - not numeric in '{survexblock.survexfile.path}'"
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)
            
        manywallets = Wallet.objects.filter(walletname=refscan) # assumes all wallets found in earlier pass of data import
        if manywallets:
            if len(manywallets) > 1:
                message = f" ! Wallet *REF {refscan} - more than one found {len(manywallets)} wallets in db with same id {survexblock.survexfile.path}"
                print(self.insp+message)
                DataIssue.objects.create(parser='survex', message=message, url=url)
            
            if survexblock.scanswallet:
                if survexblock.scanswallet.walletname != refscan:
                    message = f" ! Wallet *REF {refscan} in {survexblock.survexfile.path} - Already a DIFFERENT wallet is set for this block  '{survexblock.scanswallet.walletname}'"
                    print(self.insp+message)
                    DataIssue.objects.create(parser='survex', message=message, url=url)
            else:
                survexblock.scanswallet = manywallets[0] # this is a ForeignKey field
                survexblock.save()
                # This is where we should check that the wallet JSON contains a link to the survexfile
                # and that the JSON date and walletdate are set correctly to the survexblock date.
        else:
            perps = get_people_on_trip(survexblock)
            message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' '{refscan}' NOT in database i.e. wallet does not exist {perps}."
            print(self.insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=url)

    def TickSurvexQM(self, survexblock, qmtick):
        """Interpret the specially formatted comment which is a QM TICKED statement.

        Finds the QM with the same block and number and marks it as ticked.
        qmtick groups: (1) QM number, (2) date, (3) trailing comment.
        """
        # Now we need to find the correct QM object. It will be in the same block and have the same number.
        qm = []  # BUG FIX: was unbound if the filter raised, making the message below a NameError
        try:
            qm = QM.objects.filter(block=survexblock, number=int(qmtick.group(1)))
        except:
            #raise
            message = f' ! QM TICK find FAIL  QM{qmtick.group(1)}  date:"{qmtick.group(2)}" qmlist:"{qm}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
            print(message)
            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
        if len(qm)>1:
            message = f' ! QM TICK MULTIPLE found FAIL  QM{qmtick.group(1)}  date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
            print(message)
            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
        if qm:
            qm[0].ticked = True
            qm[0].save()
        else:
            # BUG FIX: previously indexed qm[0] unconditionally, raising IndexError
            # when no matching QM exists; report it as a DataIssue instead.
            message = f' ! QM TICK not found FAIL  QM{qmtick.group(1)}  date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
            print(message)
            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))

    def LoadSurvexQM(self, survexblock, qmline):
        """Interpret the specially formatted comment which is a QM definition.

        Creates a QM object attached to this survexblock. qmline groups:
        (1) serial number, (2) grade, (3)+(4) nearest station,
        (6)+(7) resolution station (or '-'), (8) free-text description.
        """
        insp = self.insp

        qm_no = qmline.group(1) # this may not be unique across multiple survex files
        qm_grade = qmline.group(2)
        qm_nearest = ""  # BUG FIX: was unbound when the nearest-station group was absent
        if qmline.group(3):  # usual closest survey station
            qm_nearest = qmline.group(3) 
            if qmline.group(4):
                qm_nearest = qm_nearest +"."+ qmline.group(4)
                
        if qmline.group(6) and qmline.group(6) != '-':
            qm_resolve_station = qmline.group(6)
            if qmline.group(7):
                qm_resolve_station = qm_resolve_station +"."+ qmline.group(7) 
        else:
            qm_resolve_station = ""
        qm_notes = qmline.group(8)
        # Spec of QM in SVX files:
        # ;Serial number   grade(A/B/C/D/X)  nearest-station  resolution-station description
        # ;QM1	a	hobnob_hallway_2.42	hobnob-hallway_3.42	junction of keyhole passage
        # ;QM1	a	hobnob_hallway_2.42	-	junction of keyhole passage

        # NB none of the SurveyStations are in the DB now, so if we want to link to a SurvexStation
        # we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one
        
        # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date. 
        # We don't need this anymore so we don't need to create a placeholder logbook entry.
        # (unused locals qmyear/caveslug/place removed: nothing below read them)
        blockname = survexblock.name[:6] + survexblock.name[-1:]
        #logslug = f'D{survexblock.date.year}_{blockname}_{int(qm_no):03d}'

        try:
            # objects.create() already saves the row; no extra save() call needed
            qm = QM.objects.create(number=qm_no,
                                              # nearest_station=a_survex_station_object, # can be null
                                              nearest_station_description=qm_resolve_station,
                                              nearest_station_name=qm_nearest,
                                              grade=qm_grade.upper(),
                                              location_description=qm_notes,
                                              block = survexblock,   # only set for survex-imported QMs
                                              blockname = blockname, # only set for survex-imported QMs
                                              expoyear = str(survexblock.date.year),
                                              cave = survexblock.survexfile.cave)
            qm.save()  # BUG FIX: was 'qm.save' - a no-op attribute access, not a call
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
            message = f" ! QM{qm_no} FAIL to create {qm_nearest} in'{survexblock.survexfile.path}'"
            print(insp+message)
            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))

    def LoadSurvexDataNormal(self,survexblock,args):
        """Sets the order for data elements in this and following blocks, e.g.
        *data normal from to compass clino tape
        *data normal from to tape compass clino
        We are only collecting length data so we are disinterested in from, to, LRUD etc.

        Returns True when the *data line was handled, False when the caller
        should abort parsing (a format troggle cannot parse).
        BUG FIX: the success paths previously returned None (falsy), so the
        caller's truthiness test treated every successful *data line as an abort.
        """
        # datastardefault = { # included here as reference to help understand the code
                        # "type":"normal", 
                        # "t":"leg", 
                        # "from":0, 
                        # "to":1, 
                        # "tape":2, 
                        # "compass":3, 
                        # "clino":4}
        datastar = copy.deepcopy(self.datastardefault)
        if args == "":
            # naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
            return True
        # DEFAULT | NORMAL | CARTESIAN| NOSURVEY |PASSAGE | TOPOFIL | CYLPOLAR | DIVING  
        ls = args.lower().split()   
        if ls[0] == "default":
            self.datastar = copy.deepcopy(self.datastardefault)
        elif ls[0] == "normal" or ls[0] == "topofil":
            if not ("from" in datastar and "to" in datastar):
                message = f" ! - Unrecognised *data normal statement '{args}' {survexblock.name}|{survexblock.survexpath}"
                print(message)
                print(message,file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                return False
            else:
                # BUG FIX: previously rebound datastar to self.datastardefault (no copy)
                # and then mutated it, corrupting the shared defaults for every later
                # '*data default'. The deepcopy made at the top is mutated instead.
                # ls = ["normal", "from", "to", "tape", "compass", "clino" ]
                for i in range(1, len(ls)): # ls[0] is "normal"
                    if ls[i].lower() =="newline":
                        message = f" ! - ABORT *data statement has NEWLINE in it in {survexblock.survexfile.path}. Not parsed by troggle. '{args}'"
                        print(message)
                        print(message,file=sys.stderr)
                        DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
                        return False
                   
                    if ls[i] in ["bearing","compass"]:
                        datastar["compass"] = i-1
                    if ls[i] in ["clino","gradient"]:
                        datastar["clino"] = i-1
                    if ls[i] in ["tape","length"]:
                        datastar["tape"] = i-1
                self.datastar = copy.deepcopy(datastar)
                return True
        elif ls[0] == "passage" or ls[0] == "nosurvey" or ls[0] == "diving" or ls[0] == "cylpolar":
            # These block types contribute no centre-line leg data; just record the type.
            self.datastar["type"] = ls[0]
        elif ls[0] == "cartesian": # We should not ignore this ?! Default for Germans ?
            self.datastar["type"] = ls[0]
        else:
            message = f" ! - Unrecognised *data statement '{args}' {survexblock.name}|{survexblock.survexpath}"
            print(message)
            print(message,file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
            self.datastar["type"] = ls[0]
        return True

    def LoadSurvexFlags(self, args):
        """Interpret a *flags line.

        Valid flags are DUPLICATE, SPLAY, and SURFACE; a flag may be preceded
        with NOT to turn it off. The default state is NOT any of them.
        Rebuilds self.flagsstar from the defaults, then derives 'skiplegs'.
        """
        # always restart from a pristine copy of the defaults
        self.flagsstar = copy.deepcopy(self.flagsdefault)

        # fold "not <flag>" into a single token, e.g. "not splay" -> "notsplay"
        flags = self.rx_flagsnot.sub("not", args).split()
        if debugprint:
            print(f" ^ flagslist:{flags}",)

        for name in ("duplicate", "surface", "splay"):
            if name in flags:
                self.flagsstar[name] = True
            if "not" + name in flags:
                self.flagsstar[name] = False

        # if self.flagsstar["duplicate"] == True or self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
        # actually we do want to count duplicates as this is for "effort expended in surveying underground"
        if self.flagsstar["surface"] or self.flagsstar["splay"]:
            self.flagsstar["skiplegs"] = True
        if debugprint:
            print(f" $ flagslist:{flags}",)


    def IdentifyCave(self, cavepath):
        """Return the Cave object for a survex path, or None for non-cave paths.

        Results are memoized in self.caveslist, keyed on the lower-cased path.
        """
        if cavepath.lower() in self.caveslist:
            return self.caveslist[cavepath.lower()]
        # TO DO - this predates the big revision to Gcavelookup so look at this again carefully
        path_match = self.rx_cave.search(cavepath)
        if path_match:
            sluggy = f'{path_match.group(1)}-{path_match.group(2)}'
            guesses = [sluggy.lower(), path_match.group(2).lower()]
            for g in guesses:
                if g in self.caveslist:
                    # BUG FIX: cache under the lower-cased key, which is what the
                    # lookup above uses; caching under the raw path meant a
                    # mixed-case path was re-resolved on every call.
                    self.caveslist[cavepath.lower()] = self.caveslist[g]
                    return self.caveslist[g]
            print(f'    ! Failed to find cave for {cavepath.lower()}')
        # not a cave (or not identifiable), and that is fine.
        return None

    def GetSurvexDirectory(self, headpath):
        """This creates a SurvexDirectory if it has not been seen before, and on creation
        it sets the primarysurvexfile. This is correct as it should be set on the first file
        in the directory, where first is defined by the *include ordering. Which is what we
        are doing.
        """
        if not headpath:
            return self.svxdirs[""]
        key = headpath.lower()
        if key not in self.svxdirs:
            newdir = SurvexDirectory(path=headpath, primarysurvexfile=self.currentsurvexfile)
            newdir.save()
            self.svxdirs[key] = newdir
            self.survexdict[newdir] = []  # list of the files in the directory
        return self.svxdirs[key]

    def ReportNonCaveIncludes(self, headpath, includelabel, depth):
        """Ignore surface, kataster and gpx *include survex files; report any
        other *include whose path does not correspond to a known or pending cave.
        """
        if not self.pending:
            # lazily load the known-pending cave ids, normalised to upper-case
            # "1623-NNN" form, from expoweb's pendingcaves.txt
            self.pending = set()
            fpending = Path(settings.CAVEDESCRIPTIONS, "pendingcaves.txt")
            if fpending.is_file():
                with open(fpending, "r") as fo:
                    cids = fo.readlines()
                for cid in cids:
                    pid = cid.strip().rstrip('\n').upper()
                    # BUG FIX: test the normalised id, not the raw line; a line
                    # with leading whitespace previously got a spurious
                    # "1623-" prefix even when it already had an area prefix.
                    if pid.startswith("162"):
                        self.pending.add(pid)
                    else:
                        self.pending.add("1623-" + pid)

        if headpath in self.ignorenoncave:
            message = f" - {headpath} is <ignorenoncave> (while creating '{includelabel}' sfile & sdirectory)"
            #print("\n"+message)
            #print("\n"+message,file=sys.stderr)
            return
        for i in self.ignoreprefix:
            if headpath.startswith(i):
                message = f" - {headpath} starts with <ignoreprefix> (while creating '{includelabel}' sfile & sdirectory)"
                # print("\n"+message)
                # print("\n"+message,file=sys.stderr)
                return
        # guess a cave identifier from the fixed-layout path "caves-YYYY/NNN/..."
        caveid = f'{headpath[6:10]}-{headpath[11:]}'.upper()
        if caveid in self.pending:
           # Yes we didn't find this cave, but we know it is a pending one. So not an error.
           # print(f'! ALREADY PENDING {caveid}',file=sys.stderr)
           return
        shortid = caveid[5:]  # renamed from 'id' to avoid shadowing the builtin
        if shortid in self.pending:
           print(f'! ALREADY PENDING {shortid}',file=sys.stderr)
           return
        
        message = f" ! Warning: cave identifier '{caveid}'or {shortid} (guessed from file path)  is not a known cave.  Need to add to expoweb/cave_data/pending.txt ?  In '{includelabel}.svx' at depth:[{len(depth)}]."
        print("\n"+message)
        print("\n"+message,file=sys.stderr)
        print(f"{self.pending}",end="", file=sys.stderr)
        DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(includelabel))
        # print(f' # datastack in  LoadSurvexFile:{includelabel}', file=sys.stderr)
        # for dict in self.datastack:
            # print(f'   type: <{dict["type"].upper()}   >', file=sys.stderr)

    def LoadSurvexFile(self, svxid):
        """Creates SurvexFile in the database, and SurvexDirectory if needed
        with links to 'cave'
        Creates a new current survexfile and valid .survexdirectory
        Inspects the parent folder of the survexfile and uses that to decide if this is a cave we know
        The survexblock passed-in is not necessarily the parent. FIX THIS.
        """
        if debugprint:
            print(f" # datastack in  LoadSurvexFile:{svxid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")

        depth = " " * self.depthbegin
        # print("{:2}{}   - NEW survexfile:'{}'".format(self.depthbegin, depth, svxid))
        headpath = os.path.dirname(svxid)

        newfile = SurvexFile(path=svxid)
        newfile.save() # until we do this there is no internal id so no foreign key works
        self.currentsurvexfile = newfile 
        newdirectory = self.GetSurvexDirectory(headpath)

        # BUG FIX: check for a None directory *before* dereferencing it; previously
        # this report was unreachable because newdirectory.save() and the dict
        # lookup below would already have raised before we got here.
        if not newdirectory:
            message = f" ! 'None' SurvexDirectory returned from GetSurvexDirectory({headpath})"
            print(message)
            print(message,file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message, url = f'/survexfile/{svxid}')
            return

        newdirectory.save() 
        newfile.survexdirectory = newdirectory
        self.survexdict[newdirectory].append(newfile)
        cave = self.IdentifyCave(headpath) # cave already exists in db

        if cave:
            newdirectory.cave = cave
            newfile.cave   = cave
            # print(f"\n - New directory '{newdirectory}' for cave '{cave}'",file=sys.stderr)
        else: # probably a surface survey, or a cave in a new area e.g. 1624 not previously managed, and not in the pending list
            self.ReportNonCaveIncludes(headpath, svxid, depth)
            
        if not newfile.survexdirectory:
            message = f" ! SurvexDirectory NOT SET in new SurvexFile {svxid} "
            print(message)
            print(message,file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message)
        self.currentsurvexfile.save() # django insists on this although it is already saved !?
        try:
            newdirectory.save()
        except Exception:  # print diagnostics, then re-raise: this is a fatal import error
            print(newdirectory, file=sys.stderr)
            print(newdirectory.primarysurvexfile, file=sys.stderr)
            raise
            
        if debugprint:
            print(f" # datastack end LoadSurvexFile:{svxid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")

    def ProcessIncludeLine(self, included):
        """Handle a ';*include' marker in the collated file: switch debug
        printing on if this is the trigger file, then load the included
        survex file and push it onto the include stack."""
        global debugprint
        fname = included.group(1)
        if fname.lower() == debugprinttrigger.lower():
            debugprint = True
        self.LoadSurvexFile(fname)
        self.stacksvxfiles.append(self.currentsurvexfile)

    def ProcessEdulcniLine(self, edulcni):
        """Handle a ';*edulcni' marker (return from an included file): save the
        current survexfile to the db and pop the previous one off the stack.
        Switches debug printing off if this is the trigger file."""
        global debugprint
        fname = edulcni.group(1)
        if debugprint:
            prefix = " " * self.depthbegin
            print(f"{self.depthbegin:2}{prefix}   - Edulcni  survexfile:'{fname}'")
        if fname.lower() == debugprinttrigger.lower():
            debugprint = False
        self.currentsurvexfile.save()
        self.currentsurvexfile = self.stacksvxfiles.pop()

    def LoadSurvexComment(self, survexblock, comment):
        """Dispatch on the content of a survex comment line.

        We ignore all comments except ;ref / ;wallet, ;QM, and the ;*include /
        ;*edulcni markers which are inserted into the collated survex file.
        """
        #    rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')
        # This should also check that the QM survey point rxists in the block

        if self.rx_commref.match(comment):
            #comment = re.sub('(?i)\s*ref[.;]?',"",comment.strip())
            comment = self.rx_ref2.sub("",comment.strip())
            print(f'rx_ref2 -- {comment=} in {survexblock.survexfile.path} :: {survexblock}')
            self.LoadSurvexRef(survexblock, comment)

        # handle
        # ; Messteam: Jörg Haussmann, Robert Eckardt, Thilo Müller
        # ; Zeichner: Thilo Müller
        # But none of these will be valid teammembers because they are not actually on our expo
        if self.rx_commteam.match(comment):
            # print(f'rx_commteam -- {comment=} in {survexblock.survexfile.path} :: {survexblock}')
            pass

        qml = self.rx_qm0.match(comment)
        if qml:
            qmline = self.rx_qm.match(comment)
            if qmline:
                self.LoadSurvexQM(survexblock, qmline)
            else:
                qmtick = self.rx_qm_tick.match(comment)
                if qmtick:
                    self.TickSurvexQM(survexblock, qmtick)
                else:
                    message = f' ! QM Unrecognised as valid in "{survexblock.survexfile.path}" QM{qml.group(1)} "{qml.group(2)}" : regex failure, typo?'
                    print(message)
                    DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))

        # ;*include means 'we have been included'; whereas *include means 'proceed to include'
        # bug, If the original survex file contians the line ;*include then we pick it up ! So fix our special code to be ;|*include
        included = self.rx_comminc.match(comment)
        if included:
            self.ProcessIncludeLine(included)

        # ;*edulcni means we are returning from an included file
        edulcni = self.rx_commcni.match(comment)
        if edulcni:
            self.ProcessEdulcniLine(edulcni)

    def LoadSurvexSetup(self,survexblock, survexfile):
        """Reset per-file parsing state before reading a survex file, and try
        to attach the file to its Cave based on the caves-YYYY/NNN path
        convention (the file keeps its previous cave value if no match)."""
        self.depthbegin = 0
        self.datastar = self.datastardefault
        # (removed unused local 'blocklegs = self.legsnumber': nothing read it)
        print(self.insp+f"  - MEM:{get_process_memory():.3f} Reading. parent:{survexblock.survexfile.path}  <> {survexfile.path} ")
        self.lineno = 0
        sys.stderr.flush()
        # progress ticker on stderr: a dot every 10 files, a newline every 500
        self.callcount +=1
        if self.callcount % 10 ==0 :
            print(".", file=sys.stderr,end='')
        if self.callcount % 500 ==0 :
            print("\n", file=sys.stderr,end='')
        # Try to find the cave in the DB if not use the string as before
        path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
        if path_match:
            pos_cave = f'{path_match.group(1)}-{path_match.group(2)}'
            cave = getCaveByReference(pos_cave)
            if cave:
                survexfile.cave = cave

    def LinearLoad(self, survexblock, path, collatefilename):
        """Loads a single survex file. Usually used to import all the survex files which have been collated
        into a single file. Loads the begin/end blocks using a stack for labels.
        Uses the python generator idiom to avoid loading the whole file (21MB) into memory.
        """
        blkid = None
        pathlist = None
        args = None
        oldflags = None
        blockcount = 0
        self.lineno = 0
        slengthtotal = 0.0
        nlegstotal = 0
        self.relativefilename = path
        cave = self.IdentifyCave(path) # this will produce null for survex files which are geographic collections
        
        self.currentsurvexfile = survexblock.survexfile
        self.currentsurvexfile.save() # django insists on this although it is already saved !?

        self.datastar = copy.deepcopy(self.datastardefault)
        self.flagsstar = copy.deepcopy(self.flagsdefault)

        def tickle():
            nonlocal blockcount
            
            blockcount +=1
            if blockcount % 20 ==0 :
                print(".", file=sys.stderr,end='')
            if blockcount % 800 ==0 :
                print("\n", file=sys.stderr,end='')
                mem=get_process_memory()
                print(f"  - MEM: {mem:7.2f} MB in use",file=sys.stderr)
                print("    ", file=sys.stderr,end='')
            sys.stderr.flush()

        def printbegin():
            nonlocal blkid
            nonlocal pathlist

            depth = " " * self.depthbegin
            self.insp = depth
            if debugprint:
                print(f"{self.depthbegin:2}{depth}   - Begin for :'{blkid}'")
            pathlist = ""
            for id in self.stackbegin:
                if len(id) > 0:
                    pathlist += "." + id

        def printend():
            nonlocal args

            depth = " " * self.depthbegin
            if debugprint:
                print(f"{self.depthbegin:2}{depth}   - End   from:'{args}'")
                print("{:2}{}   - LEGS: {} (n: {}, length:{} units:{})".format(self.depthbegin,
                        depth, self.slength, self.slength, self.legsnumber, self.units))

        def pushblock():
            nonlocal blkid
            if debugprint:
                print(f" # datastack at  1 *begin {blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}'   ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar  ")
            # ------------ * DATA
            self.datastack.append(copy.deepcopy(self.datastar))
            # ------------ * DATA
            if debugprint:
                print(f" # datastack at  2 *begin {blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}'   ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar  ")
            
            # ------------ * FLAGS
            self.flagsstack.append(copy.deepcopy(self.flagsstar))
            # ------------ * FLAGS
            pass

        def popblock():
            nonlocal blkid
            nonlocal oldflags
            if debugprint:
                print(f" # datastack  at  *end '{blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}'   ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar  ")
            # ------------ * DATA
            self.datastar  = copy.deepcopy(self.datastack.pop())
            # ------------ * DATA
            if debugprint:
                print(f" # datastack  after *end '{blkid} 'type':", end="")
                for dict in self.datastack:
                    print(f"'{dict['type'].upper()}'   ", end="")
                print("")
                print(f"'{self.datastar['type'].upper()}' self.datastar  ")
            
            # ------------ * FLAGS
            self.flagsstar = copy.deepcopy(self.flagsstack.pop()) 
            # ------------ * FLAGS
            if debugprint:
                if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                    print(f" # POP  'any' flag now:'{self.flagsstar['skiplegs']}'  was:{oldflags['skiplegs']} ")

        def starstatement(star):
            """Interprets a survex comamnd where * is the first character on the line, e.g. *begin
            """
            nonlocal survexblock
            nonlocal blkid
            nonlocal pathlist
            nonlocal args
            nonlocal oldflags
            nonlocal slengthtotal
            nonlocal nlegstotal

            cmd, args = star.groups()
            cmd = cmd.lower()

            # ------------------------BEGIN
            if self.rx_begin.match(cmd):
                blkid = args.lower()
                # PUSH state ++++++++++++++
                self.stackbegin.append(blkid)
                self.unitsstack.append((self.units, self.unitsfactor))
                self.legsnumberstack.append(self.legsnumber)
                self.slengthstack.append(self.slength)
                self.personexpedstack.append(self.currentpersonexped)
                pushblock()
                # PUSH state ++++++++++++++
                self.legsnumber = 0
                self.slength = 0.0
                self.units = "metres"
                self.currentpersonexped = []
                printbegin()
                newsurvexblock = SurvexBlock(name=blkid, parent=survexblock, 
                        survexpath=pathlist, 
                        cave=self.currentcave, survexfile=self.currentsurvexfile, 
                        legsall=0, legslength=0.0)
                newsurvexblock.save()
                newsurvexblock.title = "("+survexblock.title+")" # copy parent inititally, overwrite if it has its own
                survexblock = newsurvexblock
                survexblock.save() # django insists on this , but we want to save at the end !
                tickle()

            # ---------------------------END
            elif self.rx_end.match(cmd):
                survexblock.legsall = self.legsnumber
                survexblock.legslength = self.slength
                printend()
                slengthtotal += self.slength
                nlegstotal += self.legsnumber

                try:
                    survexblock.parent.save() # django insists on this although it is already saved !?
                except:
                    print(survexblock.parent, file=sys.stderr)
                    raise
                try:
                    survexblock.save() # save to db at end of block
                except:
                    print(survexblock, file=sys.stderr)
                    raise
               # POP  state ++++++++++++++
                popblock()
                self.currentpersonexped = self.personexpedstack.pop()
                self.legsnumber = self.legsnumberstack.pop()
                self.units, self.unitsfactor = self.unitsstack.pop()
                self.slength = self.slengthstack.pop()
                blkid = self.stackbegin.pop()
                self.currentsurvexblock = survexblock.parent
                survexblock = survexblock.parent
                oldflags = self.flagsstar
                self.depthbegin -= 1
                # POP  state ++++++++++++++

            # -----------------------------
            elif self.rx_title.match(cmd):
                quotedtitle = re.match("(?i)^\"(.*)\"$",args)
                if quotedtitle:
                    survexblock.title = quotedtitle.groups()[0]
                else:
                    survexblock.title = args 
            elif self.rx_ref.match(cmd):
                self.LoadSurvexRef(survexblock, args)
            elif self.rx_flags.match(cmd):
                oldflags = self.flagsstar
                self.LoadSurvexFlags(args)
                if debugprint:
                    if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                       print(f" # CHANGE 'any' flag now:'{self.flagsstar['skiplegs']}'  was:{oldflags['skiplegs']} ")

            elif self.rx_data.match(cmd):
                if self.LoadSurvexDataNormal(survexblock, args):
                    pass
                else:
                    # Abort, we do not cope with this *data format
                    return
            elif self.rx_alias.match(cmd):
                self.LoadSurvexAlias(survexblock, args)
            elif self.rx_entrance.match(cmd):
                self.LoadSurvexEntrance(survexblock, args)
            elif self.rx_date.match(cmd):
                self.LoadSurvexDate(survexblock, args)
            elif self.rx_units.match(cmd):
                self.LoadSurvexUnits(survexblock, args)
            elif self.rx_team.match(cmd):
                self.LoadSurvexTeam(survexblock, args)
            elif self.rx_set.match(cmd) and self.rx_names.match(cmd):
                pass
            elif self.rx_include.match(cmd):
                message = f" ! -ERROR *include command not expected here {path}. Re-run a full Survex import."
                print(message)
                print(message,file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message, )
            else:
                self.LoadSurvexFallThrough(survexblock, args, cmd)


        # this is a python generator idiom. 
        # see https://realpython.com/introduction-to-python-generators/
        # this is the first use of generators in troggle (Oct.2022) and save 21 MB of memory
        with open(collatefilename, "r") as fcollate:
            for svxline in fcollate:
                self.lineno += 1
                sline, comment = self.rx_comment.match(svxline).groups()
                if comment:
                    # this catches the ;*include NEWFILE and ;*edulcni ENDOFFILE lines too
                    self.LoadSurvexComment(survexblock, comment) 

                if not sline:
                    continue # skip blank lines

                # detect a merge failure inserted by version control
                mfail = self.rx_badmerge.match(sline)
                if mfail: 
                    message = f"\n ! - ERROR version control merge failure\n   - '{sline}'\n"
                    message = message + f"   - line {self.lineno} in {blkid} in {survexblock}\n   - NERD++ needed to fix it"
                    print(message)
                    print(message,file=sys.stderr)
                    DataIssue.objects.create(parser='survex', message=message)
                    continue # skip this line

                # detect a star command
                star = self.rx_star.match(sline)
                if star: 
                    # yes we are reading a *command
                    starstatement(star)
                else: # not a *cmd so we are reading data OR a ";" rx_comment failed. We hope.
                    self.LoadSurvexLeg(survexblock, sline, comment, svxline)

            self.legsnumber = nlegstotal
            self.slength = slengthtotal  
                
    def PushdownStackScan(self, survexblock, path, finname, flinear, fcollate):
        """Follows the *include links in all the survex files from the root file  (usually 1623.svx)
        and reads only the *include and *begin and *end statements. It produces a linearised
        list of the include tree and detects blocks included more than once.

        Recursive: each *include statement triggers a nested call on the included file.
        Side effects: appends each newly-seen path to self.svxfileslist, writes the
        collated svx text to fcollate, writes the indented include tree to flinear, and
        creates DataIssue records for merge failures, missing files, duplicate includes
        and *begin/*end label mismatches.
        """
        global stop_dup_warning

        def process_line(svxline, thissvxline):
            """Handle one line of the current file. thissvxline is the 1-based line
            number within this file; self.lineno is cumulative over the whole tree.
            Copies the line to fcollate unless it is an *include, then acts on
            *include / *begin / *end / *title star-commands."""
            self.lineno += 1
            # detect a merge failure inserted by version control
            mfail = self.rx_badmerge.match(svxline)
            if mfail: 
                message = f"\n!! - ERROR version control merge failure\n   - '{svxline}'\n"
                # BUGFIX: this f-string previously referenced 'thissvxline' which was
                # never defined anywhere, so detecting a merge failure raised NameError
                # instead of reporting it. The per-file line number is now passed in.
                message = message + f"   - in '{path}' at line {thissvxline}\n"
                message = message + f"   - line {self.lineno}  {survexblock}\n   - Parsing aborted. NERD++ needed to fix it"
                print(message)
                print(message,file=sys.stderr)
                DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))
                return # skip this survex file and all things *included in it

            # everything except *include lines is copied verbatim into the collated file
            includestmt =self.rx_include.match(svxline)
            if not includestmt:
                fcollate.write(f"{svxline.strip()}\n")

            sline, comment = self.rx_comment.match(svxline.strip()).groups()
            star = self.rx_star.match(sline)
            if star: # yes we are reading a *cmd
                cmd, args = star.groups()
                cmd = cmd.lower()
                if re.match("(?i)include$", cmd):
                    # resolve the include target relative to the current file's directory
                    includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))

                    fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                    self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath), path)
                    self.checkUniqueness(os.path.join(settings.SURVEX_DATA, includepath))
                    if os.path.isfile(fullpath):
                        #--------------------------------------------------------
                        self.depthinclude += 1
                        finincludename = fullpath
                        fcollate.write(f";|*include {includepath}\n")
                        flinear.write(f"{self.depthinclude:2} {indent} *include {includepath}\n")
                        push = includepath.lower()
                        self.includestack.append(push)
                        #-----------------
                        self.PushdownStackScan(survexblock, includepath, finincludename, flinear, fcollate)
                        #-----------------
                        pop = self.includestack.pop()
                        if pop != push:
                            # BUGFIX: the message was built with "{}".format(pop, push, ...)
                            # which silently dropped everything after the first argument;
                            # show both sides of the mismatch explicitly.
                            message = f"!! ERROR mismatch *include pop!=push  {pop}!={push}  {self.includestack}"
                            print(message)
                            print(message,file=flinear)
                            print(message,file=sys.stderr)
                            DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))
                        flinear.write(f"{self.depthinclude:2} {indent} *edulcni {pop}\n")
                        fcollate.write(f";|*edulcni {pop}\n")
                        self.depthinclude -= 1
                        #--------------------------------------------------------
                    else:
                        # BUGFIX: this previously used 'fin.name', reaching the open file
                        # handle through the enclosing with-block's closure; 'finname' is
                        # the same path and does not depend on the handle being in scope.
                        message = f"    ! ERROR *include file '{includepath}' not found, listed in '{finname}'"
                        print(message)
                        print(message,file=sys.stderr)
                        DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))
                elif re.match("(?i)begin$", cmd):
                    self.depthbegin += 1
                    depth = " " * self.depthbegin
                    if args:
                        pushargs = args
                    else:
                        pushargs = " "
                    self.stackbegin.append(pushargs.lower())
                    flinear.write(f"            {self.depthbegin:2} {depth} *begin {args}\n")
                    pass
                elif re.match("(?i)end$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(f"            {self.depthbegin:2} {depth} *end   {args}\n")
                    if not args:
                        args = " "
                    popargs = self.stackbegin.pop()
                    if popargs != args.lower():
                        message = f"!! ERROR mismatch in BEGIN/END labels pop!=push '{popargs}'!='{args}'\n{self.stackbegin}"
                        print(message)
                        print(message,file=flinear)
                        print(message,file=sys.stderr)
                        DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))

                    self.depthbegin -= 1
                    pass
                elif re.match("(?i)title$", cmd):
                    depth = " " * self.depthbegin
                    flinear.write(f"                    {self.depthbegin:2} {depth} *title {args}\n")
                    pass

        indent = " " * self.depthinclude
        sys.stderr.flush()
        self.callcount +=1

        # progress ticker on stderr: one dot per 10 files, newline per 500
        if self.callcount % 10 ==0 :
            print(".", file=sys.stderr,end='')
        if self.callcount % 500 ==0 :
            print("\n    ", file=sys.stderr,end='')

        if path in self.svxfileslist:
            # We have already used os.normpath() so this is OK. "/../" and "//" have been simplified already.
            if stop_dup_warning:
                # duplicate warnings are suppressed during the _unseens pass
                pass
            else:
                message = f" * Warning. Duplicate detected. We have already seen this *include '{path}' from another survex file. Detected at callcount:{self.callcount} depth:{self.depthinclude}"
                print(message)
                print(message,file=flinear)
                DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(path))
            if self.svxfileslist.count(path) > 2:
                message = f" ! ERROR. Should have been caught before this. Survex file already *included 2x. Probably an infinite loop so fix your *include statements that include this. Aborting. {path}"
                print(message)
                print(message,file=flinear)
                DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(path))
                return
            return
        try:
            # python generator idiom again. Not important here as these are small files
            with open(finname, "r") as fin:
                for thissvxline, svxline in enumerate(fin, start=1):
                    process_line(svxline, thissvxline)

            self.svxfileslist.append(path)

        except UnicodeDecodeError:
            # some bugger put an umlaut in a non-UTF survex file ?!
            message = f"    ! ERROR *include file '{path}' in '{survexblock}' has UnicodeDecodeError. Omitted."
            print(message)
            print(message,file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))
            return # skip this survex file and all things *included in it
        except :
            message = f"    ! ERROR *include file '{path}' in '{survexblock}' has unexpected error. Omitted."
            print(message)
            print(message,file=sys.stderr)
            DataIssue.objects.create(parser='survex', message=message,  url=get_offending_filename(path))
            return # skip this survex file and all things *included in it
       
    def checkUniqueness(self, fullpath):
        """Register fullpath in self.uniquename keyed by its bare filename, and
        print a console NOTE (not a DataIssue) when the same filename appears in
        more than one directory. Non-unique names stopped being treated as errors
        once .3d files moved into the :loser: directory tree.
        """
        leafname = Path(fullpath).name
        paths_seen = self.uniquename.setdefault(leafname, [])
        paths_seen.append(fullpath)
        if len(paths_seen) > 1:
            message = f" NOTE:  non-unique survex filename, '{leafname}' - '{self.uniquename[leafname]}' #{len(self.uniquename[leafname])}"
            print(message)

    
    def RunSurvexIfNeeded(self,fullpath, calledpath):
        """Re-run 'cavern' on fullpath (a .svx path given WITHOUT the extension) when
        its .log output is missing or stale: older than the .svx file, older than the
        cavern binary, more than 60 days old, or occasionally at random via
        chaosmonkey() so that everything gets refreshed eventually.

        calledpath is the survex file whose *include named fullpath; it is only used
        in the error message when the .svx file does not exist.
        """
        now = time.time()
        # default 'ages': pretend everything is a year old until measured below
        cav_t = now - 365*24*3600
        log_t = now - 365*24*3600
        svx_t = now - 365*24*3600

        def runcavern():
            '''regenerates the .3d file from the .svx if it is older than the svx file, or older than the software,
            or randomly using chaosmonkey() just to keep things ticking over.
            '''
            print(f" -  Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{fullpath}'\n     at '{logpath}'\n")
            print(f"days svx old: {(svx_t - log_t)/(24*3600):.1f}  cav:{(cav_t - log_t)/(24*3600):.1f}   log old: { (now - log_t)/(24*3600):.1f}")
            
            outputdir = Path(str(f'{fullpath}.svx')).parent
            sp = subprocess.run([settings.CAVERN, "--log", f'--output={outputdir}', f'{fullpath}.svx'],
                                    capture_output=True, check=False, text=True)
            if sp.returncode != 0:
                message = f' ! Error running {settings.CAVERN}: {fullpath}'
                url = f'/survexfile{fullpath}.svx'.replace(settings.SURVEX_DATA, "")
                DataIssue.objects.create(parser='xEntrances', message=message, url=url)
                print(message)
                print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout)  + '\n\nreturn code: ' + str(sp.returncode))
            self.caverncount += 1
            
            # should also collect all the .err files too and create a DataIssue for each one which 
            # - is nonzero in size AND
            # - has Error greater than 5% anywhere, or some other more serious error
            
            errpath = Path(fullpath + ".err")
            if errpath.is_file():
                if errpath.stat().st_size == 0:
                    errpath.unlink() # delete empty closure error file


        svxpath = Path(fullpath + ".svx")
        logpath = Path(fullpath + ".log")
        outputdir = Path(svxpath).parent

        if not svxpath.is_file(): 
            message = f' ! BAD survex file "{fullpath}" specified in *include in {calledpath} ' 
            DataIssue.objects.create(parser='entrances', message=message)
            print(message)
            return
            
        if not logpath.is_file(): # always run if logfile not there
            runcavern()
            return

        # NOTE: this unconditional assignment makes the 'which cavern' mtime probe
        # below dead code: caverndate is always truthy (a fixed two-years-ago stamp),
        # so we never shell out to 'which'. Left in place deliberately - delete this
        # line to re-enable detection of a newly-installed cavern binary.
        self.caverndate = now - 2*365*24*3600

        if not self.caverndate:
            sp = subprocess.run(["which", f"{settings.CAVERN}"], 
                                    capture_output=True, check=False, text=True)
            if sp.returncode == 0:
                # BUGFIX: the success/failure branches were inverted. On success,
                # 'which' prints the binary's path on stdout and its mtime tells us
                # when cavern was installed. Previously getmtime() was called in the
                # FAILURE branch, on empty stdout, which would have raised.
                self.caverndate = os.path.getmtime(sp.stdout.strip())
            else:
                message = f' ! Error running "which" on {settings.CAVERN}' 
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)
                print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout)  + '\n\nreturn code: ' + str(sp.returncode))
                self.caverndate = now - 2*365*24*3600
        cav_t = self.caverndate
        log_t = os.path.getmtime(logpath)
        svx_t = os.path.getmtime(svxpath)
        now = time.time()

        if svx_t - log_t > 0:          # stale, svx file is newer than log
            runcavern()
            return
        if now - log_t > 60 *24*60*60: # >60 days, re-run anyway
            runcavern()
            return
        if cav_t - log_t > 0:          # new version of cavern
            runcavern()
            return
        if chaosmonkey(350):           # one in every 350 runs
            runcavern()

def FindAndLoadSurvex(survexblockroot):
    """Follows the *include links successively to find files in the whole include tree.

    Three passes:
      1. PushdownStackScan from SURVEX_TOPNAME collates every *included .svx file into
         one big '_<topname>.svx' file and records a linear include list.
      2. Every .svx file under SURVEX_DATA NOT seen in pass 1 (with some exceptions)
         is written into a generated _unseens.svx and scanned/collated the same way.
      3. LinearLoad parses the whole collated file into SurvexBlock objects.
    Returns the total number of survey legs loaded.
    stdout is redirected to svxblks.log for the duration; the include tree is logged
    to svxlinear.log.
    """
    global stop_dup_warning
    print('  - redirecting stdout to svxblks.log...')
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file
    sys.stdout = open('svxblks.log', 'w')

    print(f'  - Scanning Survex Blocks tree from {settings.SURVEX_TOPNAME}.svx ...',file=sys.stderr)
    survexfileroot = survexblockroot.survexfile # i.e. SURVEX_TOPNAME only
    collatefilename = "_" + survexfileroot.path + ".svx"

    svx_scan = LoadingSurvex()
    svx_scan.callcount = 0
    svx_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, survexfileroot.path)
    
    print(f"  - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    svx_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    svx_scan.checkUniqueness(fullpathtotop)
    
    indent=""
    fcollate = open(collatefilename, 'w')

    mem0 = get_process_memory()
    print(f"  - MEM:{mem0:7.2f} MB START",file=sys.stderr)
    flinear = open('svxlinear.log', 'w')
    flinear.write(f"    - MEM:{mem0:7.2f} MB START {survexfileroot.path}\n")
    print("    ", file=sys.stderr,end='')

    finrootname = Path(settings.SURVEX_DATA, survexfileroot.path + ".svx")
    fcollate.write(f";*include {survexfileroot.path}\n")
    flinear.write(f"{svx_scan.depthinclude:2} {indent} *include {survexfileroot.path}\n")

    # NOTE: profiling of the scan is always enabled; results go to PushdownStackScan.prof
    import cProfile, pstats
    from pstats import SortKey
    pr = cProfile.Profile()
    pr.enable()
    #----------------------------------------------------------------
    svx_scan.PushdownStackScan(survexblockroot, survexfileroot.path, finrootname, flinear, fcollate)
    #----------------------------------------------------------------
    pr.disable()
    with open('PushdownStackScan.prof', 'w') as f:
        ps = pstats.Stats(pr, stream=f)
        ps.sort_stats(SortKey.CUMULATIVE)
        ps.print_stats()
    
    flinear.write(f"{svx_scan.depthinclude:2} {indent} *edulcni {survexfileroot.path}\n")
    fcollate.write(f";*edulcni {survexfileroot.path}\n")
    mem1 = get_process_memory()
    flinear.write(f"\n    - MEM:{mem1:.2f} MB STOP {survexfileroot.path}\n")
    flinear.write(f"    - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED\n")
    flinear.write(f"    - {len(svx_scan.svxfileslist):,} survex files in linear include list \n")
     
    print(f"\n  -  {svx_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files",file=sys.stderr)
    print(f"  -  {len(svx_scan.svxfileslist):,} survex files from tree in linear include list",file=sys.stderr)
       
    mem1 = get_process_memory()
    print(f"  - MEM:{mem1:7.2f} MB END ",file=sys.stderr)
    print(f"  - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED",file=sys.stderr)  
    #
    # Process all the omitted files in :loser: with some exceptions
    #
    unseens = set()
    b=[]
    
    # Compare every .svx file on disk against the include-tree list from pass 1
    for p in Path(settings.SURVEX_DATA).rglob('*.svx'):
        if p.is_file():
            po = p.relative_to(Path(settings.SURVEX_DATA))
            pox = po.with_suffix('')
            if str(pox) not in svx_scan.svxfileslist:
                unseens.add(pox)
            else:
                b.append(pox)
    
    if len(b) != len(svx_scan.svxfileslist):
        print(f" ! Mismatch. {len(b)} survex files found which should be {len(svx_scan.svxfileslist)} in main tree)", file=sys.stderr)
     
    # files whose path starts with any of these prefixes are never loaded
    excpts = ["surface/terrain", "kataster/kataster-boundaries", "template", "docs", "_unseens"]
    removals = []
    for x in unseens:
        for o in excpts:
            if  str(x).strip().startswith(o):
                removals.append(x)
    # special fix for file not actually in survex format
    # BUGFIX: use discard() not remove() - remove() raises KeyError when the path
    # is absent, e.g. if the file is deleted or becomes *included in the main tree.
    unseens.discard(Path("fixedpts/gps/gps00raw"))
    
    for x in removals:
        unseens.remove(x)
    print(f"\n  - {len(unseens)} survex files found which were not included in main tree. ({len(svx_scan.svxfileslist)} in main tree)", file=sys.stderr)
    print(f" -- Now loading the previously-omitted survex files.", file=sys.stderr)
    
    # write a synthetic top-level file that *includes every unseen file
    with open(Path(settings.SURVEX_DATA, '_unseens.svx'), 'w') as u: 
        u.write(f"; {len(unseens):,} survex files not *included by {settings.SURVEX_TOPNAME} (which are {len(svx_scan.svxfileslist):,} files)\n")
        u.write(f"; autogenerated  by parser/survex.py from databasereset.py on '{datetime.now(timezone.utc)}'\n")
        u.write(f"; omitting any file beginning with {excpts}\n\n")
        u.write(f"*begin unseens\n")
        for x in sorted(unseens):
            u.write(f"    *include {x}\n")
        u.write(f"*end unseens\n")
 
    survexfileroot = survexblockroot.survexfile # i.e. SURVEX_TOPNAME only

    omit_scan = LoadingSurvex()
    omit_scan.callcount = 0
    omit_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, '_unseens.svx')
    
    # copy the list to prime the next pass through the files
    omit_scan.svxfileslist = svx_scan.svxfileslist[:]
    svx_scan.svxfileslist = [] # free memory
    svx_scan = None # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.? 
  
    print(f"  - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    omit_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    omit_scan.checkUniqueness(fullpathtotop)
 
    mem0 = get_process_memory()
    print(f"  - MEM:{mem0:7.2f} MB START '_unseens'",file=sys.stderr)
    flinear.write(f"    - MEM:{mem0:7.2f} MB START '_unseens'\n")
    print("    ", file=sys.stderr,end='')

    finrootname = fullpathtotop
    fcollate.write(";*include _unseens.svx\n")
    flinear.write(f"{omit_scan.depthinclude:2} {indent} *include _unseens\n")
    # every unseen file is by definition a 'duplicate' of nothing; suppress warnings
    stop_dup_warning = True
    #----------------------------------------------------------------
    omit_scan.PushdownStackScan(survexblockroot, '_unseens', finrootname, flinear, fcollate)
    #----------------------------------------------------------------
    stop_dup_warning = False

    flinear.write(f"{omit_scan.depthinclude:2} {indent} *edulcni _unseens\n")
    fcollate.write(";*edulcni _unseens.svx\n")
    mem1 = get_process_memory()
    flinear.write(f"\n    - MEM:{mem1:.2f} MB STOP _unseens.svx OMIT\n")
    flinear.write(f"    - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED OMIT\n")
    flinear.write(f"    - {len(omit_scan.svxfileslist):,} survex files in linear include list OMIT \n")
    
    flinear.close()
    fcollate.close()
    
    print(f"\n -  {omit_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files in the unseen list",file=sys.stderr)
    
    print(f" -  {len(omit_scan.svxfileslist):,} survex files in linear include list including previously unseen ones \n",file=sys.stderr)
    omit_scan = None # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.? 
      
    mem1 = get_process_memory()
    print(f"  - MEM:{mem1:7.2f} MB END ",file=sys.stderr)
    print(f"  - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED",file=sys.stderr)

 
    # Before doing this, it would be good to identify the *equate and *entrance we need that are relevant to the
    # entrance locations currently loaded after this by LoadPos(), but could better be done before ?
    # look in MapLocations() for how we find the entrances
   
    print('\n  - Loading All Survex Blocks (LinearLoad)',file=sys.stderr)
    svx_load = LoadingSurvex()

    svx_load.survexdict[survexfileroot.survexdirectory] = []
    svx_load.survexdict[survexfileroot.survexdirectory].append(survexfileroot)
    svx_load.svxdirs[""] = survexfileroot.survexdirectory

    print("    ", file=sys.stderr,end='')
    #----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot, survexfileroot.path, collatefilename)
    #----------------------------------------------------------------
    mem1 = get_process_memory()
    print(f"\n - MEM:{mem1:7.2f} MB STOP",file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED",file=sys.stderr)

    # Close the logging file, Restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    legsnumber = svx_load.legsnumber
    mem1 = get_process_memory()

    print(f"  - Number of SurvexDirectories: {len(svx_load.survexdict):,}")
    tf=0
    for d in svx_load.survexdict:
        tf += len(svx_load.survexdict[d])
    print(f"  - Number of SurvexFiles: {tf:,}")
    print(f"  - Number of Survex legs: {legsnumber:,}")
    svx_load = None

    return legsnumber

def MakeSurvexFileRoot():
    """Create and return the root SurvexFile (path=SURVEX_TOPNAME), linked both
    ways to a new root SurvexDirectory (path=SURVEX_DATA).
    """
    # find a cave, any cave..
    caves = Cave.objects.all()
    placeholder_caves = caves.filter(kataster_number="000") # returns a list, a QuerySet

    root_file = SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    root_file.save()

    root_dir = SurvexDirectory(path=settings.SURVEX_DATA, cave=placeholder_caves[0], primarysurvexfile=root_file)
    # MariaDB doesn't like this hack. Complains about non-null cave_id EVEN THOUGH our model file says this is OK:
    # cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
    root_dir.save()

    # mutually dependent objects need a double-save like this
    root_file.survexdirectory = root_dir # i.e. SURVEX_DATA/SURVEX_TOPNAME
    root_file.save()
    return root_file
    
def MakeOmitFileRoot(fn):
    """Create and return a SurvexFile for fn (i.e. _unseens.svx), attached to the
    already-existing SurvexDirectory whose path is SURVEX_DATA.
    """
    omit_file = SurvexFile(path=fn, cave=None)
    omit_file.survexdirectory = SurvexDirectory.objects.get(path=settings.SURVEX_DATA)
    omit_file.save()
    return omit_file

def LoadSurvexBlocks():
    """Flush all survex-derived objects and survex DataIssues from the database,
    recreate the root SurvexFile/SurvexBlock pair (plus the _unseens omits root),
    then parse the whole survex tree via FindAndLoadSurvex().
    """
    mem1 = get_process_memory()
    print(f"  - MEM:{mem1:7.2f} MB now ",file=sys.stderr)

    print(' - Flushing All Survex Blocks...')
    # why does this increase memory use by 20 MB ?!
    # We have foreign keys, Django needs to load the related objects 
    # in order to resolve how the relation should handle the deletion: 
    # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.ForeignKey.on_delete
    SurvexBlock.objects.all().delete()
    SurvexFile.objects.all().delete()
    SurvexDirectory.objects.all().delete()
    SurvexPersonRole.objects.all().delete()
    SurvexStation.objects.all().delete()
    mem1 = get_process_memory()
    print(f"  - MEM:{mem1:7.2f} MB now. Foreign key objects loaded on deletion. ",file=sys.stderr)
    
    print("  - Flushing survex Data Issues ")
    # one delete per parser tag used by this module's DataIssue.objects.create calls
    DataIssue.objects.filter(parser='survex').delete()
    DataIssue.objects.filter(parser='svxdate').delete()
    DataIssue.objects.filter(parser='survexleg').delete()
    DataIssue.objects.filter(parser='survexunits').delete()
    DataIssue.objects.filter(parser='entrances').delete()
    DataIssue.objects.filter(parser='xEntrances').delete()
    print("  - survex Data Issues flushed")
    mem1 = get_process_memory()
    print(f"  - MEM:{mem1:7.2f} MB now ",file=sys.stderr)
    
    survexfileroot = MakeSurvexFileRoot()
    # this next makes a block_object assciated with a file_object.path = SURVEX_TOPNAME
    # NOTE(review): 'survexblockroot' and 'survexomitsroot' below are LOCALS that
    # shadow the module-level globals of the same names (which stay None) -
    # confirm nothing elsewhere relies on those globals being set here.
    survexblockroot = SurvexBlock(name=ROOTBLOCK, survexpath="", cave=None, survexfile=survexfileroot, 
            legsall=0, legslength=0.0)
    # crashes here sometimes on MariaDB complaining that cave_id should not be null. But it should be.
    #django.db.utils.IntegrityError: (1048, "Column 'cave_id' cannot be null")
    # fix by restarting db on server
    # sudo service mariadb stop
    # sudo service mariadb start
    survexblockroot.save()
    
    omitsfileroot = MakeOmitFileRoot("_unseens.svx")
    survexomitsroot = SurvexBlock(name=OMITBLOCK, survexpath="", cave=None, survexfile=omitsfileroot, 
        legsall=0, legslength=0.0)
    survexomitsroot.save()  

    print(' - Loading Survex Blocks...')
    memstart = get_process_memory()
    #----------------------------------------------------------------
    FindAndLoadSurvex(survexblockroot)
    #----------------------------------------------------------------
    memend = get_process_memory()
    print(f" - MEMORY start:{memstart:.3f} MB end:{memend:.3f} MB increase={memend - memstart:.3f} MB")
    
    survexblockroot.save()

    print(' - Loaded All Survex Blocks.')

# Matches one line of a survex .pos file, e.g. "(  123.45,  -6.78,  9.01 ) 1623.40.entrance":
# captures the three signed decimal coordinates and the trailing station name.
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")

def LoadPositions():
    """Load survex station positions (x/y/z) for entrances and fixed points.

    Runs cavern to produce a complete .3d file (only if the existing one is
    missing or stale), then runs survexport to produce a .pos table of all
    survey point positions. Each position is looked up by name against the
    entrance/fixed-point ids from MapLocations(); matches are saved as
    SurvexStation objects attached to the root survex block. Positions not
    matching any wanted id are discarded; failures are logged as DataIssues.
    """
    svx_t = 0
    d3d_t = 0

    def runcavern3d():
        """Regenerate {topdata}.3d with cavern, then {topdata}.pos with survexport."""
        outputdir = Path(str(f'{topdata}.svx')).parent
        file3d = Path(f'{topdata}.3d')
        try:
            # check=False: a non-zero exit is reported as a DataIssue, not an exception
            sp = subprocess.run([settings.CAVERN, "--log", f"--output={outputdir}", f"{topdata}.svx"],
                                    capture_output=True, check=False, text=True)
            if sp.returncode != 0:
                message = f' ! Error: cavern: creating {file3d} in runcavern3()'
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)

                # pull the actual error lines out of the cavern log for the report
                sp = subprocess.run(["grep", "error:", f"{topdata}.log"],
                                        capture_output=True, check=False, text=True)
                message = f' ! Error: cavern: {sp.stdout} creating {file3d} '
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)

        except Exception:
            message = f" ! CalledProcessError 'cavern' in runcavern3() at {topdata}."
            DataIssue.objects.create(parser='entrances', message=message)
            print(message)

            if file3d.is_file():
                message = f" ! CalledProcessError. File permissions {file3d.stat().st_mode} on {str(file3d)}"
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)

        if file3d.is_file(): # might be an old one though
            try:
                # produce the .pos table from the .3d file
                sp = subprocess.run([settings.SURVEXPORT, '--pos', f'{file3d}'], cwd=settings.SURVEX_DATA,
                                        capture_output=True, check=False, text=True)
                if sp.returncode != 0:
                    print(f' ! Error: survexport creating {topdata}.pos in runcavern3().\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
            except Exception:
                message = f" ! CalledProcessError 'survexport' in runcavern3() at {file3d}."
                DataIssue.objects.create(parser='entrances', message=message)
                print(message)
        else:
            message = f" ! Failed to find {file3d} so aborting generation of new .pos, using old one if present"
            DataIssue.objects.create(parser='entrances', message=message)
            print(message)

    topdata = os.fspath(Path(settings.SURVEX_DATA) / settings.SURVEX_TOPNAME)
    print(f' - Generating a list of Pos from {topdata}.svx and then loading...')

    found = 0
    print("\n") # extra line because cavern overwrites the text buffer somehow
    # cavern defaults to using same cwd as supplied input file

    # mtime of the cavern binary itself: a newer cavern invalidates old .3d files
    completed_process = subprocess.run(["which", f"{settings.CAVERN}"],
                            capture_output=True, check=True, text=True)
    cav_t = os.path.getmtime(completed_process.stdout.strip())

    svxpath = topdata + ".svx"
    d3dpath = topdata + ".3d"
    pospath = topdata + ".pos"

    svx_t = os.path.getmtime(svxpath)

    if os.path.isfile(d3dpath):
        # always fails to find log file if a double directory, e.g. caves-1623/B4/B4/B4.svx Why ?
        d3d_t = os.path.getmtime(d3dpath)

    now = time.time()
    if not os.path.isfile(pospath):
        runcavern3d()
    if not os.path.isfile(d3dpath):
        runcavern3d()
    elif svx_t - d3d_t > 0:         # stale, 3d older than svx file (BUGFIX: operands were swapped)
        runcavern3d()
    elif now - d3d_t > 60 * 24 * 60 * 60: # >60 days old, re-run anyway
        runcavern3d()
    elif cav_t - d3d_t > 0:         # new version of cavern
        runcavern3d()

    # survex ids of all the entrance/fixed points we want coordinates for
    mappoints = {}
    for pt in MapLocations().points():
        svxid, number, point_type, label = pt
        mappoints[svxid] = True

    if not Path(pospath).is_file():
        message = f" ! Failed to find {pospath} so aborting generation of entrance locations. "
        DataIssue.objects.create(parser='entrances', message=message)
        print(message)
        return

    try:
        survexblockroot = SurvexBlock.objects.get(name=ROOTBLOCK)
    except Exception:
        try:
            # fall back to the very first block ever created
            survexblockroot = SurvexBlock.objects.get(id=1)
        except Exception:
            message = f' ! FAILED to find root SurvexBlock'
            print(message)
            DataIssue.objects.create(parser='entrances', message=message)
            raise

    # BUGFIX: file was previously opened without ever being closed (resource leak)
    with open(pospath) as posfile:
        posfile.readline() # drop header
        for line in posfile:
            r = poslineregex.match(line)
            if not r:
                continue
            # 'stn_id' renamed from 'id' to avoid shadowing the builtin
            x, y, z, stn_id = r.groups()
            for sid in mappoints:
                if stn_id.endswith(sid):
                    blockpath = "." + stn_id[:-len(sid)].strip(".")
                    # We deliberately do NOT try to match the station to its real
                    # SurvexBlock by survexpath: mostly these stations don't actually
                    # appear in any SVX file, so they should be matched up via the
                    # cave data instead. All stations go on the root block.
                    try:
                        ss = SurvexStation(name=stn_id, block=survexblockroot)
                        ss.x = float(x)
                        ss.y = float(y)
                        ss.z = float(z)
                        ss.save()
                        found += 1
                    except Exception:
                        message = f' ! FAIL to create SurvexStation Entrance point {blockpath} {sid}'
                        print(message)
                        DataIssue.objects.create(parser='entrances', message=message)
                        raise
    print(f" -  {found} SurvexStation entrances found.")