2023-01-19 18:33:04 +00:00
import copy
2020-05-28 01:16:45 +01:00
import os
import re
2021-04-03 00:34:34 +01:00
import subprocess
2023-01-19 18:33:04 +00:00
import sys
import time
2023-01-19 21:34:09 +00:00
from datetime import datetime , timezone
2021-03-24 15:46:35 +00:00
from pathlib import Path
2009-05-13 05:39:52 +01:00
2020-02-21 15:57:07 +00:00
2020-05-28 01:16:45 +01:00
import troggle . settings as settings
2023-01-19 21:34:09 +00:00
from troggle . core . models . caves import QM , Cave , Entrance
2023-01-19 21:18:42 +00:00
from troggle . core . models . survex import SurvexBlock , SurvexDirectory , SurvexFile , SurvexPersonRole , SurvexStation , Wallet
2021-04-13 01:13:08 +01:00
from troggle . core . models . troggle import DataIssue , Expedition
2023-01-19 18:33:04 +00:00
from troggle . core . utils import chaosmonkey , get_process_memory
from troggle . parsers . logbooks import GetCaveLookup
2023-01-19 21:18:42 +00:00
from troggle . parsers . people import GetPersonExpeditionNameLookup , known_foreigner
2020-05-28 01:16:45 +01:00
2023-01-19 21:18:42 +00:00
""" Imports the tree of survex files following from a defined root .svx file
2022-10-07 21:48:41 +01:00
It also scans the Loser repo for all the svx files , which it loads individually afterwards .
2023-01-19 21:18:42 +00:00
"""
2021-04-13 01:37:42 +01:00
2023-01-19 21:18:42 +00:00
todo = """
2021-11-06 21:57:51 +00:00
2022-11-17 01:24:39 +00:00
- #BUG, if *date comes after *team, the person's date is not set at all.
It needs re - setting at the end of the block .
2022-03-01 01:30:09 +00:00
- LoadSurvexFile ( ) Creates a new current survexfile and valid . survexdirectory
The survexblock passed - in is not necessarily the parent . FIX THIS .
2022-07-08 23:54:48 +01:00
2022-11-15 22:25:39 +00:00
- When Olly implements LEG in the ' dump3d --legs ' utility , then we can use that to get the length of
all the legs in a survex block instead of adding them up oursleves . Which means that we can
ignore all the Units and offset stuff , that troggle will work with survex files with backsights ,
repeated readings from distox etc . . Not actually useful for pre 2022 survey data , but good future - proofing .
Also it will be a tiny bit more accurate as these leg lengths are after loop closure fixup .
2023-01-19 21:18:42 +00:00
"""
2020-06-15 03:28:51 +01:00
# Module-level parser state and constants.
survexblockroot = None  # the root SurvexBlock, set when parsing starts
survexomitsroot = None  # root block for all the *omit-ed / unwalked files
ROOTBLOCK = "rootblock"  # name of the synthetic top-level block
OMITBLOCK = "omitblock"  # name of the synthetic block holding omitted files
METRESINFEET = 3.28084  # conversion factor for the few files surveyed in feet

stop_dup_warning = False
debugprint = False  # Turns on debug printout for just one *include file
debugprinttrigger = "!"
# debugprinttrigger = "caves-1623/40/old/EisSVH"
2020-07-06 21:46:19 +01:00
2023-01-19 21:18:42 +00:00
2021-04-17 01:41:06 +01:00
class MapLocations(object):
    """Class used only for identifying the entrance locations."""

    # Fixed reference/survey points. NOTE(review): this is a CLASS attribute, so
    # repeated calls to points() keep appending to the same shared list across
    # all instances — left as-is because callers may rely on the accumulation.
    p = [
        ("laser.0_7", "BNase", "Reference", "Bräuning Nase laser point"),
        ("226-96", "BZkn", "Reference", "Bräuning Zinken trig point"),
        ("vd1", "VD1", "Reference", "VD1 survey point"),
        ("laser.kt114_96", "HSK", "Reference", "Hinterer Schwarzmooskogel trig point"),
        ("2000", "Nipple", "Reference", "Nipple (Weiße Warze)"),
        ("3000", "VSK", "Reference", "Vorderer Schwarzmooskogel summit"),
        ("topcamp", "OTC", "Reference", "Old Top Camp"),
        ("laser.0", "LSR0", "Reference", "Laser Point 0"),
        ("laser.0_1", "LSR1", "Reference", "Laser Point 0/1"),
        ("laser.0_3", "LSR3", "Reference", "Laser Point 0/3"),
        ("laser.0_5", "LSR5", "Reference", "Laser Point 0/5"),
        ("225-96", "BAlm", "Reference", "Bräuning Alm trig point"),
    ]

    def points(self):
        """Append one entry per Entrance that has a best_station() to self.p
        and return the combined list of map locations."""
        for ent in Entrance.objects.all():
            if ent.best_station():
                # print(f"{ent.filename}", end=", ")
                try:
                    k = ent.caveandentrance_set.all()[0].cave
                except Exception:  # was a bare except: narrowed so Ctrl-C etc. still propagate
                    message = f"! Failed to get Cave linked to Entrance: {ent.name} from: {ent.filename} best: {ent.best_station()} {ent.caveandentrance_set.all()}"
                    DataIssue.objects.create(parser="entrances", message=message)
                    print(message)
                    continue  # skip this entrance
                try:
                    areaName = k.getArea().short_name
                except Exception:  # was a bare except: narrowed; still re-raised as before
                    message = f"! Failed to get Area on cave '{k}' linked to Entrance: {ent.name} from: {ent.filename} best: {ent.best_station()}"
                    DataIssue.objects.create(parser="entrances", message=message)
                    print(message)
                    raise
                self.p.append((ent.best_station(), f"{areaName}-{str(ent)[5:]}", ent.needs_surface_work(), str(ent)))
        message = f" - {len(self.p)} entrances linked to caves."
        print(message)
        return self.p

    def __str__(self):
        return f"{len(self.p)} map locations"
2023-01-19 21:18:42 +00:00
2022-07-23 17:26:47 +01:00
def get_offending_filename(path):
    """Return the troggle URL for the survex file at *path*.

    Used to provide the URL for a line in the DataErrors page
    which reports problems on importing data into troggle.
    """
    return f"/survexfile/{path}.svx"
2021-04-17 01:41:06 +01:00
2023-01-19 21:18:42 +00:00
class SurvexLeg:
    """One leg of a survey: tape/compass/clino readings.

    No longer a models.Model subclass, so no longer a database table.
    """

    # defaults; real values are filled in per-leg by the parser
    tape = 0.0
    compass = 0.0
    clino = 0.0
2020-06-24 14:10:13 +01:00
2022-09-18 21:53:04 +01:00
def get_people_on_trip(survexblock):
    """Return a de-duplicated list of the names of everyone recorded
    (via SurvexPersonRole) on this survex block."""
    qpeople = SurvexPersonRole.objects.filter(survexblock=survexblock)
    # set comprehension de-duplicates; list() to keep the original return type
    return list({f"{p.personname}" for p in qpeople})
2022-10-07 21:48:41 +01:00
2023-01-19 21:18:42 +00:00
class LoadingSurvex:
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scanswallet' is what we today call a "survey scans folder" or a "wallet".
    """

    # python regex flags: (?i) means case-insensitive, (?s) means . matches newline too
    # see https://docs.python.org/3/library/re.html
    rx_begin = re.compile(r"(?i)begin")
    rx_end = re.compile(r"(?i)end$")
    rx_title = re.compile(r"(?i)title$")
    rx_ref = re.compile(r"(?i)ref$")
    rx_data = re.compile(r"(?i)data$")
    rx_flags = re.compile(r"(?i)flags$")
    rx_alias = re.compile(r"(?i)alias$")
    rx_entrance = re.compile(r"(?i)entrance$")
    rx_date = re.compile(r"(?i)date$")
    rx_units = re.compile(r"(?i)units$")
    rx_team = re.compile(r"(?i)team$")
    rx_set = re.compile(r"(?i)set$")
    rx_names = re.compile(r"(?i)names")
    rx_flagsnot = re.compile(r"not\s")
    rx_linelen = re.compile(r"[\d\-+.]+$")
    instruments = "(bitch|bodger|bolt|bolter|bolting|book|clino|comp|compass|consultant|disto|distox|distox2|dog|dogsbody|drawing|drill|gps|helper|inst|instr|instrument|monkey|nagging|nail|nail_polish|nail_polish_bitch|nail_polish_monkey|nail_varnish|nail_varnish_bitch|note|paint|photo|pic|point|polish|powerdrill|rig|rigger|rigging|sketch|slacker|something|tape|topodroid|unknown|useless|varnish|waiting_patiently)"
    rx_teammem = re.compile(r"(?i)" + instruments + r"?(?:es|s)?\s+(.*)$")
    rx_teamold = re.compile(r"(?i)(.*)\s+" + instruments + r"?(?:es|s)?$")
    rx_teamabs = re.compile(r"(?i)^\s*(" + instruments + r")?(?:es|s)?\s*$")
    rx_person = re.compile(r"(?i) and |/| / |, | , |&| & | \+ |^both$|^none$")
    rx_qm = re.compile(
        r"(?i)^\s*QM(\d+)\s+?([a-dA-DxX])\s+([\w\-\_]+)\.([\w\.\-]+)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$"
    )
    # does not recognise non numeric suffix survey point ids
    rx_qm0 = re.compile(r"(?i)^\s*QM(\d+)\s+(.+)$")
    rx_qm_tick = re.compile(r"(?i)^\s*QM(\d+)\s+TICK\s([\d\-]+)\s(.*)$")
    # remember there is also QM_PATTERN used in views.other and set in settings.py
    rx_tapelng = re.compile(r"(?i).*(tape|length).*$")
    rx_cave = re.compile(r"(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)")
    rx_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
    rx_comminc = re.compile(r"(?i)^\|\*include[\s]*([-\w/]*).*$")  # inserted by linear collate ;*include
    rx_commcni = re.compile(r"(?i)^\|\*edulcni[\s]*([-\w/]*).*$")  # inserted by linear collate ;*edulcni
    rx_include = re.compile(r"(?i)^\s*(\*include[\s].*)$")
    rx_commref = re.compile(r"(?i)^\s*ref(?:erence)?[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)")
    rx_ref_text = re.compile(r'(?i)^\s*\"[^"]*\"\s*$')
    rx_star = re.compile(r"(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$")
    rx_starref = re.compile(r"(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
    rx_argsref = re.compile(r"(?i)^[\s.:]*((?:19[6789]\d)|(?:20[012345]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
    rx_badmerge = re.compile(r"(?i).*(\>\>\>\>\>)|(\=\=\=\=\=)|(\<\<\<\<\<).*$")
    rx_ref2 = re.compile(r"(?i)\s*ref[.;]?")
    rx_commteam = re.compile(r"(?i)\s*(Messteam|Zeichner)\s*[:]?(.*)")

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
    datastardefault = {"type": "normal", "from": 0, "to": 1, "tape": 2, "compass": 3, "clino": 4}
    flagsdefault = {"duplicate": False, "surface": False, "splay": False, "skiplegs": False, "splayalias": False}

    # NOTE(review): these mutable class attributes act as shared parser state;
    # only one LoadingSurvex is expected to be live at a time.
    datastar = {}
    flagsstar = {}
    units = "metres"
    unitsfactor = None
    slength = 0.0
    legsnumber = 0
    depthbegin = 0
    depthinclude = 0
    unitsstack = []
    legsnumberstack = []
    slengthstack = []
    personexpedstack = []
    stackbegin = []
    flagsstack = []
    datastack = []
    includestack = []
    stacksvxfiles = []
    svxfileslist = []
    svxdirs = {}
    uniquename = {}
    expos = {}
    survexdict = {}  # each key is a directory, and its value is a list of files
    lineno = 0
    insp = ""  # indentation prefix for nested debug printing
    callcount = 0
    caverncount = 0
    ignoreprefix = ["surface", "kataster", "fixedpts", "gpx"]
    ignorenoncave = [
        "caves-1623",
        "caves-1623/2007-NEU",
        "caves-1626",
        "caves-1624",
        "caves-1627",
        "fixedpts/gps/gps00raw",
        "",
    ]
    includedfilename = ""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None
    currentpersonexped = []
    pending = []

    def __init__(self):
        self.caveslist = GetCaveLookup()
        pass
2023-01-19 21:18:42 +00:00
2020-07-05 17:22:26 +01:00
def LoadSurvexFallThrough ( self , survexblock , line , cmd ) :
2020-06-28 14:42:26 +01:00
if cmd == " require " :
2023-01-19 21:18:42 +00:00
pass # should we check survex version available for processing?
elif cmd in [ " equate " , " fix " , " calibrate " , " cs " , " export " , " case " , " declination " , " infer " , " instrument " , " sd " ] :
pass # we ignore all these, which is fine.
2020-06-24 19:07:11 +01:00
else :
2023-01-19 21:18:42 +00:00
if cmd in [ " include " , " data " , " flags " , " title " , " entrance " , " set " , " units " , " alias " , " ref " ] :
message = (
f " ! Warning. Unparsed [* { cmd } ]: ' { line } ' { survexblock . survexfile . path } - not an error (probably) "
)
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2020-06-24 19:07:11 +01:00
else :
2023-01-19 21:18:42 +00:00
message = (
f " ! Bad unrecognised svx command: [* { cmd } ] { line } ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2020-06-24 19:07:11 +01:00
def LoadSurvexTeam ( self , survexblock , line ) :
2020-07-09 18:06:03 +01:00
""" Interpeting the *team fields means interpreting older style survex as well as current survex standard,
* team Insts Anthony Day - this is how most of our files specify the team member
* team " Anthony Day " notes pictures tape - this is how the survex documentation says it should be done
2023-01-19 21:18:42 +00:00
We have a huge variety of abbreviations and mispellings . The most laconic being
2020-07-09 18:06:03 +01:00
* team gb , bl
2023-01-19 21:18:42 +00:00
2022-11-17 01:24:39 +00:00
personrole is used to record that a person was on a survex trip , NOT the role they played .
( NB PersonTrip is a logbook thing , not a survex thing . Yes they could be merged , maybe . )
2020-07-09 18:06:03 +01:00
"""
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
def record_team_member ( tm , survexblock ) :
2023-01-19 21:18:42 +00:00
tm = tm . strip ( " \" ' " ) . strip ( )
2022-10-07 21:48:41 +01:00
# Refactor. The dict GetPersonExpeditionNameLookup(expo) indexes by name and has values of personexpedition
# This is convoluted, the whole personexpedition concept is unnecessary.
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
# we need the current expedition, but if there has been no date yet in the survex file, we don't know which one it is.
# so we can't validate whether the person was on expo or not.
2023-01-19 21:18:42 +00:00
# we will have to attach them to the survexblock anyway, and then do a
2022-10-07 21:48:41 +01:00
# later check on whether they are valid when we get the date.
2023-01-19 21:18:42 +00:00
expo = survexblock . expedition # may be None if no *date yet
2022-10-07 21:48:41 +01:00
# this syntax was bizarre.. made more obvious
if expo :
2023-01-19 21:18:42 +00:00
if not survexblock . expeditionday : # *date has been set
2022-10-07 21:48:41 +01:00
# should not happen
2022-11-23 10:41:14 +00:00
message = f " ! *team { expo . year } expo ok, expedition day not in *team { survexblock . survexfile . path } ( { survexblock } ) "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
personexpedition = GetPersonExpeditionNameLookup ( expo ) . get ( tm . lower ( ) )
if personexpedition :
personrole , created = SurvexPersonRole . objects . update_or_create (
survexblock = survexblock , personexpedition = personexpedition , personname = tm
)
personrole . person = personexpedition . person
personrole . expeditionday = survexblock . expeditionday
self . currentpersonexped . append ( personexpedition ) # used in push/pop block code
2022-10-07 22:48:21 +01:00
personrole . save ( )
2023-01-19 21:18:42 +00:00
elif known_foreigner ( tm ) : # note, not using .lower()
2022-11-23 10:41:14 +00:00
message = f " - *team { expo . year } ' { tm } ' known foreigner on *team { survexblock . survexfile . path } ( { survexblock } ) in ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2022-10-10 13:40:21 +01:00
# DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
2022-10-07 22:48:21 +01:00
else :
2022-10-07 21:48:41 +01:00
# we know the date and expo, but can't find the person
2022-11-23 10:41:14 +00:00
message = f " ! *team { expo . year } ' { tm } ' FAIL personexpedition lookup on *team { survexblock . survexfile . path } ( { survexblock } ) in ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2022-10-07 21:48:41 +01:00
else :
personexpedition = None
2023-01-19 21:18:42 +00:00
personrole , created = SurvexPersonRole . objects . update_or_create (
survexblock = survexblock , personexpedition = personexpedition , personname = tm
)
# don't know the date yet, so cannot query the table about validity.
2022-10-07 22:48:21 +01:00
# assume the person is valid. It will get picked up with the *date appears
personrole . save ( )
2022-10-07 21:48:41 +01:00
2023-01-19 21:18:42 +00:00
mteammember = self . rx_teammem . match ( line ) # matches the role at the beginning
2022-10-07 21:48:41 +01:00
if not mteammember :
2023-01-19 21:18:42 +00:00
moldstyle = self . rx_teamold . match ( line ) # matches the role at the the end of the string
2022-10-07 21:48:41 +01:00
if moldstyle :
for tm in self . rx_person . split ( moldstyle . group ( 1 ) ) :
if tm :
record_team_member ( tm , survexblock )
# seems to be working
# msg = "! OLD tm='{}' line: '{}' ({}) {}".format(tm, line, survexblock, survexblock.survexfile.path)
# print(msg, file=sys.stderr)
else :
2022-11-23 10:41:14 +00:00
message = f " ! *team { survexblock . survexfile . path } ( { survexblock } ) Weird ' { mteammember . group ( 1 ) } ' oldstyle line: ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2022-10-07 21:48:41 +01:00
else :
2023-01-19 21:18:42 +00:00
nullmember = self . rx_teamabs . match ( line ) # matches empty role line. Ignore these.
2022-10-07 21:48:41 +01:00
if not nullmember :
2022-11-23 10:41:14 +00:00
message = f " ! *team { survexblock . survexfile . path } ( { survexblock } ) Bad line: ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2022-10-07 21:48:41 +01:00
else :
2020-06-24 19:07:11 +01:00
for tm in self . rx_person . split ( mteammember . group ( 2 ) ) :
if tm :
2022-10-07 21:48:41 +01:00
record_team_member ( tm , survexblock )
else :
2023-01-19 21:34:09 +00:00
if mteammember . group ( 2 ) . lower ( ) not in ( " none " , " both " ) :
2022-11-23 10:41:14 +00:00
message = f " ! Weird *team ' { mteammember . group ( 2 ) } ' newstyle line: ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
2020-06-24 19:07:11 +01:00
2020-07-04 13:31:46 +01:00
def LoadSurvexEntrance ( self , survexblock , line ) :
# Not using this yet
pass
2023-01-19 21:18:42 +00:00
2020-07-04 13:31:46 +01:00
def LoadSurvexAlias ( self , survexblock , line ) :
# *alias station - ..
2023-01-19 21:18:42 +00:00
splayalias = re . match ( " (?i)station \ s* \ - \ s* \ . \ . \ s*$ " , line )
2020-07-04 13:31:46 +01:00
if splayalias :
self . flagsstar [ " splayalias " ] = True
else :
2022-11-23 10:41:14 +00:00
message = f " ! Bad *ALIAS: ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create ( parser = " survex " , message = message )
2020-07-04 13:31:46 +01:00
2020-07-07 01:35:58 +01:00
def LoadSurvexUnits ( self , survexblock , line ) :
# all for 4 survex files with measurements in feet. bugger.
2022-11-17 01:24:39 +00:00
# Won't need this once we move to using cavern or d3dump output for lengths
2023-01-19 21:18:42 +00:00
tapeunits = self . rx_tapelng . match ( line ) # tape|length
2020-07-07 01:35:58 +01:00
if not tapeunits :
return
2023-01-19 21:18:42 +00:00
convert = re . match ( " (?i)( \ w*) \ s*([ \ . \ d]+) \ s*( \ w*) " , line )
2020-07-07 02:46:18 +01:00
if convert :
2020-07-08 00:00:56 +01:00
factor = convert . groups ( ) [ 1 ]
self . unitsfactor = float ( factor )
if debugprint :
2023-01-19 21:18:42 +00:00
message = (
f " ! *UNITS NUMERICAL conversion [ { factor } x] ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
DataIssue . objects . create ( parser = " survexunits " , message = message )
feet = re . match ( " (?i).*feet$ " , line )
metres = re . match ( " (?i).*(METRIC|METRES|METERS)$ " , line )
2020-07-07 01:35:58 +01:00
if feet :
self . units = " feet "
elif metres :
self . units = " metres "
else :
2022-11-23 10:41:14 +00:00
message = f " ! *UNITS in YARDS!? - not converted ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create ( parser = " survexunits " , message = message )
2022-10-07 21:48:41 +01:00
def get_expo_from_year ( self , year ) :
2023-01-19 21:18:42 +00:00
# cacheing to save DB query on every block
2022-10-07 21:48:41 +01:00
if year in self . expos :
expo = self . expos [ year ]
else :
expeditions = Expedition . objects . filter ( year = year )
2023-01-19 21:18:42 +00:00
if len ( expeditions ) != 1 :
message = (
f " ! More than one expedition in year { year } ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
expo = expeditions [ 0 ]
self . expos [ year ] = expo
return expo
2020-06-24 17:55:42 +01:00
def LoadSurvexDate ( self , survexblock , line ) :
2022-09-16 20:54:22 +01:00
# we should make this a date RANGE for everything?
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
def setdate_on_survexblock ( year ) :
2022-10-07 22:48:21 +01:00
# We are assuming that deferred *team people are in the same block. Otherwise, ouch.
2022-10-07 21:48:41 +01:00
expo = self . get_expo_from_year ( year )
2020-07-04 13:31:46 +01:00
survexblock . expedition = expo
2022-10-07 21:48:41 +01:00
survexblock . expeditionday = expo . get_expedition_day ( survexblock . date )
2020-07-04 13:31:46 +01:00
survexblock . save ( )
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
team = SurvexPersonRole . objects . filter ( survexblock = survexblock )
2022-10-07 22:48:21 +01:00
for pr in team :
2023-01-19 21:18:42 +00:00
if not pr . expeditionday : # *date and *team in 'wrong' order. All working now.
pr . expeditionday = survexblock . expeditionday
2022-10-07 22:48:21 +01:00
pr . save ( )
2023-01-19 21:18:42 +00:00
if not pr . personexpedition : # again, we didn't know the date until now
2022-10-07 22:48:21 +01:00
pe = GetPersonExpeditionNameLookup ( expo ) . get ( pr . personname . lower ( ) )
2022-10-07 21:48:41 +01:00
if pe :
# message = "! {} ({}) Fixing undated personexpedition '{}'".format(survexblock.survexfile.path, survexblock, p.personname)
# print(self.insp+message)
# DataIssue.objects.create(parser='survex', message=message)
2022-10-07 22:48:21 +01:00
pr . personexpedition = pe
2022-10-07 23:52:10 +01:00
pr . person = pr . personexpedition . person
2022-10-07 22:48:21 +01:00
pr . save ( )
2023-01-19 21:18:42 +00:00
self . currentpersonexped . append ( pe ) # used in push/pop block code
elif known_foreigner ( pr . personname ) : # note, not using .lower()
2022-11-23 10:41:14 +00:00
message = f " - *team { expo . year } ' { pr . personname } ' known foreigner on *date { survexblock . survexfile . path } ( { survexblock } ) in ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2022-10-10 13:40:21 +01:00
# DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
2022-10-07 21:48:41 +01:00
else :
2022-11-23 10:41:14 +00:00
message = f " ! *team { year } ' { pr . personname } ' FAIL personexpedition lookup on *date { survexblock . survexfile . path } ( { survexblock } ) ' { pr . personname } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " ,
message = message ,
url = get_offending_filename ( survexblock . survexfile . path ) ,
)
2022-09-16 20:54:22 +01:00
oline = line
2023-01-19 21:18:42 +00:00
if len ( line ) > 10 :
2022-09-16 20:54:22 +01:00
# message = "! DATE Warning LONG DATE '{}' ({}) {}".format(oline, survexblock, survexblock.survexfile.path)
# print(self.insp+message)
# DataIssue.objects.create(parser='survex', message=message, url=get_offending_filename(survexblock.survexfile.path))
2023-01-19 21:18:42 +00:00
if line [ 10 ] == " - " : # ie a range, just look at first date
2020-07-08 00:00:56 +01:00
line = line [ 0 : 10 ]
2023-01-19 21:18:42 +00:00
if len ( line ) == 10 :
2020-07-08 00:00:56 +01:00
year = line [ : 4 ]
2022-09-16 20:54:22 +01:00
# TO DO set to correct Austrian timezone Europe/Vienna ?
2020-07-08 00:00:56 +01:00
# %m and %d need leading zeros. Source svx files require them.
2023-01-19 21:18:42 +00:00
survexblock . date = datetime . strptime ( line . replace ( " . " , " - " ) , " % Y- % m- %d " )
2022-10-07 21:48:41 +01:00
setdate_on_survexblock ( year )
2023-01-19 21:18:42 +00:00
elif len ( line ) == 7 :
2020-07-08 00:00:56 +01:00
year = line [ : 4 ]
2023-01-19 21:18:42 +00:00
perps = get_people_on_trip ( survexblock ) # What, you don't know Judge Dredd slang ?
2022-09-18 21:53:04 +01:00
message = f " ! DATE Warning only accurate to the month, setting to 1st ' { oline } ' ( { survexblock } ) { survexblock . survexfile . path } { perps } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " svxdate " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
survexblock . date = datetime . strptime ( line . replace ( " . " , " - " ) , " % Y- % m " ) # sets to first of month
2022-10-07 21:48:41 +01:00
setdate_on_survexblock ( year )
2023-01-19 21:18:42 +00:00
elif len ( line ) == 4 :
2020-07-08 00:00:56 +01:00
year = line [ : 4 ]
2022-09-18 21:53:04 +01:00
perps = get_people_on_trip ( survexblock )
message = f " ! DATE WARNING only accurate to the YEAR, setting to 1st January ' { oline } ' ( { survexblock } ) { survexblock . survexfile . path } { perps } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
DataIssue . objects . create (
parser = " svxdate " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
survexblock . date = datetime . strptime ( line , " % Y " ) # sets to January 1st
2022-10-07 21:48:41 +01:00
setdate_on_survexblock ( year )
2020-07-08 00:00:56 +01:00
else :
2022-10-05 21:18:11 +01:00
# these errors are reporting the wrong survexblock, which is actually a SurvexFile (!)
2023-01-19 21:18:42 +00:00
message = (
f " ! DATE Error unrecognised ' { oline } - { survexblock } ' ( { type ( survexblock ) } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
DataIssue . objects . create (
parser = " survex " , message = message , url = get_offending_filename ( survexblock . survexfile . path )
)
print ( f " { type ( survexblock ) =} " ) # survexblock.parent fails as a SurvexFile has no .parent ...ugh.
2022-10-05 21:18:11 +01:00
print ( f " { survexblock . survexpath =} " )
print ( f " { survexblock . survexfile =} " )
2023-01-19 21:18:42 +00:00
# raise
2020-07-08 00:00:56 +01:00
2022-10-05 21:18:11 +01:00
def LoadSurvexLeg(self, survexblock, sline, comment, svxline):
    """This reads compass, clino and tape data but only keeps the tape lengths,
    the rest is discarded after error-checking.
    Now skipping the error checking - returns as soon as the leg is not one we count.

    REPLACE ALL THIS by reading the .log output of cavern for the file.
    But we need the lengths per Block, not by File. dump3d will do lengths per block.
    """
    invalid_clino = 180.0
    invalid_compass = 720.0
    invalid_tape = 0.0

    if self.flagsstar["skiplegs"]:
        if debugprint:
            print("skip in ", self.flagsstar, survexblock.survexfile.path)
        return

    if debugprint:
        print(f"! LEG datastar type:{self.datastar['type'].upper()}++{survexblock.survexfile.path}\n{sline}")
    # Only the NORMAL (and topofil) data style contributes to tape-length totals;
    # every other style is discarded immediately.
    if self.datastar["type"] in ("passage", "cartesian", "nosurvey", "diving", "cylpolar"):
        return
    if debugprint:
        print(f"!! LEG data lineno:{self.lineno}\n!! sline:'{sline}'\n!! datastar['tape']: {self.datastar['tape']}")
    if self.datastar["type"] != "normal":
        return

    ls = sline.lower().split()
    # NORMAL, so there should be 5 fields
    # from the content, this is clearly reading fixedpts/gps/gps00raw.svx, but not reporting it by that name
    if len(ls) < 5:
        # BUG FIX: the original printed the undefined local name 'survexfile' here,
        # raising NameError whenever this diagnostic branch was actually reached.
        print("! Fewer than 5 fields in NORMAL in", survexblock.survexfile.path, survexblock.survexfile)
        print("datastar NORMAL:", self.datastar)
        print(f"Line (split): {ls}, comment: {comment}")
        print(f"Line: {sline}\nsvxline: {svxline}")
        message = f" ! Not 5 fields in line '{sline.lower()}' {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}"
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )

    datastar = self.datastar  # shallow copy: alias but the things inside are the same things
    survexleg = SurvexLeg()

    # skip all splay legs
    try:
        if ls[datastar["from"]] == ".." or ls[datastar["from"]] == ".":
            if debugprint:
                print("Splay in ", survexblock.survexfile.path)
            return
        if ls[datastar["to"]] == ".." or ls[datastar["to"]] == ".":
            if debugprint:
                print("Splay in ", survexblock.survexfile.path)
            return
        if self.flagsstar["splayalias"]:
            if ls[datastar["from"]] == "-":
                if debugprint:
                    print("Aliased splay in ", survexblock.survexfile.path)
                return
            if ls[datastar["to"]] == "-":
                if debugprint:
                    print("Aliased splay in ", survexblock.survexfile.path)
                return
    except (KeyError, IndexError):  # narrowed from bare except: only lookup errors expected here
        message = f" ! datastar parsing from/to incorrect in line {ls} in {survexblock.survexfile.path}"
        print(self.insp + message)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        return

    try:
        tape = ls[datastar["tape"]]
    except (KeyError, IndexError):
        message = f" ! datastar parsing incorrect in line {ls} in {survexblock.survexfile.path}"
        print(self.insp + message)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        survexleg.tape = invalid_tape
        return
    # e.g. '29/09' or '(06.05)' in the tape measurement
    # tape = tape.replace("(","") # edited original file (only one) instead
    # tape = tape.replace(")","") # edited original file (only one) instead
    # tape = tape.replace("/",".") # edited original file (only one) instead.
    try:
        # apply any *units scaling before accumulating the length
        if self.unitsfactor:
            tape = float(tape) * self.unitsfactor
            if debugprint:
                message = f" ! Units: Length scaled {tape}m '{ls}' in ({survexblock.survexfile.path}) units:{self.units} factor:{self.unitsfactor}x"
                print(self.insp + message)
                DataIssue.objects.create(
                    parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
                )
        if self.units == "feet":
            tape = float(tape) / METRESINFEET
            if debugprint:
                message = f" ! Units: converted to {tape:.3f}m from {self.units} '{ls}' in ({survexblock.survexfile.path})"
                print(self.insp + message)
                DataIssue.objects.create(
                    parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
                )
        survexleg.tape = float(tape)
        self.legsnumber += 1
    except ValueError:
        message = f" ! Value Error: Tape misread in line '{ls}' in {survexblock.survexfile.path} units:{self.units}"
        print(self.insp + message)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        survexleg.tape = invalid_tape
    try:
        survexblock.legslength += survexleg.tape
        self.slength += survexleg.tape
    except ValueError:
        message = (
            f" ! Value Error: Tape length not added '{ls}' in {survexblock.survexfile.path} units:{self.units}"
        )
        print(self.insp + message)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
    try:
        lcompass = ls[datastar["compass"]]
    except (KeyError, IndexError):
        message = f" ! Value Error: Compass not found in line {ls} in {survexblock.survexfile.path}"
        print(self.insp + message)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        lcompass = invalid_compass
    try:
        lclino = ls[datastar["clino"]]
    except (KeyError, IndexError):
        print(("! Clino misread in", survexblock.survexfile.path))
        print(("datastar:", datastar))
        print(("Line:", ls))
        message = f" ! Value Error: Clino misread in line '{sline.lower()}' {datastar=} {self.datastar=} {ls=} in\n{survexblock}\n{survexblock.survexfile}\n{survexblock.survexfile.path}"
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        lclino = invalid_clino
    if lclino == "up":
        survexleg.clino = 90.0
        lcompass = invalid_compass
    elif lclino == "down":
        survexleg.clino = -90.0
        lcompass = invalid_compass
    elif lclino == "-" or lclino == "level":
        # NOTE(review): -90.0 for a 'level'/'-' leg looks wrong (0.0 expected) but is
        # preserved as-is; confirm intent before changing, clino is discarded anyway.
        survexleg.clino = -90.0
    try:
        survexleg.compass = float(lcompass)
    except ValueError:
        print(("! Compass misread in", survexblock.survexfile.path))
        print(("datastar:", datastar))
        print(("Line:", ls))
        message = "! Value Error: lcompass:'{}' line {} in '{}'".format(lcompass, ls, survexblock.survexfile.path)
        DataIssue.objects.create(
            parser="survexleg", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        survexleg.compass = invalid_compass
    # delete the object to save memory
    survexleg = None
2023-01-19 21:18:42 +00:00
2020-06-24 22:46:18 +01:00
def LoadSurvexRef(self, survexblock, args):
    """Interpret the *ref record, and all the many variants.

    Parses a wallet reference like '2019#X07' out of *args*, reports malformed or
    out-of-range ids as DataIssues, and attaches the matching Wallet (if exactly
    identifiable) to survexblock.scanswallet.
    """
    # print(self.insp+ "*REF ---- '"+ args +"'")
    url = get_offending_filename(survexblock.survexfile.path)
    # *REF but also ; Ref  years from 1960 to 2039
    refline = self.rx_ref_text.match(args)
    if refline:
        # a textual reference such as "1996-1999 Not-KH survey book pp 92-95"
        print(f"{self.insp} *REF quoted text so ignored:{args} in {survexblock.survexfile.path}")
        return

    if len(args) < 4:
        message = f" ! Empty or BAD *REF statement '{args}' in '{survexblock.survexfile.path}'"
        print(self.insp + message)
        DataIssue.objects.create(parser="survex", message=message, url=url)
        return

    argsgps = self.rx_argsref.match(args)
    if argsgps:
        yr, letterx, wallet = argsgps.groups()
    else:
        perps = get_people_on_trip(survexblock)
        message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' malformed id '{args}' {perps}"
        print(self.insp + message)
        DataIssue.objects.create(parser="survex", message=message, url=url)
        return

    if not letterx:
        letterx = ""
    else:
        letterx = "X"
    if len(wallet) < 2:
        wallet = "0" + wallet  # zero-pad single-digit wallet numbers
    # BUG FIX: refscan must be built *before* the year-bounds check below, which
    # interpolates it; the original assigned it afterwards.
    refscan = f"{yr}#{letterx}{wallet}"
    if not (int(yr) > 1960 and int(yr) < 2050):
        # BUG FIX: this message was a plain string missing its f-prefix, so it
        # printed the literal '{yr}' placeholders instead of the values.
        message = f" ! Wallet year out of bounds {yr} '{refscan}' {survexblock.survexfile.path}"
        print(self.insp + message)
        DataIssue.objects.create(parser="survex", message=message, url=url)

    try:
        if int(wallet) > 99:
            message = f" ! Wallet *REF {refscan} - very big (more than 99) so probably wrong in '{survexblock.survexfile.path}'"
            print(self.insp + message)
            DataIssue.objects.create(parser="survex", message=message, url=url)
    except ValueError:  # narrowed from bare except: only int() can fail here
        message = f" ! Wallet *REF {refscan} - not numeric in '{survexblock.survexfile.path}'"
        print(self.insp + message)
        DataIssue.objects.create(parser="survex", message=message, url=url)
    manywallets = Wallet.objects.filter(
        walletname=refscan
    )  # assumes all wallets found in earlier pass of data import
    if manywallets:
        if len(manywallets) > 1:
            message = f" ! Wallet *REF {refscan} - more than one found {len(manywallets)} wallets in db with same id {survexblock.survexfile.path}"
            print(self.insp + message)
            DataIssue.objects.create(parser="survex", message=message, url=url)

        if survexblock.scanswallet:
            if survexblock.scanswallet.walletname != refscan:
                message = f" ! Wallet *REF {refscan} in {survexblock.survexfile.path} - Already a DIFFERENT wallet is set for this block '{survexblock.scanswallet.walletname}'"
                print(self.insp + message)
                DataIssue.objects.create(parser="survex", message=message, url=url)
        else:
            survexblock.scanswallet = manywallets[0]  # this is a ForeignKey field
            survexblock.save()
        # This is where we should check that the wallet JSON contains a link to the survexfile
        # and that the JSON date and walletdate are set correctly to the survexblock date.
    else:
        perps = get_people_on_trip(survexblock)
        message = f" ! Wallet *REF bad in '{survexblock.survexfile.path}' '{refscan}' NOT in database i.e. wallet does not exist {perps}."
        print(self.insp + message)
        DataIssue.objects.create(parser="survex", message=message, url=url)
2020-06-24 14:49:39 +01:00
2022-07-20 18:47:29 +01:00
def TickSurvexQM(self, survexblock, qmtick):
    """Interpret the specially formatted comment which is a QM TICKED statement.

    qmtick groups: (1) QM number, (2) tick date, (3) trailing comment.
    """
    # Now we need to find the correct QM object. It will be in the same block and have the same number.
    try:
        qm = QM.objects.filter(block=survexblock, number=int(qmtick.group(1)))
    except Exception:
        # raise
        # BUG FIX: the original message interpolated 'qm', which is unbound when we
        # get here, and then fell through to use 'qm' below (UnboundLocalError).
        message = f' ! QM TICK find FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
        print(message)
        DataIssue.objects.create(
            parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        return
    if len(qm) > 1:
        message = f' ! QM TICK MULTIPLE found FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
        print(message)
        DataIssue.objects.create(
            parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
    if not qm:
        # BUG FIX: guard an empty queryset - qm[0] below raised an uncaught IndexError.
        message = f' ! QM TICK NONE found FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + comment:"{qmtick.group(3)}" '
        print(message)
        DataIssue.objects.create(
            parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        return
    qm[0].ticked = True
    qm[0].save()
2020-06-24 22:46:18 +01:00
def LoadSurvexQM(self, survexblock, qmline):
    """Interpret the specially formatted comment which is a QM definition.

    qmline groups: (1) number, (2) grade, (3)+(4) nearest station, (6)+(7)
    resolution station, (8) free-text description.
    """
    insp = self.insp

    qm_no = qmline.group(1)  # this may not be unique across multiple survex files
    qm_grade = qmline.group(2)
    # BUG FIX: qm_nearest was unbound if group(3) was absent; initialise it.
    # (presumably the regex always captures group(3) - TODO confirm against rx pattern)
    qm_nearest = ""
    if qmline.group(3):  # usual closest survey station
        qm_nearest = qmline.group(3)
        if qmline.group(4):
            qm_nearest = qm_nearest + "." + qmline.group(4)
    if qmline.group(6) and qmline.group(6) != "-":
        qm_resolve_station = qmline.group(6)
        if qmline.group(7):
            qm_resolve_station = qm_resolve_station + "." + qmline.group(7)
    else:
        qm_resolve_station = ""
    qm_notes = qmline.group(8)
    # Spec of QM in SVX files:
    # ;Serial number  grade(A/B/C/D/X) nearest-station resolution-station description
    # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
    # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage

    # NB none of the SurveyStations are in the DB now, so if we want to link to aSurvexStation
    # we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one

    # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date.
    # We don't need this anymore so we don't need to create a placeholder logbook entry.
    # (Removed two dead expressions whose results were discarded: str(survexblock.date)[:4]
    # and survexblock.survexfile.cave.slug() - both were leftovers from the old scheme.)
    blockname = survexblock.name[:6] + survexblock.name[-1:]
    # logslug = f'D{int(qmyear)}_{blockname}_{int(qm_no):03d}'

    try:
        QM.objects.create(
            number=qm_no,
            # nearest_station=a_survex_station_object, # can be null
            nearest_station_description=qm_resolve_station,
            nearest_station_name=qm_nearest,
            grade=qm_grade.upper(),
            location_description=qm_notes,
            block=survexblock,  # only set for survex-imported QMs
            blockname=blockname,  # only set for survex-imported QMs
            expoyear=str(survexblock.date.year),
            cave=survexblock.survexfile.cave,
        )
        # BUG FIX: the original had a bare 'qm.save' (no call parentheses) here - a
        # no-op attribute access. objects.create() already saves, so it is dropped.
    except Exception:
        message = f" ! QM{qm_no} FAIL to create {qm_nearest} in '{survexblock.survexfile.path}'"
        print(insp + message)
        DataIssue.objects.create(
            parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
2020-06-25 03:17:56 +01:00
2023-01-19 21:18:42 +00:00
def LoadSurvexDataNormal(self, survexblock, args):
    """Sets the order for data elements in this and following blocks, e.g.
    *data normal from to compass clino tape
    *data normal from to tape compass clino
    We are only collecting length data so we are disinterested in from, to, LRUD etc.
    """
    # datastardefault = {  # included here as reference to help understand the code
    #     "type":"normal",
    #     "t":"leg",
    #     "from":0,
    #     "to":1,
    #     "tape":2,
    #     "compass":3,
    #     "clino":4}
    datastar = copy.deepcopy(self.datastardefault)
    if args == "":
        # naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
        return
    # DEFAULT | NORMAL | CARTESIAN| NOSURVEY |PASSAGE | TOPOFIL | CYLPOLAR | DIVING
    ls = args.lower().split()
    if ls[0] == "default":
        self.datastar = copy.deepcopy(self.datastardefault)
    elif ls[0] == "normal" or ls[0] == "topofil":
        if not ("from" in datastar and "to" in datastar):
            message = (
                f" ! - Unrecognised *data normal statement '{args}' {survexblock.name}|{survexblock.survexpath}"
            )
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(
                parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
            )
            return
        else:
            # BUG FIX: the original re-bound datastar to self.datastardefault itself
            # (an alias, NOT a copy) and then mutated it in the loop below, silently
            # corrupting the default ordering for every later *data statement.
            # We keep using the deepcopy made at the top of this method instead.
            # ls = ["normal", "from", "to", "tape", "compass", "clino" ]
            for i in range(1, len(ls)):  # ls[0] is "normal"
                if ls[i].lower() == "newline":
                    message = f" ! - ABORT *data statement has NEWLINE in it in {survexblock.survexfile.path}. Not parsed by troggle. '{args}'"
                    print(message)
                    print(message, file=sys.stderr)
                    DataIssue.objects.create(
                        parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
                    )
                    return False
                # field at position i of the *data line describes reading column i-1
                if ls[i] in ["bearing", "compass"]:
                    datastar["compass"] = i - 1
                if ls[i] in ["clino", "gradient"]:
                    datastar["clino"] = i - 1
                if ls[i] in ["tape", "length"]:
                    datastar["tape"] = i - 1
            self.datastar = copy.deepcopy(datastar)
            return
    elif ls[0] in ("passage", "nosurvey", "diving", "cylpolar"):
        # these styles contribute no countable legs; just record the type
        self.datastar["type"] = ls[0]
    elif ls[0] == "cartesian":  # We should not ignore this ?! Default for Germans ?
        self.datastar["type"] = ls[0]
    else:
        message = f" ! - Unrecognised *data statement '{args}' {survexblock.name}|{survexblock.survexpath}"
        print(message)
        print(message, file=sys.stderr)
        DataIssue.objects.create(
            parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
        )
        self.datastar["type"] = ls[0]
2020-06-27 17:55:59 +01:00
2020-07-03 14:53:36 +01:00
def LoadSurvexFlags(self, args):
    """Parse a *flags line and reset self.flagsstar accordingly.

    Valid flags are DUPLICATE, SPLAY, and SURFACE; a flag may be preceded with
    NOT to turn it off. Defaults are NOT any of them.
    """
    self.flagsstar = copy.deepcopy(self.flagsdefault)
    flags = self.rx_flagsnot.sub("not", args).split()
    if debugprint:
        print(
            f" ^ flagslist:{flags}",
        )
    # for each known flag, a plain occurrence enables it and a 'notX' occurrence
    # disables it; when both appear the disable wins, as in the original code.
    for flagname in ("duplicate", "surface", "splay"):
        if flagname in flags:
            self.flagsstar[flagname] = True
        if "not" + flagname in flags:
            self.flagsstar[flagname] = False

    # if self.flagsstar["duplicate"] == True or self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
    # actually we do want to count duplicates as this is for "effort expended in surveying underground"
    if self.flagsstar["surface"] is True or self.flagsstar["splay"] is True:
        self.flagsstar["skiplegs"] = True
    if debugprint:
        print(
            f" $ flagslist:{flags}",
        )
2020-06-27 17:55:59 +01:00
def IdentifyCave(self, cavepath):
    """Return the cached Cave for a survex path, or None for non-cave paths.

    First consults the lowercased cache; otherwise matches the path against the
    standard cave-identifier regex and tries the guessed slugs, caching any hit
    under the original path spelling.
    """
    key = cavepath.lower()
    if key in self.caveslist:
        return self.caveslist[key]
    # TO DO - this predates the big revision to Gcavelookup so look at this again carefully
    m = self.rx_cave.search(cavepath)
    if not m:
        # not a cave, but that is fine.
        # print(f' ! No regex(standard identifier) cave match for {cavepath.lower()}')
        return None
    sluggy = f"{m.group(1)}-{m.group(2)}"
    for guess in (sluggy.lower(), m.group(2).lower()):
        if guess in self.caveslist:
            self.caveslist[cavepath] = self.caveslist[guess]
            return self.caveslist[guess]
    print(f" ! Failed to find cave for {cavepath.lower()}")
2020-06-29 21:16:13 +01:00
def GetSurvexDirectory(self, headpath):
    """This creates a SurvexDirectory if it has not been seen before, and on creation
    it sets the primarysurvexfile. This is correct as it should be set on the first file
    in the directory, where first is defined by the *include ordering. Which is what we
    are doing.
    """
    if not headpath:
        return self.svxdirs[""]
    key = headpath.lower()
    if key not in self.svxdirs:
        newdir = SurvexDirectory(path=headpath, primarysurvexfile=self.currentsurvexfile)
        newdir.save()
        self.svxdirs[key] = newdir
        self.survexdict[newdir] = []  # list of the files in the directory
    return self.svxdirs[key]
2022-07-23 17:26:47 +01:00
def ReportNonCaveIncludes(self, headpath, includelabel, depth):
    """Ignore surface, kataser and gpx *include survex files.

    Reports a DataIssue when the path looks like a cave identifier that is
    neither a known cave nor in the pendingcaves.txt list.
    """
    if not self.pending:
        # lazy-load the pending-caves list on first use
        self.pending = set()
        fpending = Path(settings.CAVEDESCRIPTIONS, "pendingcaves.txt")
        if fpending.is_file():
            with open(fpending, "r") as fo:
                cids = fo.readlines()
            for cid in cids:
                pcaveid = cid.strip().rstrip("\n").upper()
                # BUG FIX: the prefix test was on the raw line 'cid', not the cleaned
                # id, so lines with leading whitespace were mis-classified.
                # (Also renamed 'id' -> 'pcaveid' to stop shadowing the builtin.)
                if pcaveid.startswith("162"):
                    self.pending.add(pcaveid)
                else:
                    self.pending.add("1623-" + pcaveid)

    if headpath in self.ignorenoncave:
        message = f" - {headpath} is <ignorenoncave> (while creating '{includelabel}' sfile & sdirectory)"
        # print("\n"+message)
        # print("\n"+message,file=sys.stderr)
        return
    for i in self.ignoreprefix:
        if headpath.startswith(i):
            message = (
                f" - {headpath} starts with <ignoreprefix> (while creating '{includelabel}' sfile & sdirectory)"
            )
            # print("\n"+message)
            # print("\n"+message,file=sys.stderr)
            return
    caveid = f"{headpath[6:10]}-{headpath[11:]}".upper()
    if caveid in self.pending:
        # Yes we didn't find this cave, but we know it is a pending one. So not an error.
        # print(f'! ALREADY PENDING {caveid}',file=sys.stderr)
        return
    shortid = caveid[5:]  # identifier without the area prefix, e.g. without '1623-'
    if shortid in self.pending:
        print(f"! ALREADY PENDING {shortid}", file=sys.stderr)
        return
    message = f" ! Warning: cave identifier '{caveid}' or {shortid} (guessed from file path) is not a known cave. Need to add to expoweb/cave_data/pending.txt ? In '{includelabel}.svx' at depth:[{len(depth)}]."
    print("\n" + message)
    print("\n" + message, file=sys.stderr)
    print(f"{self.pending}", end="", file=sys.stderr)
    DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(includelabel))
    # print(f' # datastack in LoadSurvexFile:{includelabel}', file=sys.stderr)
    # for dict in self.datastack:
    #     print(f'  type: <{dict["type"].upper()} >', file=sys.stderr)
2020-07-04 01:10:17 +01:00
2020-07-01 22:49:38 +01:00
def LoadSurvexFile(self, svxid):
    """Creates SurvexFile in the database, and SurvexDirectory if needed
    with links to 'cave'
    Creates a new current survexfile and valid .survexdirectory
    Inspects the parent folder of the survexfile and uses that to decide if this is a cave we know
    The survexblock passed-in is not necessarily the parent. FIX THIS.
    """
    if debugprint:
        print(f" # datastack in LoadSurvexFile:{svxid} 'type':", end="")
        for d in self.datastack:  # renamed from 'dict' which shadowed the builtin
            print(f"'{d['type'].upper()}' ", end="")
        print("")

    depth = " " * self.depthbegin
    # print("{:2}{} - NEW survexfile:'{}'".format(self.depthbegin, depth, svxid))
    headpath = os.path.dirname(svxid)

    newfile = SurvexFile(path=svxid)
    newfile.save()  # until we do this there is no internal id so no foreign key works
    self.currentsurvexfile = newfile
    newdirectory = self.GetSurvexDirectory(headpath)
    # BUG FIX: this None-check originally ran *after* newdirectory.save() and the
    # survexdict append, so a None result would have crashed before being reported.
    if not newdirectory:
        message = f" ! 'None' SurvexDirectory returned from GetSurvexDirectory({headpath})"
        print(message)
        print(message, file=sys.stderr)
        DataIssue.objects.create(parser="survex", message=message, url=f"/survexfile/{svxid}")
    newdirectory.save()
    newfile.survexdirectory = newdirectory
    self.survexdict[newdirectory].append(newfile)
    cave = self.IdentifyCave(headpath)  # cave already exists in db

    if cave:
        newdirectory.cave = cave
        newfile.cave = cave
        # print(f"\n - New directory '{newdirectory}' for cave '{cave}'",file=sys.stderr)
    else:  # probably a surface survey, or a cave in a new area e.g. 1624 not previously managed, and not in the pending list
        self.ReportNonCaveIncludes(headpath, svxid, depth)

    if not newfile.survexdirectory:
        message = f" ! SurvexDirectory NOT SET in new SurvexFile {svxid}"
        print(message)
        print(message, file=sys.stderr)
        DataIssue.objects.create(parser="survex", message=message)
    self.currentsurvexfile.save()  # django insists on this although it is already saved !?
    try:
        newdirectory.save()
    except Exception:
        # dump the offending directory before re-raising for the full traceback
        print(newdirectory, file=sys.stderr)
        print(newdirectory.primarysurvexfile, file=sys.stderr)
        raise

    if debugprint:
        print(f" # datastack end LoadSurvexFile:{svxid} 'type':", end="")
        for d in self.datastack:
            print(f"'{d['type'].upper()}' ", end="")
        print("")
2020-06-28 01:50:34 +01:00
2020-06-28 14:42:26 +01:00
def ProcessIncludeLine(self, included):
    """Handle a ';*include' marker found in the collated file: load the named
    survex file and push it onto the stack of open survex files."""
    global debugprint
    target = included.groups()[0]
    # Turn on verbose tracing once we reach the file named by debugprinttrigger.
    if target.lower() == debugprinttrigger.lower():
        debugprint = True
    self.LoadSurvexFile(target)
    self.stacksvxfiles.append(self.currentsurvexfile)
def ProcessEdulcniLine(self, edulcni):
    """Saves the current survexfile in the db"""
    global debugprint
    target = edulcni.groups()[0]
    if debugprint:
        depth = " " * self.depthbegin
        print(f"{self.depthbegin:2}{depth} - Edulcni survexfile: '{target}'")
    # Leaving the trigger file: switch verbose tracing back off.
    if target.lower() == debugprinttrigger.lower():
        debugprint = False
    # Persist the finished file, then resume the one that included it.
    self.currentsurvexfile.save()
    self.currentsurvexfile = self.stacksvxfiles.pop()
2020-06-24 14:49:39 +01:00
2020-06-24 22:46:18 +01:00
def LoadSurvexComment(self, survexblock, comment):
    """Parses one survex comment (the text after ';') for the markers troggle
    recognises, on the current survexblock:
      - ;ref / ;wallet  -> wallet reference, handed to LoadSurvexRef()
      - ;QM ...         -> question-mark (lead) lines, new or ticked-off
      - ;|*include / ;|*edulcni -> markers written into the collated file to
        track entering/leaving included files
    All other comments are ignored.
    """
    # ignore all comments except ;ref, ; wallet and ;QM and ;*include (for collated survex file)
    # rx_ref2 = re.compile(r'(?i)\s*ref[.;]?')

    # This should also check that the QM survey point rxists in the block

    refline = self.rx_commref.match(comment)
    if refline:
        # comment = re.sub('(?i)\s*ref[.;]?',"",comment.strip())
        # strip the leading "ref"/"wallet" keyword before parsing the reference itself
        comment = self.rx_ref2.sub("", comment.strip())
        # NOTE(review): debug print left in — runs for every ;ref line
        print(f"rx_ref2 -- {comment=} in {survexblock.survexfile.path} :: {survexblock}")
        self.LoadSurvexRef(survexblock, comment)

    # handle
    # ; Messteam: Jörg Haussmann, Robert Eckardt, Thilo Müller
    # ; Zeichner: Thilo Müller
    # But none of these will be valid teammembers because they are not actually on our expo

    team = self.rx_commteam.match(comment)
    if team:
        # print(f'rx_commteam -- {comment=} in {survexblock.survexfile.path} :: {survexblock}')
        pass  # deliberately ignored: these people are not on our expo

    # QM handling: rx_qm0 is a cheap pre-filter; then try a full new-QM match,
    # then a tick-off match, and report anything that looked like a QM but parsed as neither.
    qml = self.rx_qm0.match(comment)
    if qml:
        qmline = self.rx_qm.match(comment)
        if qmline:
            self.LoadSurvexQM(survexblock, qmline)
        else:
            qmtick = self.rx_qm_tick.match(comment)
            if qmtick:
                self.TickSurvexQM(survexblock, qmtick)
            else:
                message = f'! QM Unrecognised as valid in "{survexblock.survexfile.path}" QM{qml.group(1)} "{qml.group(2)}": regex failure, typo?'
                print(message)
                DataIssue.objects.create(
                    parser="survex", message=message, url=get_offending_filename(survexblock.survexfile.path)
                )

    included = self.rx_comminc.match(comment)
    # ;*include means 'we have been included'; whereas *include means 'proceed to include'
    # bug, If the original survex file contians the line ;*include then we pick it up ! So fix our special code to be ;|*include
    if included:
        self.ProcessIncludeLine(included)

    edulcni = self.rx_commcni.match(comment)
    # ;*edulcni means we are returning from an included file
    if edulcni:
        self.ProcessEdulcniLine(edulcni)
2020-06-24 22:46:18 +01:00
2023-01-19 21:18:42 +00:00
def LoadSurvexSetup(self, survexblock, survexfile):
    """Resets per-file parser state before reading a survex file, and attaches a
    Cave to the file when its path matches the caves-YYYY/NNN/ naming convention.

    survexblock: the parent SurvexBlock whose file triggered this read.
    survexfile:  the SurvexFile about to be read; its .cave may be set here.
    """
    self.depthbegin = 0
    self.datastar = self.datastardefault  # reset *data interpretation to the default style
    print(
        self.insp
        + f" - MEM:{get_process_memory():.3f} Reading. parent:{survexblock.survexfile.path} <> {survexfile.path} "
    )
    self.lineno = 0
    # progress ticker on stderr: a dot every 10 calls, a newline every 500
    sys.stderr.flush()
    self.callcount += 1
    if self.callcount % 10 == 0:
        print(".", file=sys.stderr, end="")
    if self.callcount % 500 == 0:
        print("\n", file=sys.stderr, end="")
    # Try to find the cave in the DB if not use the string as before
    # matches e.g. "caves-1623/264/" or "caves-1623/2018-dm-07/"
    path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
    if path_match:
        pos_cave = f"{path_match.group(1)}-{path_match.group(2)}"  # e.g. "1623-264"
        cave = getCaveByReference(pos_cave)
        if cave:
            survexfile.cave = cave
2020-06-24 22:46:18 +01:00
2022-10-07 08:57:30 +01:00
def LinearLoad(self, survexblock, path, collatefilename):
    """Loads a single survex file. Usually used to import all the survex files which have been collated
    into a single file. Loads the begin/end blocks using a stack for labels.
    Uses the python generator idiom to avoid loading the whole file (21MB) into memory.

    survexblock:    the root SurvexBlock; reassigned as *begin/*end blocks nest.
    path:           the relative path of the (collated) file being parsed.
    collatefilename: filesystem name of the collated file to read.
    """
    blkid = None
    pathlist = None
    args = None
    oldflags = None
    blockcount = 0
    self.lineno = 0
    slengthtotal = 0.0  # running total of leg length over all blocks in this file
    nlegstotal = 0      # running total of leg count over all blocks in this file
    self.relativefilename = path
    self.IdentifyCave(path)  # this will produce null for survex files which are geographic collections

    self.currentsurvexfile = survexblock.survexfile
    self.currentsurvexfile.save()  # django insists on this although it is already saved !?

    # deep copies so that pushed/popped *data and *flags state never aliases the defaults
    self.datastar = copy.deepcopy(self.datastardefault)
    self.flagsstar = copy.deepcopy(self.flagsdefault)

    def tickle():
        """Progress ticker on stderr: a dot per 20 blocks, memory report per 800."""
        nonlocal blockcount
        blockcount += 1
        if blockcount % 20 == 0:
            print(".", file=sys.stderr, end="")
        if blockcount % 800 == 0:
            print("\n", file=sys.stderr, end="")
            mem = get_process_memory()
            print(f" - MEM: {mem:7.2f} MB in use", file=sys.stderr)
            print("  ", file=sys.stderr, end="")
        sys.stderr.flush()

    def printbegin():
        """Records the indent level and rebuilds the dotted survex path for the new block."""
        nonlocal blkid
        nonlocal pathlist
        depth = " " * self.depthbegin
        self.insp = depth
        if debugprint:
            print(f"{self.depthbegin:2}{depth} - Begin for :'{blkid}'")
        pathlist = ""
        for id in self.stackbegin:
            if len(id) > 0:
                pathlist += "." + id

    def printend():
        """Debug-prints the leg totals for the block being closed."""
        nonlocal args
        depth = " " * self.depthbegin
        if debugprint:
            print(f"{self.depthbegin:2}{depth} - End   from:'{args}'")
            # NOTE(review): self.slength is passed twice and legsnumber fills the
            # 'length' slot — the n/length values look transposed; debug-only output.
            print(
                "{:2}{} - LEGS: {} (n: {}, length:{} units:{})".format(
                    self.depthbegin, depth, self.slength, self.slength, self.legsnumber, self.units
                )
            )

    def pushblock():
        """Pushes the current *data and *flags interpretation onto their stacks at *begin."""
        nonlocal blkid
        if debugprint:
            print(f" # datastack at 1 *begin {blkid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")
            print(f"'{self.datastar['type'].upper()}' self.datastar ")
        # ------------ * DATA
        self.datastack.append(copy.deepcopy(self.datastar))
        # ------------ * DATA
        if debugprint:
            print(f" # datastack at 2 *begin {blkid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")
            print(f"'{self.datastar['type'].upper()}' self.datastar ")

        # ------------ * FLAGS
        self.flagsstack.append(copy.deepcopy(self.flagsstar))
        # ------------ * FLAGS
        pass

    def popblock():
        """Restores the *data and *flags interpretation from their stacks at *end."""
        nonlocal blkid
        nonlocal oldflags
        if debugprint:
            print(f" # datastack at *end '{blkid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")
            print(f"'{self.datastar['type'].upper()}' self.datastar ")
        # ------------ * DATA
        self.datastar = copy.deepcopy(self.datastack.pop())
        # ------------ * DATA
        if debugprint:
            print(f" # datastack after *end '{blkid} 'type':", end="")
            for dict in self.datastack:
                print(f"'{dict['type'].upper()}'   ", end="")
            print("")
            print(f"'{self.datastar['type'].upper()}' self.datastar ")

        # ------------ * FLAGS
        self.flagsstar = copy.deepcopy(self.flagsstack.pop())
        # ------------ * FLAGS
        if debugprint:
            if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                print(f" # POP 'any' flag now:'{self.flagsstar['skiplegs']}' was:{oldflags['skiplegs']} ")

    def starstatement(star):
        """Interprets a survex comamnd where * is the first character on the line, e.g. *begin"""
        nonlocal survexblock
        nonlocal blkid
        nonlocal pathlist
        nonlocal args
        nonlocal oldflags
        nonlocal slengthtotal
        nonlocal nlegstotal

        cmd, args = star.groups()
        cmd = cmd.lower()
        # ------------------------BEGIN
        if self.rx_begin.match(cmd):
            blkid = args.lower()
            # PUSH state ++++++++++++++
            self.stackbegin.append(blkid)
            self.unitsstack.append((self.units, self.unitsfactor))
            self.legsnumberstack.append(self.legsnumber)
            self.slengthstack.append(self.slength)
            self.personexpedstack.append(self.currentpersonexped)
            pushblock()
            # PUSH state ++++++++++++++
            # fresh per-block accumulators and default units
            self.legsnumber = 0
            self.slength = 0.0
            self.units = "metres"
            self.currentpersonexped = []
            printbegin()
            newsurvexblock = SurvexBlock(
                name=blkid,
                parent=survexblock,
                survexpath=pathlist,
                cave=self.currentcave,
                survexfile=self.currentsurvexfile,
                legsall=0,
                legslength=0.0,
            )
            newsurvexblock.save()
            newsurvexblock.title = (
                "(" + survexblock.title + ")"
            )  # copy parent inititally, overwrite if it has its own
            survexblock = newsurvexblock
            survexblock.save()  # django insists on this , but we want to save at the end !
            tickle()
        # ---------------------------END
        elif self.rx_end.match(cmd):
            survexblock.legsall = self.legsnumber
            survexblock.legslength = self.slength
            printend()
            slengthtotal += self.slength
            nlegstotal += self.legsnumber

            try:
                survexblock.parent.save()  # django insists on this although it is already saved !?
            except:
                print(survexblock.parent, file=sys.stderr)
                raise
            try:
                survexblock.save()  # save to db at end of block
            except:
                print(survexblock, file=sys.stderr)
                raise
            # POP state ++++++++++++++
            popblock()
            self.currentpersonexped = self.personexpedstack.pop()
            self.legsnumber = self.legsnumberstack.pop()
            self.units, self.unitsfactor = self.unitsstack.pop()
            self.slength = self.slengthstack.pop()
            blkid = self.stackbegin.pop()
            self.currentsurvexblock = survexblock.parent
            survexblock = survexblock.parent
            oldflags = self.flagsstar
            self.depthbegin -= 1
            # POP state ++++++++++++++
        # -----------------------------
        elif self.rx_title.match(cmd):
            # *title may be a quoted string or bare text
            quotedtitle = re.match('(?i)^"(.*)"$', args)
            if quotedtitle:
                survexblock.title = quotedtitle.groups()[0]
            else:
                survexblock.title = args
        elif self.rx_ref.match(cmd):
            self.LoadSurvexRef(survexblock, args)
        elif self.rx_flags.match(cmd):
            oldflags = self.flagsstar
            self.LoadSurvexFlags(args)
            if debugprint:
                if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                    print(f" # CHANGE 'any' flag now:'{self.flagsstar['skiplegs']}' was:{oldflags['skiplegs']} ")

        elif self.rx_data.match(cmd):
            if self.LoadSurvexDataNormal(survexblock, args):
                pass
            else:
                # Abort, we do not cope with this *data format
                return
        elif self.rx_alias.match(cmd):
            self.LoadSurvexAlias(survexblock, args)
        elif self.rx_entrance.match(cmd):
            self.LoadSurvexEntrance(survexblock, args)
        elif self.rx_date.match(cmd):
            self.LoadSurvexDate(survexblock, args)
        elif self.rx_units.match(cmd):
            self.LoadSurvexUnits(survexblock, args)
        elif self.rx_team.match(cmd):
            self.LoadSurvexTeam(survexblock, args)
        elif self.rx_set.match(cmd) and self.rx_names.match(cmd):
            pass  # *set names: deliberately ignored
        elif self.rx_include.match(cmd):
            # raw *include should have been rewritten to ;|*include during collation
            message = f" ! -ERROR *include command not expected here {path}. Re-run a full Survex import."
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(
                parser="survex",
                message=message,
            )
        else:
            self.LoadSurvexFallThrough(survexblock, args, cmd)

    # this is a python generator idiom.
    # see https://realpython.com/introduction-to-python-generators/
    # this is the first use of generators in troggle (Oct.2022) and save 21 MB of memory
    with open(collatefilename, "r") as fcollate:
        for svxline in fcollate:
            self.lineno += 1
            sline, comment = self.rx_comment.match(svxline).groups()
            if comment:
                # this catches the ;*include NEWFILE and ;*edulcni ENDOFFILE lines too
                self.LoadSurvexComment(survexblock, comment)
            if not sline:
                continue  # skip blank lines
            # detect a merge failure inserted by version control
            mfail = self.rx_badmerge.match(sline)
            if mfail:
                message = f"\n ! - ERROR version control merge failure\n - '{sline}'\n"
                message = (
                    message + f" - line {self.lineno} in {blkid} in {survexblock}\n - NERD++ needed to fix it"
                )
                print(message)
                print(message, file=sys.stderr)
                DataIssue.objects.create(parser="survex", message=message)
                continue  # skip this line
            # detect a star command
            star = self.rx_star.match(sline)
            if star:
                # yes we are reading a *command
                starstatement(star)
            else:  # not a *cmd so we are reading data OR a ";" rx_comment failed. We hope.
                self.LoadSurvexLeg(survexblock, sline, comment, svxline)
    # publish the whole-file totals accumulated across all *begin/*end blocks
    self.legsnumber = nlegstotal
    self.slength = slengthtotal
2022-10-07 09:41:46 +01:00
def PushdownStackScan(self, survexblock, path, finname, flinear, fcollate):
    """Follows the *include links in all the survex files from the root file (usually 1623.svx)
    and reads only the *include and *begin and *end statements. It produces a linearised
    list of the include tree and detects blocks included more than once.

    survexblock: the SurvexBlock the scan was started from (for error messages only).
    path:        relative path of the file being scanned.
    finname:     filesystem name of that file.
    flinear:     open log file receiving the linearised include/begin/end tree.
    fcollate:    open output file receiving all lines collated into one file.
    """
    global stop_dup_warning

    def process_line(svxline):
        """Copies one line into the collate file and recurses into any *include it names."""
        self.lineno += 1
        # detect a merge failure inserted by version control
        mfail = self.rx_badmerge.match(svxline)
        if mfail:
            message = f"\n !! - ERROR version control merge failure\n - '{svxline}'\n"
            # FIX: was f"...at line {thissvxline}\n" — 'thissvxline' was never defined,
            # so hitting a merge-failure line raised NameError instead of reporting it.
            message = message + f" - in '{path}' at line {self.lineno}\n"
            message = (
                message + f" - line {self.lineno} {survexblock}\n - Parsing aborted. NERD++ needed to fix it"
            )
            print(message)
            print(message, file=sys.stderr)
            DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
            return  # skip this survex file and all things *included in it

        # *include lines are not copied verbatim; they are replaced by ;|*include markers below
        includestmt = self.rx_include.match(svxline)
        if not includestmt:
            fcollate.write(f"{svxline.strip()}\n")

        sline, comment = self.rx_comment.match(svxline.strip()).groups()
        star = self.rx_star.match(sline)
        if star:  # yes we are reading a *cmd
            cmd, args = star.groups()
            cmd = cmd.lower()
            if re.match("(?i)include$", cmd):
                # resolve the included path relative to the including file, dropping any .svx suffix
                includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))

                fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath), path)
                self.checkUniqueness(os.path.join(settings.SURVEX_DATA, includepath))
                if os.path.isfile(fullpath):
                    # --------------------------------------------------------
                    self.depthinclude += 1
                    # fininclude = open(fullpath,'r')
                    finincludename = fullpath
                    fcollate.write(f";|*include {includepath}\n")
                    flinear.write(f"{self.depthinclude:2} {indent} *include {includepath}\n")
                    push = includepath.lower()
                    self.includestack.append(push)
                    # -----------------
                    self.PushdownStackScan(survexblock, includepath, finincludename, flinear, fcollate)
                    # -----------------
                    pop = self.includestack.pop()
                    if pop != push:
                        # FIX: was "...pop!=push {}".format(pop, push, self.includestack) —
                        # one placeholder with three args silently dropped push and the stack.
                        message = f"!! ERROR mismatch *include pop!=push {pop}!={push} {self.includestack}"
                        print(message)
                        print(message, file=flinear)
                        print(message, file=sys.stderr)
                        DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
                    flinear.write(f"{self.depthinclude:2} {indent} *edulcni {pop}\n")
                    fcollate.write(f";|*edulcni {pop}\n")
                    # fininclude.close()
                    self.depthinclude -= 1
                    # --------------------------------------------------------
                else:
                    # 'fin' is the enclosing function's open file handle (closure), i.e. the including file
                    message = f" ! ERROR *include file '{includepath}' not found, listed in '{fin.name}'"
                    print(message)
                    print(message, file=sys.stderr)
                    DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
            elif re.match("(?i)begin$", cmd):
                self.depthbegin += 1
                depth = " " * self.depthbegin
                if args:
                    pushargs = args
                else:
                    pushargs = " "
                self.stackbegin.append(pushargs.lower())
                flinear.write(f"{self.depthbegin:2} {depth} *begin {args}\n")
                pass
            elif re.match("(?i)end$", cmd):
                depth = " " * self.depthbegin
                flinear.write(f"{self.depthbegin:2} {depth} *end {args}\n")
                if not args:
                    args = " "
                popargs = self.stackbegin.pop()
                if popargs != args.lower():
                    message = (
                        f"!! ERROR mismatch in BEGIN/END labels pop!=push '{popargs}'!='{args}'\n{self.stackbegin}"
                    )
                    print(message)
                    print(message, file=flinear)
                    print(message, file=sys.stderr)
                    DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
                self.depthbegin -= 1
                pass
            elif re.match("(?i)title$", cmd):
                depth = " " * self.depthbegin
                flinear.write(f"{self.depthbegin:2} {depth} *title {args}\n")
                pass

    indent = " " * self.depthinclude
    # progress ticker on stderr: a dot every 10 calls, a newline every 500
    sys.stderr.flush()
    self.callcount += 1
    if self.callcount % 10 == 0:
        print(".", file=sys.stderr, end="")
    if self.callcount % 500 == 0:
        print("\n", file=sys.stderr, end="")

    if path in self.svxfileslist:
        # We have already used os.normpath() so this is OK. "/../" and "//" have been simplified already.
        if stop_dup_warning:
            # print("D",end="", file=sys.stderr)
            pass
        else:
            message = f" * Warning. Duplicate detected. We have already seen this *include '{path}' from another survex file. Detected at callcount:{self.callcount} depth:{self.depthinclude}"
            print(message)
            print(message, file=flinear)
            # print(message,file=sys.stderr)
            DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
        if self.svxfileslist.count(path) > 2:
            # seen 3+ times: almost certainly an *include cycle, so stop recursing
            message = f" ! ERROR. Should have been caught before this. Survex file already *included 2x. Probably an infinite loop so fix your *include statements that include this. Aborting. {path}"
            print(message)
            print(message, file=flinear)
            # print(message,file=sys.stderr)
            DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
            return
        return
    try:
        # python generator idiom again. Not important here as these are small files
        with open(finname, "r") as fin:
            for svxline in fin:
                process_line(svxline)

        self.svxfileslist.append(path)

    except UnicodeDecodeError:
        # some bugger put an umlaut in a non-UTF survex file ?!
        message = f" ! ERROR *include file '{path}' in '{survexblock}' has UnicodeDecodeError. Omitted."
        print(message)
        print(message, file=sys.stderr)
        DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
        return  # skip this survex file and all things *included in it
    except Exception:  # FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit
        message = f" ! ERROR *include file '{path}' in '{survexblock}' has unexpected error. Omitted."
        print(message)
        print(message, file=sys.stderr)
        DataIssue.objects.create(parser="survex", message=message, url=get_offending_filename(path))
        return  # skip this survex file and all things *included in it
def checkUniqueness(self, fullpath):
    """Records fullpath in self.uniquename keyed by its bare filename, and prints
    a note (no longer an error) when the same filename appears under several paths."""
    fn = Path(fullpath).name
    known = self.uniquename.get(fn)
    if known is None:
        self.uniquename[fn] = [fullpath]
    else:
        known.append(fullpath)
        # This is not an error now that we are moving .3d files to the :loser: directory tree
        # message = f" ! NON-UNIQUE survex filename, '{fn}' - '{self.uniquename[fn]}' #{len(self.uniquename[fn])}"
        # print(message)
        # DataIssue.objects.create(parser='survex', message=message)
        print(f" NOTE: non-unique survex filename, '{fn}' - '{self.uniquename[fn]}' #{len(self.uniquename[fn])}")
2023-01-19 21:18:42 +00:00
def RunSurvexIfNeeded(self, fullpath, calledpath):
    """Re-runs cavern on fullpath.svx when its .log/.3d output looks stale:
    missing log, svx newer than log, log older than 60 days, cavern binary newer
    than the log, or (rarely) at random via chaosmonkey().

    fullpath:   path of the survex file WITHOUT its .svx extension.
    calledpath: path of the file whose *include referenced it (for error messages).
    """
    now = time.time()
    # fallback timestamps: pretend everything is a year old until measured below
    cav_t = now - 365 * 24 * 3600
    log_t = now - 365 * 24 * 3600
    svx_t = now - 365 * 24 * 3600

    def runcavern():
        """regenerates the .3d file from the .svx if it is older than the svx file, or older than the software,
        or randomly using chaosmonkey() just to keep things ticking over.
        """
        print(
            f" - Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{fullpath}'\n  at '{logpath}'\n"
        )
        print(
            f"days svx old: {(svx_t - log_t)/(24*3600):.1f}  cav:{(cav_t - log_t)/(24*3600):.1f}  log old: {(now - log_t)/(24*3600):.1f}"
        )
        # write the .log/.3d next to the source .svx
        outputdir = Path(str(f"{fullpath}.svx")).parent
        sp = subprocess.run(
            [settings.CAVERN, "--log", f"--output={outputdir}", f"{fullpath}.svx"],
            capture_output=True,
            check=False,
            text=True,
        )
        if sp.returncode != 0:
            message = f" ! Error running {settings.CAVERN}: {fullpath}"
            url = f"/survexfile{fullpath}.svx".replace(settings.SURVEX_DATA, "")
            DataIssue.objects.create(parser="xEntrances", message=message, url=url)
            print(message)
            print(
                "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
            )
        self.caverncount += 1

        # should also collect all the .err files too and create a DataIssue for each one which
        # - is nonzero in size AND
        # - has Error greater than 5% anywhere, or some other more serious error

        errpath = Path(fullpath + ".err")
        if errpath.is_file():
            if errpath.stat().st_size == 0:
                errpath.unlink()  # delete empty closure error file

    svxpath = Path(fullpath + ".svx")
    logpath = Path(fullpath + ".log")
    # NOTE(review): result discarded — this line is a no-op leftover
    Path(svxpath).parent

    if not svxpath.is_file():
        message = f' ! BAD survex file "{fullpath}" specified in *include in {calledpath} '
        DataIssue.objects.create(parser="entrances", message=message)
        print(message)
        return

    if not logpath.is_file():  # always run if logfile not there
        runcavern()
        return

    # assume the cavern binary is 2 years old rather than stat-ing it every call
    self.caverndate = now - 2 * 365 * 24 * 3600

    # NOTE(review): unreachable — self.caverndate was just set to a truthy value above,
    # so the 'which cavern' mtime lookup below is dead code (presumably disabled deliberately).
    if not self.caverndate:
        sp = subprocess.run(["which", f"{settings.CAVERN}"], capture_output=True, check=False, text=True)
        if sp.returncode != 0:
            message = f' ! Error running "which" on {settings.CAVERN}'
            DataIssue.objects.create(parser="entrances", message=message)
            print(message)
            print(
                "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
            )
        self.caverndate = os.path.getmtime(sp.stdout.strip())
    else:
        self.caverndate = now - 2 * 365 * 24 * 3600
    cav_t = self.caverndate
    log_t = os.path.getmtime(logpath)
    svx_t = os.path.getmtime(svxpath)
    now = time.time()

    if svx_t - log_t > 0:  # stale, svx file is newer than log
        runcavern()
        return
    if now - log_t > 60 * 24 * 60 * 60:  # >60 days, re-run anyway
        runcavern()
        return
    if cav_t - log_t > 0:  # new version of cavern
        runcavern()
        return
    if chaosmonkey(350):  # one in every 350 runs
        runcavern()
2020-06-27 17:55:59 +01:00
2023-01-19 21:18:42 +00:00
2020-06-27 18:00:24 +01:00
def FindAndLoadSurvex(survexblockroot):
    """Follows the *include links successively to find all the survex files in the
    whole include tree, collating them into one composite file; then scans the whole
    :loser: directory for .svx files NOT reached from the include tree and loads those
    too (via a generated _unseens.svx); finally loads all survex blocks linearly.

    Parameters:
        survexblockroot -- the root SurvexBlock whose .survexfile is SURVEX_TOPNAME.

    Returns:
        int -- total number of survey legs loaded.

    Side effects: redirects stdout to svxblks.log for the duration, writes
    svxlinear.log, the collated "_<top>.svx" file, _unseens.svx, and a cProfile
    dump PushdownStackScan.prof.
    """
    global stop_dup_warning

    print(" - redirecting stdout to svxblks.log...")
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file; restored (and closed) at the end.
    sys.stdout = open("svxblks.log", "w")

    print(f" - Scanning Survex Blocks tree from {settings.SURVEX_TOPNAME}.svx ...", file=sys.stderr)
    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only
    collatefilename = "_" + survexfileroot.path + ".svx"

    svx_scan = LoadingSurvex()
    svx_scan.callcount = 0
    svx_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, survexfileroot.path)

    print(f" - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    svx_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    svx_scan.checkUniqueness(fullpathtotop)

    indent = ""
    fcollate = open(collatefilename, "w")

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START", file=sys.stderr)
    flinear = open("svxlinear.log", "w")
    flinear.write(f" - MEM:{mem0:7.2f} MB START {survexfileroot.path}\n")
    print(" ", file=sys.stderr, end="")

    finrootname = Path(settings.SURVEX_DATA, survexfileroot.path + ".svx")
    fcollate.write(f";*include {survexfileroot.path}\n")
    flinear.write(f"{svx_scan.depthinclude:2} {indent} *include {survexfileroot.path}\n")

    import cProfile
    import pstats
    from pstats import SortKey

    pr = cProfile.Profile()
    pr.enable()
    # ----------------------------------------------------------------
    svx_scan.PushdownStackScan(survexblockroot, survexfileroot.path, finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    pr.disable()
    with open("PushdownStackScan.prof", "w") as f:
        ps = pstats.Stats(pr, stream=f)
        ps.sort_stats(SortKey.CUMULATIVE)
        ps.print_stats()

    flinear.write(f"{svx_scan.depthinclude:2} {indent} *edulcni {survexfileroot.path}\n")
    fcollate.write(f";*edulcni {survexfileroot.path}\n")
    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP {survexfileroot.path}\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED\n")
    flinear.write(f" - {len(svx_scan.svxfileslist):,} survex files in linear include list \n")

    print(f"\n - {svx_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files", file=sys.stderr)
    print(f" - {len(svx_scan.svxfileslist):,} survex files from tree in linear include list", file=sys.stderr)
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    #
    # Process all the omitted files in :loser: with some exceptions
    #
    unseens = set()
    b = []
    for p in Path(settings.SURVEX_DATA).rglob("*.svx"):
        if p.is_file():
            po = p.relative_to(Path(settings.SURVEX_DATA))
            pox = po.with_suffix("")
            if str(pox) not in svx_scan.svxfileslist:
                # print(f"[{pox}]", file=sys.stderr)
                unseens.add(pox)
            else:
                b.append(pox)

    if len(b) != len(svx_scan.svxfileslist):
        print(
            f" ! Mismatch. {len(b)} survex files found which should be {len(svx_scan.svxfileslist)} in main tree)",
            file=sys.stderr,
        )

    excpts = ["surface/terrain", "kataster/kataster-boundaries", "template", "docs", "_unseens"]
    # Collect into a set so that a path matching more than one exclusion prefix
    # is only removed once (a list could hold duplicates -> KeyError on remove).
    removals = set()
    for x in unseens:
        for o in excpts:
            if str(x).strip().startswith(o):
                removals.add(x)
    # special fix for file not actually in survex format.
    # discard() (not remove()) so a missing entry is not a KeyError.
    unseens.discard(Path("fixedpts/gps/gps00raw"))

    for x in removals:
        unseens.discard(x)
    print(
        f"\n - {len(unseens)} survex files found which were not included in main tree. ({len(svx_scan.svxfileslist)} in main tree)",
        file=sys.stderr,
    )
    print(" -- Now loading the previously-omitted survex files.", file=sys.stderr)

    # Write a synthetic top-level file that *includes every unseen file, so they
    # can be loaded with exactly the same machinery as the main tree.
    with open(Path(settings.SURVEX_DATA, "_unseens.svx"), "w") as u:
        u.write(
            f"; {len(unseens):,} survex files not *included by {settings.SURVEX_TOPNAME} (which are {len(svx_scan.svxfileslist):,} files)\n"
        )
        u.write(f"; autogenerated by parser/survex.py from databasereset.py on '{datetime.now(timezone.utc)}'\n")
        u.write(f"; omitting any file beginning with {excpts}\n\n")
        u.write("*begin unseens\n")
        for x in sorted(unseens):
            u.write(f" *include {x}\n")
        u.write("*end unseens\n")

    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only

    omit_scan = LoadingSurvex()
    omit_scan.callcount = 0
    omit_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, "_unseens.svx")

    # copy the list to prime the next pass through the files
    omit_scan.svxfileslist = svx_scan.svxfileslist[:]
    svx_scan.svxfileslist = []  # free memory
    svx_scan = None  # NOTE(review): rebinding drops our reference; attributes are freed only when no other refs remain

    print(f" - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    omit_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    omit_scan.checkUniqueness(fullpathtotop)

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START '_unseens'", file=sys.stderr)
    flinear.write(f" - MEM:{mem0:7.2f} MB START '_unseens'\n")
    print(" ", file=sys.stderr, end="")

    finrootname = fullpathtotop
    fcollate.write(";*include _unseens.svx\n")
    flinear.write(f"{omit_scan.depthinclude:2} {indent} *include _unseens\n")
    stop_dup_warning = True
    # ----------------------------------------------------------------
    omit_scan.PushdownStackScan(survexblockroot, "_unseens", finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    stop_dup_warning = False

    flinear.write(f"{omit_scan.depthinclude:2} {indent} *edulcni _unseens\n")
    fcollate.write(";*edulcni _unseens.svx\n")
    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP _unseens.svx OMIT\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED OMIT\n")
    flinear.write(f" - {len(omit_scan.svxfileslist):,} survex files in linear include list OMIT\n")

    flinear.close()
    fcollate.close()

    print(
        f"\n - {omit_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files in the unseen list",
        file=sys.stderr,
    )
    print(
        f" - {len(omit_scan.svxfileslist):,} survex files in linear include list including previously unseen ones \n",
        file=sys.stderr,
    )
    omit_scan = None

    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # Before doing this, it would be good to identify the *equate and *entrance we need that are relevant to the
    # entrance locations currently loaded after this by LoadPos(), but could better be done before ?
    # look in MapLocations() for how we find the entrances
    print("\n - Loading All Survex Blocks  (LinearLoad)", file=sys.stderr)
    svx_load = LoadingSurvex()
    svx_load.survexdict[survexfileroot.survexdirectory] = []
    svx_load.survexdict[survexfileroot.survexdirectory].append(survexfileroot)
    svx_load.svxdirs[""] = survexfileroot.survexdirectory

    print(" ", file=sys.stderr, end="")
    # ----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot, survexfileroot.path, collatefilename)
    # ----------------------------------------------------------------

    mem1 = get_process_memory()
    print(f"\n - MEM:{mem1:7.2f} MB STOP", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # Close the logging file, Restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    legsnumber = svx_load.legsnumber
    mem1 = get_process_memory()

    print(f" - Number of SurvexDirectories: {len(svx_load.survexdict):,}")
    tf = 0
    for d in svx_load.survexdict:
        tf += len(svx_load.survexdict[d])
    print(f" - Number of SurvexFiles: {tf:,}")
    print(f" - Number of Survex legs: {legsnumber:,}")
    svx_load = None

    return legsnumber
2020-06-23 23:34:08 +01:00
2023-01-19 21:18:42 +00:00
2020-06-29 21:16:13 +01:00
def MakeSurvexFileRoot():
    """Returns a file_object.path = SURVEX_TOPNAME associated with directory_object.path = SURVEX_DATA"""
    # find a cave, any cave.. kataster 000 is the placeholder; filter() returns a QuerySet
    placeholder_caves = Cave.objects.filter(kataster_number="000")

    fileroot = SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    fileroot.save()
    directoryroot = SurvexDirectory(path=settings.SURVEX_DATA, cave=placeholder_caves[0], primarysurvexfile=fileroot)
    # MariaDB doesn't like this hack. Complains about non-null cave_id EVEN THOUGH our model file says this is OK:
    # cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
    directoryroot.save()

    # mutually dependent objects need a double-save like this
    fileroot.survexdirectory = directoryroot  # i.e. SURVEX_DATA/SURVEX_TOPNAME
    fileroot.save()
    return fileroot
2023-01-19 21:18:42 +00:00
2022-10-05 19:11:18 +01:00
def MakeOmitFileRoot(fn):
    """Returns a file_object.path = _unseens.svx associated with directory_object.path = SURVEX_DATA"""
    omitroot = SurvexFile(path=fn, cave=None)
    # attach to the already-created root directory object
    omitroot.survexdirectory = SurvexDirectory.objects.get(path=settings.SURVEX_DATA)
    omitroot.save()
    return omitroot
2020-06-23 23:34:08 +01:00
2023-01-19 21:18:42 +00:00
2020-06-27 18:00:24 +01:00
def LoadSurvexBlocks():
    """Flushes all survex-derived objects and survex DataIssues from the database,
    recreates the root file/directory/block objects, then loads the whole survex
    tree via FindAndLoadSurvex()."""
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)

    print(" - Flushing All Survex Blocks...")
    # why does this increase memory use by 20 MB ?!
    # We have foreign keys, Django needs to load the related objects
    # in order to resolve how the relation should handle the deletion:
    # https://docs.djangoproject.com/en/3.2/ref/models/fields/#django.db.models.ForeignKey.on_delete
    for model in (SurvexBlock, SurvexFile, SurvexDirectory, SurvexPersonRole, SurvexStation):
        model.objects.all().delete()
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now. Foreign key objects loaded on deletion. ", file=sys.stderr)

    print(" - Flushing survex Data Issues ")
    for parsername in ("survex", "svxdate", "survexleg", "survexunits", "entrances", "xEntrances"):
        DataIssue.objects.filter(parser=parsername).delete()
    print(" - survex Data Issues flushed ")
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)

    survexfileroot = MakeSurvexFileRoot()
    # this next makes a block_object assciated with a file_object.path = SURVEX_TOPNAME
    survexblockroot = SurvexBlock(
        name=ROOTBLOCK, survexpath="", cave=None, survexfile=survexfileroot, legsall=0, legslength=0.0
    )
    # crashes here sometimes on MariaDB complaining that cave_id should not be null. But it should be.
    # django.db.utils.IntegrityError: (1048, "Column 'cave_id' cannot be null")
    # fix by restarting db on server
    # sudo service mariadb stop
    # sudo service mariadb start
    survexblockroot.save()

    omitsfileroot = MakeOmitFileRoot("_unseens.svx")
    survexomitsroot = SurvexBlock(
        name=OMITBLOCK, survexpath="", cave=None, survexfile=omitsfileroot, legsall=0, legslength=0.0
    )
    survexomitsroot.save()

    print(" - Loading Survex Blocks...")
    memstart = get_process_memory()
    # ----------------------------------------------------------------
    FindAndLoadSurvex(survexblockroot)
    # ----------------------------------------------------------------
    memend = get_process_memory()
    print(f" - MEMORY start:{memstart:.3f} MB end:{memend:.3f} MB increase={memend - memstart:.3f} MB")

    survexblockroot.save()

    print(" - Loaded All Survex Blocks.")
2011-07-11 00:01:12 +01:00
2019-02-24 13:03:34 +00:00
# Matches one line of a survex .pos file, e.g. "(  x.xx,   y.yy,   z.zz ) station.id.path" —
# three signed decimal coordinates in parentheses, then the dotted station identifier.
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
2023-01-19 21:18:42 +00:00
2020-07-04 01:10:17 +01:00
def LoadPositions():
    """First load the survex stations for entrances and fixed points (about 600) into the database.
    Run cavern to produce a complete .3d file, then run 3dtopos to produce a table of
    all survey point positions. Then lookup each position by name to see if we have it in the database
    and if we do, then save the x/y/z coordinates. This gives us coordinates of the entrances.
    If we don't have it in the database, print an error message and discard it.
    """
    # modification timestamps used for staleness checks below
    svx_t = 0
    d3d_t = 0

    def runcavern3d():
        # Regenerate the .3d file with cavern, then the .pos file with survexport,
        # reporting failures as 'entrances' DataIssues. Closes over topdata.
        outputdir = Path(str(f"{topdata}.svx")).parent

        # print(" - Regenerating stale cavern .log and .3d for '{}'\n days old: {:.1f} {:.1f} {:.1f}".
        # format(topdata, (svx_t - d3d_t)/(24*3600), (cav_t - d3d_t)/(24*3600), (now - d3d_t)/(24*3600)))

        file3d = Path(f"{topdata}.3d")
        try:
            sp = subprocess.run(
                [settings.CAVERN, "--log", f"--output={outputdir}", f"{topdata}.svx"],
                capture_output=True,
                check=False,
                text=True,
            )  # check=False means exception not raised
            if sp.returncode != 0:
                message = f"! Error: cavern: creating {file3d} in runcavern3()"
                DataIssue.objects.create(parser="entrances", message=message)
                print(message)

                # find the errors in the 1623.log file
                sp = subprocess.run(
                    ["grep", "error:", f"{topdata}.log"], capture_output=True, check=False, text=True
                )  # check=False means exception not raised
                message = f"! Error: cavern: {sp.stdout} creating {file3d} "
                DataIssue.objects.create(parser="entrances", message=message)
                print(message)

        # NOTE(review): bare 'except:' also catches SystemExit/KeyboardInterrupt — consider 'except Exception:'
        except:
            message = f" ! CalledProcessError 'cavern' in runcavern3() at {topdata}."
            DataIssue.objects.create(parser="entrances", message=message)
            print(message)

            if file3d.is_file():
                message = f" ! CalledProcessError. File permissions {file3d.stat().st_mode} on {str(file3d)}"
                DataIssue.objects.create(parser="entrances", message=message)
                print(message)

        if file3d.is_file():  # might be an old one though
            try:
                # print(" - Regenerating {} {}.3d in {}".format(settings.SURVEXPORT, topdata, settings.SURVEX_DATA))
                sp = subprocess.run(
                    [settings.SURVEXPORT, "--pos", f"{file3d}"],
                    cwd=settings.SURVEX_DATA,
                    capture_output=True,
                    check=False,
                    text=True,
                )
                if sp.returncode != 0:
                    print(
                        f"! Error: survexport creating {topdata}.pos in runcavern3().\n\n"
                        + str(sp.stdout)
                        + "\n\nreturn code: "
                        + str(sp.returncode)
                    )
            except:
                message = f" ! CalledProcessError 'survexport' in runcavern3() at {file3d}."
                DataIssue.objects.create(parser="entrances", message=message)
                print(message)
        else:
            message = f" ! Failed to find {file3d} so aborting generation of new .pos, using old one if present"
            DataIssue.objects.create(parser="entrances", message=message)
            print(message)

    topdata = os.fspath(Path(settings.SURVEX_DATA) / settings.SURVEX_TOPNAME)
    print(f" - Generating a list of Pos from {topdata}.svx and then loading...")

    found = 0
    print("\n")  # extra line because cavern overwrites the text buffer somehow
    # cavern defaults to using same cwd as supplied input file

    # locate the cavern binary so its mtime can be compared against the .3d file
    completed_process = subprocess.run(["which", f"{settings.CAVERN}"], capture_output=True, check=True, text=True)
    cav_t = os.path.getmtime(completed_process.stdout.strip())
    svxpath = topdata + ".svx"
    d3dpath = topdata + ".3d"
    pospath = topdata + ".pos"
    svx_t = os.path.getmtime(svxpath)
    if os.path.isfile(d3dpath):
        # always fails to find log file if a double directory, e.g. caves-1623/B4/B4/B4.svx Why ?
        d3d_t = os.path.getmtime(d3dpath)
    now = time.time()
    # regenerate if any output is missing, stale, older than 60 days, or older than cavern itself
    if not os.path.isfile(pospath):
        runcavern3d()
    if not os.path.isfile(d3dpath):
        runcavern3d()
    elif d3d_t - svx_t > 0:  # stale, 3d older than svx file
        runcavern3d()
    elif now - d3d_t > 60 * 24 * 60 * 60:  # >60 days old, re-run anyway
        runcavern3d()
    elif cav_t - d3d_t > 0:  # new version of cavern
        runcavern3d()

    # build the lookup of survex ids we care about (entrances and fixed points)
    mappoints = {}
    for pt in MapLocations().points():
        svxid, number, point_type, label = pt
        mappoints[svxid] = True

    if not Path(pospath).is_file():
        message = f"! Failed to find {pospath} so aborting generation of entrance locations. "
        DataIssue.objects.create(parser="entrances", message=message)
        print(message)
        return
    posfile = open(pospath)
    posfile.readline()  # Drop header
    try:
        survexblockroot = SurvexBlock.objects.get(name=ROOTBLOCK)
    except:
        # fall back to the first block ever created if the named root is missing
        try:
            survexblockroot = SurvexBlock.objects.get(id=1)
        except:
            message = "! FAILED to find root SurvexBlock"
            print(message)
            DataIssue.objects.create(parser="entrances", message=message)
            raise
    for line in posfile.readlines():
        r = poslineregex.match(line)
        if r:
            x, y, z, id = r.groups()
            # a .pos id matches a map point if it ends with that point's svxid
            for sid in mappoints:
                if id.endswith(sid):
                    blockpath = "." + id[: -len(sid)].strip(".")
                    # But why are we doing this? Why do we need the survexblock id for each of these ?
                    # ..because mostly they don't actually appear in any SVX file. We should match them up
                    # via the cave data, not by this half-arsed syntactic match which almost never works. PMS.
                    if False:  # deliberately disabled block-matching experiment, kept for reference
                        try:
                            sbqs = SurvexBlock.objects.filter(survexpath=blockpath)
                            if len(sbqs) == 1:
                                sbqs[0]
                            if len(sbqs) > 1:
                                message = f"! MULTIPLE SurvexBlocks {len(sbqs):3} matching Entrance point {blockpath} {sid} '{id}'"
                                print(message)
                                DataIssue.objects.create(parser="entrances", message=message)
                                sbqs[0]
                            elif len(sbqs) <= 0:
                                message = f"! ZERO SurvexBlocks matching Entrance point {blockpath} {sid} '{id}'"
                                print(message)
                                DataIssue.objects.create(parser="entrances", message=message)
                        except:
                            message = f"! FAIL in getting SurvexBlock matching Entrance point {blockpath} {sid}"
                            print(message)
                            DataIssue.objects.create(parser="entrances", message=message)
                    try:
                        # all entrance stations are attached to the root block, not their own block
                        ss = SurvexStation(name=id, block=survexblockroot)
                        ss.x = float(x)
                        ss.y = float(y)
                        ss.z = float(z)
                        ss.save()
                        found += 1
                    except:
                        message = f"! FAIL to create SurvexStation Entrance point {blockpath} {sid}"
                        print(message)
                        DataIssue.objects.create(parser="entrances", message=message)
                        raise
    print(f" - {found} SurvexStation entrances found.")