2023-01-19 18:33:04 +00:00
import copy
2020-05-28 01:16:45 +01:00
import os
import re
2021-04-03 00:34:34 +01:00
import subprocess
2023-01-19 18:33:04 +00:00
import sys
import time
2023-01-19 21:34:09 +00:00
from datetime import datetime , timezone
2021-03-24 15:46:35 +00:00
from pathlib import Path
2009-05-13 05:39:52 +01:00
2020-05-28 01:16:45 +01:00
import troggle . settings as settings
2023-03-23 19:05:25 +00:00
from troggle . core . models . caves import Cave , Entrance , GetCaveLookup
2023-01-29 16:47:46 +00:00
from troggle . core . models . logbooks import QM
2023-01-30 23:04:11 +00:00
from troggle . core . models . survex import SurvexBlock , SurvexDirectory , SurvexFile , SurvexPersonRole , SurvexStation
from troggle . core . models . wallets import Wallet
2021-04-13 01:13:08 +01:00
from troggle . core . models . troggle import DataIssue , Expedition
2023-01-19 18:33:04 +00:00
from troggle . core . utils import chaosmonkey , get_process_memory
2023-03-23 19:05:25 +00:00
#from troggle.parsers.logbooks import GetCaveLookup
2023-03-24 00:54:26 +00:00
from troggle . parsers . caves import create_new_cave
2023-01-19 21:18:42 +00:00
from troggle . parsers . people import GetPersonExpeditionNameLookup , known_foreigner
2020-05-28 01:16:45 +01:00
2023-01-19 21:18:42 +00:00
""" Imports the tree of survex files following from a defined root .svx file
2022-10-07 21:48:41 +01:00
It also scans the Loser repo for all the svx files , which it loads individually afterwards .
2023-01-19 21:18:42 +00:00
"""
2021-04-13 01:37:42 +01:00
2023-01-19 21:18:42 +00:00
todo = """
2023-01-29 01:30:10 +00:00
- Lots to do to cut down on unnecessary . save ( ) calls to avoid hitting the db so much . SHould
2023-02-02 21:50:40 +00:00
speed it up noticably .
- Obscure bug in the * team inheritance and rootblock initialization needs tracking down
- Learn to use Django . select_related ( ) and . prefetch_related ( ) to speed things up
https : / / zerotobyte . com / how - to - use - django - select - related - and - prefetch - related /
2021-11-06 21:57:51 +00:00
2022-03-01 01:30:09 +00:00
- LoadSurvexFile ( ) Creates a new current survexfile and valid . survexdirectory
The survexblock passed - in is not necessarily the parent . FIX THIS .
2022-07-08 23:54:48 +01:00
2023-03-05 23:06:06 +00:00
- Finish writing the parse_one_file ( ) function for survexfiles edited online . Perhaps
easier if this is a completely new file rather than an existing file . . nasty .
2022-11-15 22:25:39 +00:00
- When Olly implements LEG in the ' dump3d --legs ' utility , then we can use that to get the length of
2023-02-02 21:50:40 +00:00
all the legs in a survex block instead of adding them up oursleves . Which means that we can
ignore all the Units and offset stuff , that troggle will work with survex files with backsights ,
repeated readings from distox etc . . Not actually useful for pre 2022 survey data ,
but good future - proofing .
Also it will be a tiny bit more accurate as these leg lengths are after loop closure fixup .
2023-01-19 21:18:42 +00:00
"""
2020-06-15 03:28:51 +01:00
# Module-level parser state and constants.
survexblockroot = None  # root SurvexBlock of the import tree; set during import
survexomitsroot = None  # root block for svx files not reached via the *include tree
ROOTBLOCK = "rootblock"  # name of the synthetic top-level block
OMITBLOCK = "omitblock"  # name of the synthetic block for omitted files
METRESINFEET = 3.28084  # conversion factor used when a file declares *units feet
UNSEENS = "_unseens.svx"  # generated svx collecting files not *included anywhere — TODO confirm against collation code

stop_dup_warning = False  # when True, suppress repeated duplicate-*include warnings
dup_includes = 0  # count of duplicate *include lines seen

debugprint = False  # Turns on debug printout for just one *include file
debugprinttrigger = "!"  # filename fragment that switches debugprint on when matched
2023-01-19 21:18:42 +00:00
2023-01-28 15:10:39 +00:00
# In-memory stash of (parser, message, url, sb) tuples, filled by stash_data_issue()
# and flushed to the database in bulk by store_data_issues().
dataissues = []
2023-01-29 17:03:50 +00:00
class SurvexLeg:
    """One leg of a survey trip: tape length, compass bearing, clino reading.

    Plain in-memory record — no longer a models.Model subclass, so no longer
    a database table.
    """

    # Defaults shared as class attributes; instances overwrite per leg.
    clino = 0.0
    compass = 0.0
    tape = 0.0
2023-03-23 19:05:25 +00:00
def IdentifyCave(cavepath):
    """Given a file path for a survex file, or a survex-block path,
    return the cave object (or None when the path is not cave-shaped).

    This is clearly getting it badly wrong, see /survexdirs report.
    """
    caveslist = GetCaveLookup()
    key = cavepath.lower()
    if key in caveslist:
        return caveslist[key]

    # TO DO - this predates the big revision to Gcavelookup so look at this again carefully
    path_match = LoadingSurvex.rx_cave.search(cavepath)  # use as Class method
    if not path_match:
        # not a cave, but that is fine.
        # print(f' ! No regex(standard identifier) cave match for {cavepath.lower()}')
        return None

    sluggy = f"{path_match.group(1)}-{path_match.group(2)}"
    for guess in (sluggy.lower(), path_match.group(2).lower()):
        if guess in caveslist:
            # memoize under the original path so the next lookup is direct
            caveslist[cavepath] = caveslist[guess]
            return caveslist[guess]
    print(f"! Failed to find cave for {cavepath.lower()}")
    return None
2023-01-29 17:03:50 +00:00
2023-02-01 23:43:05 +00:00
def datewallet(w, earliest):
    """Set w.date from the earliest dated survexblock attached to wallet w.

    REFACTOR this to do the whole date-getting task.
    Currently there is only one SurvexBlock per wallet, but this is in
    anticipation of changing the schema to allow many.
    Returns an isoformat string, or None when no block date precedes 'earliest'.
    """
    blocks = SurvexBlock.objects.filter(scanswallet=w)  # only ONE I think?!
    # only dates strictly before 'earliest' count, matching the old running-minimum scan
    candidates = [b.date for b in blocks if b.date and b.date < earliest]
    if candidates:
        w.date = min(candidates).isoformat()
    else:
        # no date found
        w.date = None
    return w.date
def set_walletdate(w):
    """Ensure wallet w has a walletdate, falling back to its survex blocks.

    w.date() sets .walletdate as a side-effect when the date comes from JSON;
    only when that fails do we scan the attached survex blocks.
    """
    today = datetime.now().date()
    if w.date():
        return  # date already known from JSON
    w.walletdate = datewallet(w, today)  # Not in JSON, so checks all the survex blocks
    w.save()
2023-01-28 21:00:38 +00:00
def stash_data_issue(parser=None, message=None, url=None, sb=None):
    """Cache one DataIssue in memory; the database write is deferred until
    store_data_issues() runs at the end of the import."""
    global dataissues
    issue = (parser, message, url, sb)
    dataissues.append(issue)
2023-01-28 15:10:39 +00:00
def store_data_issues():
    """Flush the in-memory DataIssue stash into the database in one bulk insert."""
    global dataissues
    print(f" - Storing {len(dataissues)} Data Issues into database")

    # Build the model objects first, without touching the database
    pending = []
    for parser, message, url, sb in dataissues:
        if url is None and sb is not None:
            url = get_offending_filename(sb)
        pending.append(DataIssue(parser=parser, message=message, url=url))

    # Single commit to the db
    DataIssue.objects.bulk_create(pending)
    dataissues = []  # in database now, so empty cache
2023-01-19 21:18:42 +00:00
2023-01-28 21:00:38 +00:00
def get_offending_filename(path):
    """Used to provide the URL for a line in the DataErrors page
    which reports problems on importing data into troggle.
    """
    return f"/survexfile/{path}.svx"
2020-06-24 14:10:13 +01:00
2023-01-29 16:23:58 +00:00
trip_people_cache = {}  # per survexblock, so robust wrt PUSH/POP begin/end


def get_team_on_trip(survexblock):
    """Return the SurvexPersonRole queryset for a block, memoized per block so
    repeated calls avoid hitting the database. Only used for the complete team."""
    global trip_people_cache
    cached = trip_people_cache.get(survexblock)
    if cached is not None:
        return cached
    qpeople = SurvexPersonRole.objects.filter(survexblock=survexblock)  # not very good Django style
    trip_people_cache[survexblock] = qpeople  # this is a query list
    return qpeople
def get_people_on_trip(survexblock):
    """Return the de-duplicated displayable names of everyone on a survexblock
    trip. Only used for the complete team."""
    qpeople = get_team_on_trip(survexblock)
    names = {f"{p.personname}" for p in qpeople}
    return list(names)
2022-10-07 21:48:41 +01:00
2023-01-31 00:39:30 +00:00
trip_person_record = {}  # per survexblock, so robust wrt PUSH/POP begin/end
trip_team_cache = {}  # per survexblock, so robust wrt PUSH/POP begin/end


def put_person_on_trip(survexblock, personexpedition, tm):
    """Record one person as present on one survexblock trip.

    The SurvexPersonRole is only constructed here; it is cached per block and
    written to the database in bulk later by confirm_team_on_trip().
    Returns True when this (survexblock, personexpedition) pair was already
    recorded, False otherwise (including on failure).
    """
    global trip_person_record
    global trip_team_cache

    if (survexblock, personexpedition) in trip_person_record:
        return True
    try:
        personrole = SurvexPersonRole(  # does not commit to db yet
            survexblock=survexblock,
            person=personexpedition.person,
            personexpedition=personexpedition,
            personname=tm,
        )
    except Exception:
        message = f"! *team '{tm}' FAIL, already created {survexblock.survexfile.path} ({survexblock})"
        # FIX: was 'print(self.insp + message)' — 'self' does not exist in a
        # module-level function, so the handler itself raised NameError.
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
        # FIX: must return here; 'personrole' is unbound after the exception,
        # so falling through to the cache append raised a second NameError.
        return False

    trip_team_cache.setdefault(survexblock, []).append(personrole)
    trip_person_record[(survexblock, personexpedition)] = 1
    return False
2023-01-31 00:39:30 +00:00
def confirm_team_on_trip(survexblock):
    """Bulk-insert the cached SurvexPersonRole objects for this block into the db."""
    global trip_team_cache
    roles = trip_team_cache.get(survexblock)
    if roles is None:
        return
    # Now commit to db
    SurvexPersonRole.objects.bulk_create(roles)
    trip_team_cache[survexblock] = []  # in database now, so empty cache
def check_team_cache():
    """Debug dump: report any blocks whose cached team was never committed."""
    global trip_team_cache
    print("! *team CACHEFAIL check_team_cache() called")
    for block in trip_team_cache:
        print(f"! *team CACHEFAIL, already created {block.survexfile.path} ({block})")
2023-01-29 16:23:58 +00:00
person_pending_cache = {}  # per survexblock, so robust wrt PUSH/POP begin/end


def add_to_pending(survexblock, tm):
    """Collects team names before we have a date so cannot validate against
    expo attendance yet."""
    global person_pending_cache
    if survexblock not in person_pending_cache:
        person_pending_cache[survexblock] = set()
    person_pending_cache[survexblock].add(tm)


def get_team_pending(survexblock):
    """Return (and consume) the set of *team names seen before the *date line
    in a survexblock; None when there are no pending names."""
    global person_pending_cache
    if survexblock in person_pending_cache:
        teamnames = person_pending_cache[survexblock]  # a set of names
        # FIX: previously the slot was reset to an empty *tuple* (), so a later
        # add_to_pending() for the same block crashed with AttributeError on
        # tuple.add(). Removing the entry lets add_to_pending() recreate a set.
        del person_pending_cache[survexblock]
        return teamnames
    return None
2023-01-19 21:18:42 +00:00
class LoadingSurvex:
    """A 'survex block' is a *begin...*end set of cave data.
    A survex file can contain many begin-end blocks, which can be nested, and which can *include
    other survex files.
    A 'scanswallet' is what we today call a "survey scans folder" or a "wallet".

    This class holds all the state of the recursive-descent import of the
    tree of survex files: the current file, block, team, date, flags and the
    stacks used to push/pop that state at *begin/*end boundaries.
    """
2023-01-19 21:18:42 +00:00
2022-11-18 20:42:03 +00:00
    # python regex flags (?i) means case-insentitive, (?s) means . matches newline too
    # see https://docs.python.org/3/library/re.html
    # NOTE(review): all rx_* patterns are class attributes, compiled once at import
    # time and shared by every LoadingSurvex instance.
    rx_begin = re.compile(r"(?i)begin")
    rx_begin2 = re.compile("(?i)begin$")
    rx_end = re.compile(r"(?i)end$")
    rx_end2 = re.compile("(?i)end$")
    rx_title = re.compile(r"(?i)title$")
    rx_title2 = re.compile("(?i)title$")
    rx_ref = re.compile(r"(?i)ref$")
    rx_data = re.compile(r"(?i)data$")
    rx_flags = re.compile(r"(?i)flags$")
    rx_alias = re.compile(r"(?i)alias$")
    rx_entrance = re.compile(r"(?i)entrance$")
    rx_date = re.compile(r"(?i)date$")
    rx_units = re.compile(r"(?i)units$")
    rx_team = re.compile(r"(?i)team$")
    rx_set = re.compile(r"(?i)set$")
    rx_names = re.compile(r"(?i)names")
    rx_flagsnot = re.compile(r"not\s")
    rx_linelen = re.compile(r"[\d\-+.]+$")
    # Every role/instrument word (and misspelling) ever seen on a *team line.
    instruments = "(bitch|bodger|bolt|bolter|bolting|book|clino|comp|compass|consultant|disto|distox|distox2|dog|dogsbody|drawing|drill|gps|helper|inst|instr|instrument|monkey|nagging|nail|nail_polish|nail_polish_bitch|nail_polish_monkey|nail_varnish|nail_varnish_bitch|note|paint|photo|pic|point|polish|powerdrill|rig|rigger|rigging|shoot|sketch|slacker|something|tape|topodroid|unknown|useless|varnish|waiting_patiently)"
    # Team-line shapes: role-first (current style), role-last (old style), role-only (empty line).
    rx_teammem = re.compile(r"(?i)" + instruments + "?(?:es|s)?\s+(.*)$")
    rx_teamold = re.compile(r"(?i)(.*)\s+" + instruments + "?(?:es|s)?$")
    rx_teamabs = re.compile(r"(?i)^\s*(" + instruments + ")?(?:es|s)?\s*$")
    # Separators between person names on one *team line.
    rx_person = re.compile(r"(?i) and |/| / |, | , |&| & | \+ |^both$|^none$")
    rx_qm = re.compile(
        # r"(?i)^\s*QM(\d+)\s+(.+)\s+([\w\-\_]+)\.([\w\.\-]+)\s+(([\w\-]+)\.([\w\.\-]+)|\-)\s+(.+)$"
        r"(?i)^\s*QM(\d+)\s+([^\s]+)\s+([^\s]+)\s+([^\s]+)\s+(.+)$"
    )
    # does not recognise non numeric suffix survey point ids
    rx_qm0 = re.compile(r"(?i)^\s*QM(\d+)\s+(.+)$")
    rx_qm_tick = re.compile(r"(?i)^\s*QM(\d+)\s+TICK\s([\d\-]+)\s(.*)$")
    # remember there is also QM_PATTERN used in views.other and set in settings.py
    rx_tapelng = re.compile(r"(?i).*(tape|length).*$")
    rx_cave = re.compile(r"(?i)caves-(\d\d\d\d)/([-\d\w]+|\d\d\d\d-?\w+-\d+)")
    rx_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
    rx_comminc = re.compile(r"(?i)^\|\*include[\s]*([-\w/]*).*$")  # inserted by linear collate ;|*include
    rx_commcni = re.compile(r"(?i)^\|\*edulcni[\s]*([-\w/]*).*$")  # inserted by linear collate ;|*edulcni
    rx_include = re.compile(r"(?i)^\s*(\*include[\s].*)$")
    rx_include2 = re.compile("(?i)include$")
    rx_commref = re.compile(r"(?i)^\s*ref(?:erence)?[\s.:]*(\d+)\s*#\s*(X)?\s*(\d+)")
    rx_ref_text = re.compile(r'(?i)^\s*\"[^"]*\"\s*$')
    rx_star = re.compile(r"(?i)\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$")
    rx_starref = re.compile(r"(?i)^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
    rx_argsref = re.compile(r"(?i)^[\s.:]*((?:19[6789]\d)|(?:20[012345]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$")
    rx_badmerge = re.compile(r"(?i).*(\>\>\>\>\>)|(\=\=\=\=\=)|(\<\<\<\<\<).*$")
    rx_ref2 = re.compile(r"(?i)\s*ref[.;]?")
    rx_commteam = re.compile(r"(?i)\s*(Messteam|Zeichner)\s*[:]?(.*)")
    rx_quotedtitle = re.compile(r'(?i)^"(.*)"$')

    # This interprets the survex "*data normal" command which sets out the order of the fields in the data, e.g.
    # *DATA normal from to length gradient bearing ignore ignore ignore ignore
    datastardefault = {"type": "normal", "from": 0, "to": 1, "tape": 2, "compass": 3, "clino": 4}
    flagsdefault = {"duplicate": False, "surface": False, "splay": False, "skiplegs": False, "splayalias": False}

    # --- mutable per-import state (reset/copied as blocks are pushed/popped) ---
    datastar = {}  # current *data field ordering
    flagsstar = {}  # current *flags settings
    units = "metres"  # current *units for tape lengths
    unitsfactor = None  # numeric factor from a "*units tape 3.28 feet" style line
    slength = 0.0  # accumulated leg length for the current block
    legsnumber = 0  # accumulated leg count for the current block
    depthbegin = 0  # *begin/*end nesting depth
    depthinclude = 0  # *include nesting depth
    # Stacks pushed at *begin / *include and popped at *end / *edulcni:
    unitsstack = []
    legsnumberstack = []
    slengthstack = []
    teaminheritstack = []
    teamcurrentstack = []
    dateinheritstack = []
    datecurrentstack = []
    stackbegin = []
    flagsstack = []
    datastack = []
    includestack = []
    stacksvxfiles = []
    svxfileslist = []
    svxdirs = {}
    svxprim = {}
    uniquefile = {}  # each key is a survex path, and its value is a list of parent files
    expos = {}
    # survexdict = {}  # each key is a directory, and its value is a list of files
    lineno = 0
    insp = ""  # indentation prefix for debug printing, grows with nesting
    callcount = 0
    caverncount = 0
    # Path prefixes that are not cave surveys and are skipped for cave matching.
    ignoreprefix = ["surface", "kataster", "fixedpts", "gpx", "deprecated"]
    ignorenoncave = [
        "caves-1623",
        "caves-1623/2007-NEU",
        "caves-1626",
        "caves-1624",
        "caves-1627",
        "fixedpts/gps/gps00raw",
        "",
    ]
    # Which pass of the loader we are in: the *include tree walk, or the oddments sweep.
    TREE = "tree"
    ODDS = "oddments"
    svxpass = TREE
    includedfilename = ""
    currentsurvexblock = None
    currentsurvexfile = None
    currentcave = None
    caverndate = None
    currentteam = set()  # *team names seen in the current block
    inheritteam = set()  # team inherited from the parent block
    currentdate = None  # *date of the current block
    inheritdate = None  # date inherited from the parent block
    pending = []
    adhocload = False  # True when parsing a single file edited online, not the whole tree
2020-06-24 14:10:13 +01:00
def __init__ ( self ) :
2020-06-29 21:16:13 +01:00
self . caveslist = GetCaveLookup ( )
2020-06-24 14:10:13 +01:00
pass
2023-01-19 21:18:42 +00:00
2020-07-05 17:22:26 +01:00
def LoadSurvexFallThrough ( self , survexblock , line , cmd ) :
2020-06-28 14:42:26 +01:00
if cmd == " require " :
2023-01-19 21:18:42 +00:00
pass # should we check survex version available for processing?
elif cmd in [ " equate " , " fix " , " calibrate " , " cs " , " export " , " case " , " declination " , " infer " , " instrument " , " sd " ] :
pass # we ignore all these, which is fine.
2020-06-24 19:07:11 +01:00
else :
2023-01-19 21:18:42 +00:00
if cmd in [ " include " , " data " , " flags " , " title " , " entrance " , " set " , " units " , " alias " , " ref " ] :
message = (
f " ! Warning. Unparsed [* { cmd } ]: ' { line } ' { survexblock . survexfile . path } - not an error (probably) "
)
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-06-24 19:07:11 +01:00
else :
2023-01-19 21:18:42 +00:00
message = (
f " ! Bad unrecognised svx command: [* { cmd } ] { line } ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-06-24 19:07:11 +01:00
2023-01-29 01:30:10 +00:00
def get_team_inherited ( self , survexblock ) : # survexblock only used for debug mesgs
""" See get_team_pending(survexblock) which gets called at the same time,
when we see a * date line """
global person_pending_cache
if self . inheritteam :
message = (
2023-03-23 01:17:38 +00:00
f " - no *team INHERITING ( { survexblock . parent } )>( { survexblock } ) { survexblock . survexfile . path } ' { self . inheritteam } ' "
2023-01-29 01:30:10 +00:00
)
print ( self . insp + message )
# stash_data_issue(
# parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
# )
return self . inheritteam
2023-03-13 19:44:39 +00:00
    def fix_undated(self, survexblock):
        """Called when we reach *end of a block OR when a QM is seen.
        Checks to see if the block has no *date, in which case it uses the
        inherited date.
        This is fine if the inherited date is from the same SurvexFile,
        but inheriting dates across *include files is almost certainly NOT
        expected behaviour, even though it is syntactically "correct",
        so triggers a Warning.

        Returns the inherited date when one was applied, otherwise None.
        """
        if survexblock.parent.name == "troggle_unseens":
            # Bolluxed up if we try to inherit from this random junk, so don't.
            return

        if self.currentdate:
            # already set for this block, nothing to inherit
            if not survexblock.date:
                # error: a current date exists but was never attached to the block
                message = f"! no survexblock.date but currentdate is set. ({survexblock})-{survexblock.survexfile.path} {self.currentdate=}"
                print(self.insp + message)
                stash_data_issue(
                    parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                )
            return

        if self.inheritdate:
            survexblock.date = self.inheritdate
            self.currentdate = self.inheritdate  # unecessary duplication
            # Not an error, so not put in DataIssues, but is printed to debug output
            message = f" - No *date. INHERITING date '{self.inheritdate:%Y-%m-%d}' from ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}"
            print(self.insp + message)
            # stash_data_issue(
            #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path) # child
            # )
            if survexblock.survexfile != survexblock.parent.survexfile:
                # This is noteworthy, however: the date crossed a file boundary.
                if survexblock.parent.name == "rootblock":
                    # Not a sensible thing to inherit a date from, even if a date exists, which it shouldn't...
                    message = f" - No *date. But not sensible to inherit from rootblock. From ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}"
                    print(self.insp + message)
                    # stash_data_issue(
                    #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
                    # )
                    return
                else:
                    # cross-file inheritance: warn, and file the issue against the PARENT file
                    message = f" - Warning *date '{self.inheritdate:%Y-%m-%d}' INHERITED from DIFFERENT file:\n ({survexblock.parent})-{survexblock.parent.survexfile.path} to ({survexblock})-{survexblock.survexfile.path} {self.inheritdate:%Y-%m-%d}\n {self.stackbegin} {self.inheritdate:%Y-%m-%d}"
                    print(self.insp + message)
                    stash_data_issue(
                        parser="survex", message=message, url=None, sb=(survexblock.parent.survexfile.path)  # PARENT
                    )
            return self.inheritdate
        else:
            # This is not an error in the Expo dataset.
            # Many files just holding *include lines do not have dates.
            # Hardly _any_ of the ARGE survex files have dates!
            pass
            # message = f" ! No survexblock.date inheritable in '{survexblock}' in '{survexblock.survexfile.path}', setting to 1976"
            # print(self.insp + message)
            # stash_data_issue(
            #     parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            # )
            # expoyear = "1976"
        return
2023-01-29 01:30:10 +00:00
def fix_anonymous ( self , survexblock ) :
""" Called when we reach *end of a block
Checks to see if the block has no team attached , in which case it uses the
inherited team .
2023-03-13 19:44:39 +00:00
This is fine if the inherited team is from the same SurvexFile ,
but inheriting team across * include files is almost certainly NOT
expected behaviour , even though it is syntactically " correct " .
2023-01-29 01:30:10 +00:00
If the block has no date , then it is assumed to be an abstract container ,
with no relevant team , and anyway we can ' t attach a PersonExpedition without
knowing the year . Unless its parent has an identified expo """
if survexblock . parent . name == " troggle_unseens " :
2023-01-29 16:23:58 +00:00
# Bolluxed up if we try to inherit from this random junk, so don't.
2023-01-29 01:30:10 +00:00
return
expo = survexblock . expedition # may be None if no *date yet
if not expo :
expo = survexblock . parent . expedition # immediate parent works mostly
if not expo :
return
if not self . currentteam : # i.e. if it is a dated block and has no team
if teamnames := self . get_team_inherited ( survexblock ) : # WALRUS
for tm in teamnames :
personexpedition = GetPersonExpeditionNameLookup ( expo ) . get ( tm . lower ( ) )
if personexpedition :
put_person_on_trip ( survexblock , personexpedition , tm )
return
2020-06-24 19:07:11 +01:00
def LoadSurvexTeam ( self , survexblock , line ) :
2020-07-09 18:06:03 +01:00
""" Interpeting the *team fields means interpreting older style survex as well as current survex standard,
* team Insts Anthony Day - this is how most of our files specify the team member
* team " Anthony Day " notes pictures tape - this is how the survex documentation says it should be done
2023-01-19 21:18:42 +00:00
We have a huge variety of abbreviations and mispellings . The most laconic being
2020-07-09 18:06:03 +01:00
* team gb , bl
2023-01-19 21:18:42 +00:00
2022-11-17 01:24:39 +00:00
personrole is used to record that a person was on a survex trip , NOT the role they played .
2023-01-30 16:18:19 +00:00
( NB PersonLogEntry is a logbook thing , not a survex thing . )
2020-07-09 18:06:03 +01:00
"""
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
def record_team_member ( tm , survexblock ) :
2023-01-19 21:18:42 +00:00
tm = tm . strip ( " \" ' " ) . strip ( )
2022-10-07 21:48:41 +01:00
# Refactor. The dict GetPersonExpeditionNameLookup(expo) indexes by name and has values of personexpedition
2023-01-29 01:30:10 +00:00
# This is convoluted
2023-01-19 21:18:42 +00:00
2022-10-07 21:48:41 +01:00
# we need the current expedition, but if there has been no date yet in the survex file, we don't know which one it is.
# so we can't validate whether the person was on expo or not.
2023-01-19 21:18:42 +00:00
# we will have to attach them to the survexblock anyway, and then do a
2022-10-07 21:48:41 +01:00
# later check on whether they are valid when we get the date.
2023-01-28 21:00:38 +00:00
2023-01-29 01:30:10 +00:00
self . currentteam . add ( tm ) # used in push/pop block code
2023-01-19 21:18:42 +00:00
expo = survexblock . expedition # may be None if no *date yet
2023-01-28 21:00:38 +00:00
2022-10-07 21:48:41 +01:00
if expo :
2023-01-19 21:18:42 +00:00
personexpedition = GetPersonExpeditionNameLookup ( expo ) . get ( tm . lower ( ) )
if personexpedition :
2023-01-28 21:00:38 +00:00
put_person_on_trip ( survexblock , personexpedition , tm )
2023-01-29 01:30:10 +00:00
2023-01-19 21:18:42 +00:00
elif known_foreigner ( tm ) : # note, not using .lower()
2022-11-23 10:41:14 +00:00
message = f " - *team { expo . year } ' { tm } ' known foreigner on *team { survexblock . survexfile . path } ( { survexblock } ) in ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 21:00:38 +00:00
# stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
2022-10-07 22:48:21 +01:00
else :
2022-10-07 21:48:41 +01:00
# we know the date and expo, but can't find the person
2022-11-23 10:41:14 +00:00
message = f " ! *team { expo . year } ' { tm } ' FAIL personexpedition lookup on *team { survexblock . survexfile . path } ( { survexblock } ) in ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-10-07 21:48:41 +01:00
else :
2023-01-28 21:00:38 +00:00
add_to_pending ( survexblock , tm )
2023-01-19 21:18:42 +00:00
# don't know the date yet, so cannot query the table about validity.
2022-10-07 22:48:21 +01:00
# assume the person is valid. It will get picked up with the *date appears
2023-01-28 21:00:38 +00:00
# There are hundreds of these..
message = (
2023-02-11 23:03:30 +00:00
f " - Team before Date: { line } ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-28 21:00:38 +00:00
)
2023-02-11 23:03:30 +00:00
# print(self.insp + message)
2023-01-28 21:00:38 +00:00
# stash_data_issue(
# parser="survex team", message=message, url=None, sb=(survexblock.survexfile.path)
# )
2022-10-07 21:48:41 +01:00
2023-01-19 21:18:42 +00:00
mteammember = self . rx_teammem . match ( line ) # matches the role at the beginning
2022-10-07 21:48:41 +01:00
if not mteammember :
2023-01-19 21:18:42 +00:00
moldstyle = self . rx_teamold . match ( line ) # matches the role at the the end of the string
2022-10-07 21:48:41 +01:00
if moldstyle :
for tm in self . rx_person . split ( moldstyle . group ( 1 ) ) :
if tm :
record_team_member ( tm , survexblock )
# seems to be working
# msg = "! OLD tm='{}' line: '{}' ({}) {}".format(tm, line, survexblock, survexblock.survexfile.path)
# print(msg, file=sys.stderr)
else :
2022-11-23 10:41:14 +00:00
message = f " ! *team { survexblock . survexfile . path } ( { survexblock } ) Weird ' { mteammember . group ( 1 ) } ' oldstyle line: ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-10-07 21:48:41 +01:00
else :
2023-01-19 21:18:42 +00:00
nullmember = self . rx_teamabs . match ( line ) # matches empty role line. Ignore these.
2022-10-07 21:48:41 +01:00
if not nullmember :
2022-11-23 10:41:14 +00:00
message = f " ! *team { survexblock . survexfile . path } ( { survexblock } ) Bad line: ' { line } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-10-07 21:48:41 +01:00
else :
2020-06-24 19:07:11 +01:00
for tm in self . rx_person . split ( mteammember . group ( 2 ) ) :
if tm :
2022-10-07 21:48:41 +01:00
record_team_member ( tm , survexblock )
else :
2023-01-19 21:34:09 +00:00
if mteammember . group ( 2 ) . lower ( ) not in ( " none " , " both " ) :
2022-11-23 10:41:14 +00:00
message = f " ! Weird *team ' { mteammember . group ( 2 ) } ' newstyle line: ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-06-24 19:07:11 +01:00
2020-07-04 13:31:46 +01:00
def LoadSurvexEntrance ( self , survexblock , line ) :
# Not using this yet
pass
2023-01-19 21:18:42 +00:00
2020-07-04 13:31:46 +01:00
def LoadSurvexAlias ( self , survexblock , line ) :
# *alias station - ..
2023-03-12 00:35:37 +00:00
splayalias = re . match ( " (?i) \ s*station \ s* \ - \ s* \ . \ . \ s*$ " , line )
2020-07-04 13:31:46 +01:00
if splayalias :
self . flagsstar [ " splayalias " ] = True
2023-03-12 00:35:37 +00:00
print ( line )
2020-07-04 13:31:46 +01:00
else :
2022-11-23 10:41:14 +00:00
message = f " ! Bad *ALIAS: ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message )
2020-07-04 13:31:46 +01:00
2020-07-07 01:35:58 +01:00
def LoadSurvexUnits ( self , survexblock , line ) :
# all for 4 survex files with measurements in feet. bugger.
2022-11-17 01:24:39 +00:00
# Won't need this once we move to using cavern or d3dump output for lengths
2023-01-19 21:18:42 +00:00
tapeunits = self . rx_tapelng . match ( line ) # tape|length
2020-07-07 01:35:58 +01:00
if not tapeunits :
return
2023-01-19 21:18:42 +00:00
convert = re . match ( " (?i)( \ w*) \ s*([ \ . \ d]+) \ s*( \ w*) " , line )
2020-07-07 02:46:18 +01:00
if convert :
2020-07-08 00:00:56 +01:00
factor = convert . groups ( ) [ 1 ]
self . unitsfactor = float ( factor )
if debugprint :
2023-01-19 21:18:42 +00:00
message = (
f " ! *UNITS NUMERICAL conversion [ { factor } x] ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survexunits " , message = message )
2023-01-19 21:18:42 +00:00
feet = re . match ( " (?i).*feet$ " , line )
metres = re . match ( " (?i).*(METRIC|METRES|METERS)$ " , line )
2020-07-07 01:35:58 +01:00
if feet :
self . units = " feet "
elif metres :
self . units = " metres "
else :
2022-11-23 10:41:14 +00:00
message = f " ! *UNITS in YARDS!? - not converted ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survexunits " , message = message )
2023-01-19 21:18:42 +00:00
2023-03-14 02:12:28 +00:00
def get_expo_from_year ( self , year , line , survexblock ) :
2023-01-19 21:18:42 +00:00
# cacheing to save DB query on every block
2022-10-07 21:48:41 +01:00
if year in self . expos :
expo = self . expos [ year ]
else :
expeditions = Expedition . objects . filter ( year = year )
2023-01-19 21:18:42 +00:00
if len ( expeditions ) != 1 :
message = (
f " ! More than one expedition in year { year } ' { line } ' ( { survexblock } ) { survexblock . survexfile . path } "
)
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2023-03-14 02:12:28 +00:00
if expeditions :
expo = expeditions [ 0 ]
self . expos [ year ] = expo
else :
expo = Expedition . objects . get ( year = " 1976 " )
message = f " ! DATE INCORRECT. There is no expedition for the year { year } . { survexblock . survexfile . path } ( { survexblock } ) - set to 1976. "
print ( self . insp + message )
stash_data_issue ( parser = ' survex ' , message = message , url = None , sb = ( survexblock . survexfile . path ) )
2023-01-19 21:18:42 +00:00
return expo
2020-06-24 17:55:42 +01:00
    def LoadSurvexDate(self, survexblock, line):
        """We now have a valid date for this survexblock, so we now know the expo
        it relates to and can use GetPersonExpeditionNameLookup(expo) to check whether
        the people are correct.
        Note that a *team line can come before AND after a *date line"""

        def setdate_on_survexblock(year):
            """Either *date comes before any *team, in which case there are no prior
            PersonRoles attached, or
            *team came before this *date, in which case the names are only in 'pending'"""
            # NOTE(review): declared global but never read or written in this
            # function — presumably historical; confirm before removing
            global trip_person_record

            expo = self.get_expo_from_year(year, line, survexblock)
            survexblock.expedition = expo

            # Anyone already attached means a *team was resolved with a date already
            # known, i.e. this looks like a second *date in the same block.
            team = get_team_on_trip(survexblock)  # should be empty, should only be in 'pending'
            # team = SurvexPersonRole.objects.filter(survexblock=survexblock)
            if len(team) > 0:
                message = f"! *team {expo.year} Multiple *date in one block? Already someone on team when *date seen. {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                print(self.insp + message)
                stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
            # Names stashed by a *team line seen BEFORE this *date can now be resolved
            # against the expo's name lookup.
            if teamnames := get_team_pending(survexblock):  # WALRUS https://docs.python.org/3/whatsnew/3.8.html#assignment-expressions
                for tm in teamnames:
                    if known_foreigner(tm):
                        # Not an expo member: note it on the console but do not record a DataIssue
                        message = f"- *team {expo.year} '{tm}' known foreigner *date (misordered) {survexblock.survexfile.path} ({survexblock}) in '{line}'"
                        print(self.insp + message)
                        # stash_data_issue(parser='survex', message=message, url=None, sb=(survexblock.survexfile.path))
                    else:
                        pe = GetPersonExpeditionNameLookup(expo).get(tm.lower())
                        if pe:
                            put_person_on_trip(survexblock, pe, tm)
                        else:
                            message = f"! *team {year} '{tm}' FAIL personexpedition lookup on *date {survexblock.survexfile.path} ({survexblock}) "
                            print(self.insp + message)
                            stash_data_issue(
                                parser="survex",
                                message=message,
                                url=None, sb=(survexblock.survexfile.path),
                            )

        oline = line  # keep the unmodified line for messages; 'line' is trimmed below
        perps = get_people_on_trip(survexblock)  # What, you don't know Judge Dredd slang ?
        if len(line) > 10:
            # longer than ISO 'yyyy.mm.dd': either a date range or malformed
            message = "! DATE Warning LONG DATE '{}' ({}) {}".format(oline, survexblock, survexblock.survexfile.path)
            print(self.insp + message)
            stash_data_issue(parser='svxdate', message=message, url=None, sb=(survexblock.survexfile.path))
            if line[10] == "-":  # ie a range, just look at first date
                line = line[0:10]

        if len(line) == 10:  # full 'yyyy.mm.dd' (or with '-' separators)
            year = line[:4]
            # TO DO set to correct Austrian timezone Europe/Vienna ?
            # %m and %d need leading zeros. Source svx files require them.
            survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
        elif len(line) == 7:  # 'yyyy.mm' — month precision only
            year = line[:4]
            message = f"! DATE Warning only accurate to the month, setting to 1st '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp + message)
            stash_data_issue(
                parser="svxdate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m")  # sets to first of month
        elif len(line) == 4:  # 'yyyy' — year precision only
            year = line[:4]
            message = f"! DATE WARNING only accurate to the YEAR, setting to 1st January '{oline}' ({survexblock}) {survexblock.survexfile.path} {perps}"
            print(self.insp + message)
            stash_data_issue(
                parser="svxdate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            survexblock.date = datetime.strptime(line, "%Y")  # sets to January 1st
        elif len(line) == 9 or len(line) == 8:
            # a single-digit day and/or month, e.g. 'yyyy.m.dd' or 'yyyy.mm.d' or 'yyyy.m.d'
            year = line[:4]
            message = f"! DATE format WARNING, single digit day or month number,'{oline}' [{line[-5]}][{line[-2]}] ({survexblock}) {survexblock.survexfile.path}"
            print(self.insp + message)
            stash_data_issue(
                parser="svxdate", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            if line[-2] == "-" or line[-2] == ".":
                # single-digit DAY: pad it to two digits
                line = line[:-1] + '0' + line[-1]
                survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
                print(f"! DATE -2 '{line}' '{survexblock.date}'")
            elif line[-5] == "-" or line[-5] == ".":
                # single-digit MONTH: pad it to two digits
                line = line[:-4] + '0' + line[-4:]
                survexblock.date = datetime.strptime(line.replace(".", "-"), "%Y-%m-%d")
                print(f"! DATE -5 '{line}' '{survexblock.date}'")
            else:
                # 8/9 chars but the separators are not where we expect: give up on parsing
                year = line[:4]
                message = (
                    f"! DATE Error SHORT LINE '{line}' '{oline}-{survexblock}' ({type(survexblock)}) {survexblock.survexfile.path}"
                )
                print(self.insp + message)
                stash_data_issue(
                    parser="svxdate", message=message, url=None, sb=(survexblock.survexfile.path)
                )
        else:
            # these errors are reporting the wrong survexblock, which is actually a SurvexFile (!)
            # see To Do notes on how to trigger this. Still needs investigating..
            message = (
                f"! DATE Error unrecognised '{oline}-{survexblock}' ({type(survexblock)}) {survexblock.survexfile.path}"
            )
            print(self.insp + message)
            stash_data_issue(
                parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
            )
            print(f"{type(survexblock)=}")  # survexblock.parent fails as a SurvexFile has no .parent ...ugh.
            print(f"{survexblock.survexpath=}")
            print(f"{survexblock.survexfile=}")
            # Not setting 'year' crashes entire import on databaseReset.
            year = line[:4]
            perps = get_people_on_trip(survexblock)
            # raise

        # 'year' can still be unbound if a branch above raised before assigning it;
        # the NameError fallback pins such blocks to 1976 rather than aborting the import.
        try:
            setdate_on_survexblock(year)
        except NameError:
            print(f">> why is year not set ?! {survexblock.survexfile.path}")
            setdate_on_survexblock("1976")
        if survexblock.date:
            # do not actually need a distict variable 'currentdate' but it makes the code clearer
            self.currentdate = survexblock.date
        survexblock.save()
2020-07-08 00:00:56 +01:00
2022-10-05 21:18:11 +01:00
def LoadSurvexLeg ( self , survexblock , sline , comment , svxline ) :
2020-06-24 14:10:13 +01:00
""" This reads compass, clino and tape data but only keeps the tape lengths,
the rest is discarded after error - checking .
2020-07-07 01:35:58 +01:00
Now skipping the error checking - returns as soon as the leg is not one we count .
2023-01-19 21:18:42 +00:00
REPLACE ALL THIS by reading the . log output of cavern for the file .
2022-11-17 01:24:39 +00:00
But we need the lengths per Block , not by File . dump3d will do lengths per block .
2020-06-24 14:10:13 +01:00
"""
2020-07-04 01:10:17 +01:00
invalid_clino = 180.0
invalid_compass = 720.0
invalid_tape = 0.0
2020-07-04 13:31:46 +01:00
if self . flagsstar [ " skiplegs " ] :
2020-07-06 21:46:19 +01:00
if debugprint :
print ( " skip in " , self . flagsstar , survexblock . survexfile . path )
2020-07-04 13:31:46 +01:00
return
2020-07-04 01:10:17 +01:00
2020-07-06 21:46:19 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " ! LEG datastar type: { self . datastar [ ' type ' ] . upper ( ) } ++ { survexblock . survexfile . path } \n { sline } " )
2020-07-03 17:22:15 +01:00
if self . datastar [ " type " ] == " passage " :
2020-07-03 14:53:36 +01:00
return
2020-07-03 17:22:15 +01:00
if self . datastar [ " type " ] == " cartesian " :
2020-07-03 14:53:36 +01:00
return
2020-07-03 17:22:15 +01:00
if self . datastar [ " type " ] == " nosurvey " :
2020-07-03 14:53:36 +01:00
return
2020-07-03 17:22:15 +01:00
if self . datastar [ " type " ] == " diving " :
2020-07-03 14:53:36 +01:00
return
2020-07-03 17:22:15 +01:00
if self . datastar [ " type " ] == " cylpolar " :
2020-07-03 14:53:36 +01:00
return
2020-07-07 01:35:58 +01:00
if debugprint :
2023-01-19 21:18:42 +00:00
print (
f " !! LEG data lineno: { self . lineno } \n !! sline: ' { sline } ' \n !! datastar[ ' tape ' ]: { self . datastar [ ' tape ' ] } "
)
if self . datastar [ " type " ] != " normal " :
2020-07-03 14:53:36 +01:00
return
2023-01-19 21:18:42 +00:00
2022-10-05 21:18:11 +01:00
ls = sline . lower ( ) . split ( )
# NORMAL, so there should be 5 fields
# from the content, this is clearly reading fixedpts/gps/gps00raw.svx, but not reporting it by that name
if len ( ls ) < 5 :
print ( " ! Fewer than 5 fields in NORMAL in " , survexblock . survexfile . path , survexfile , survexfile . parent )
print ( " datastar NORMAL: " , self . datastar )
print ( f " Line (split): { ls } , comment: { comment } " )
print ( f " Line: { sline } \n svxline: { svxline } " )
2023-01-19 21:18:42 +00:00
message = f " ! Not 5 fields in line ' { sline . lower ( ) } ' { self . datastar =} { ls =} in \n { survexblock } \n { survexblock . survexfile } \n { survexblock . survexfile . path } "
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-10-05 21:18:11 +01:00
2023-01-19 21:18:42 +00:00
datastar = self . datastar # shallow copy: alias but the things inside are the same things
2020-06-24 14:10:13 +01:00
survexleg = SurvexLeg ( )
2023-01-19 21:18:42 +00:00
2020-07-04 13:31:46 +01:00
# skip all splay legs
2023-03-12 00:35:37 +00:00
try :
if " splayalias " in self . flagsstar :
if ls [ datastar [ " from " ] ] == " - " or ls [ datastar [ " to " ] ] == " - " :
if debugprint :
print ( " Aliased splay in " , survexblock . survexfile . path )
return
2022-07-23 17:26:47 +01:00
if ls [ datastar [ " from " ] ] == " .. " or ls [ datastar [ " from " ] ] == " . " :
2020-07-06 21:46:19 +01:00
if debugprint :
2022-07-23 17:26:47 +01:00
print ( " Splay in " , survexblock . survexfile . path )
2020-07-04 13:31:46 +01:00
return
2022-07-23 17:26:47 +01:00
if ls [ datastar [ " to " ] ] == " .. " or ls [ datastar [ " to " ] ] == " . " :
2020-07-06 21:46:19 +01:00
if debugprint :
2022-07-23 17:26:47 +01:00
print ( " Splay in " , survexblock . survexfile . path )
2020-07-04 13:31:46 +01:00
return
2023-03-12 00:35:37 +00:00
if ls [ datastar [ " to " ] ] == " - " :
message = f " ! Suspected splay, not declared, in line { ls } in { survexblock . survexfile . path } "
print ( self . insp + message )
stash_data_issue (
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
)
return
2022-07-23 17:26:47 +01:00
except :
2023-01-19 21:18:42 +00:00
message = f " ! datastar parsing from/to incorrect in line { ls } in { survexblock . survexfile . path } "
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-07-23 17:26:47 +01:00
return
2020-07-04 13:31:46 +01:00
2020-07-03 14:53:36 +01:00
try :
2020-07-03 17:22:15 +01:00
tape = ls [ datastar [ " tape " ] ]
2020-07-03 14:53:36 +01:00
except :
2023-01-19 21:18:42 +00:00
message = f " ! datastar parsing incorrect in line { ls } in { survexblock . survexfile . path } "
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
survexleg . tape = invalid_tape
return
2020-07-04 13:31:46 +01:00
# e.g. '29/09' or '(06.05)' in the tape measurement
2020-07-04 01:10:17 +01:00
# tape = tape.replace("(","") # edited original file (only one) instead
# tape = tape.replace(")","") # edited original file (only one) instead
# tape = tape.replace("/",".") # edited original file (only one) instead.
2020-07-03 14:53:36 +01:00
try :
2020-07-08 00:00:56 +01:00
if self . unitsfactor :
tape = float ( tape ) * self . unitsfactor
if debugprint :
2022-11-23 10:41:14 +00:00
message = f " ! Units: Length scaled { tape } m ' { ls } ' in ( { survexblock . survexfile . path } ) units: { self . units } factor: { self . unitsfactor } x "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
if self . units == " feet " :
2020-07-08 00:00:56 +01:00
tape = float ( tape ) / METRESINFEET
if debugprint :
2022-11-23 10:41:14 +00:00
message = f " ! Units: converted to { tape : .3f } m from { self . units } ' { ls } ' in ( { survexblock . survexfile . path } ) "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-08 00:00:56 +01:00
survexleg . tape = float ( tape )
2020-07-04 01:10:17 +01:00
self . legsnumber + = 1
2020-07-03 14:53:36 +01:00
except ValueError :
2022-11-23 10:41:14 +00:00
message = f " ! Value Error: Tape misread in line ' { ls } ' in { survexblock . survexfile . path } units: { self . units } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
survexleg . tape = invalid_tape
try :
2020-07-04 13:31:46 +01:00
survexblock . legslength + = survexleg . tape
2023-01-19 21:18:42 +00:00
self . slength + = survexleg . tape
2020-07-03 14:53:36 +01:00
except ValueError :
2023-01-19 21:18:42 +00:00
message = (
f " ! Value Error: Tape length not added ' { ls } ' in { survexblock . survexfile . path } units: { self . units } "
)
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
try :
2020-07-03 17:22:15 +01:00
lcompass = ls [ datastar [ " compass " ] ]
2020-07-03 14:53:36 +01:00
except :
2023-01-19 21:18:42 +00:00
message = f " ! Value Error: Compass not found in line { ls } in { survexblock . survexfile . path } "
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
lcompass = invalid_compass
try :
2020-07-03 17:22:15 +01:00
lclino = ls [ datastar [ " clino " ] ]
2020-07-03 14:53:36 +01:00
except :
print ( ( " ! Clino misread in " , survexblock . survexfile . path ) )
2020-07-03 17:22:15 +01:00
print ( ( " datastar: " , datastar ) )
2020-07-03 14:53:36 +01:00
print ( ( " Line: " , ls ) )
2023-01-19 21:18:42 +00:00
message = f " ! Value Error: Clino misread in line ' { sline . lower ( ) } ' { datastar =} { self . datastar =} { ls =} in \n { survexblock } \n { survexblock . survexfile } \n { survexblock . survexfile . path } "
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
lclino = invalid_clino
if lclino == " up " :
survexleg . clino = 90.0
lcompass = invalid_compass
elif lclino == " down " :
survexleg . clino = - 90.0
lcompass = invalid_compass
elif lclino == " - " or lclino == " level " :
survexleg . clino = - 90.0
try :
survexleg . compass = float ( lcompass )
except ValueError :
print ( ( " ! Compass misread in " , survexblock . survexfile . path ) )
2020-07-03 17:22:15 +01:00
print ( ( " datastar: " , datastar ) )
2020-07-03 14:53:36 +01:00
print ( ( " Line: " , ls ) )
2023-01-19 21:18:42 +00:00
message = " ! Value Error: lcompass: ' {} ' line {} in ' {} ' " . format ( lcompass , ls , survexblock . survexfile . path )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survexleg " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-03 14:53:36 +01:00
survexleg . compass = invalid_compass
2020-07-04 01:10:17 +01:00
# delete the object to save memory
2020-07-03 14:53:36 +01:00
survexleg = None
2023-01-19 21:18:42 +00:00
2020-06-24 22:46:18 +01:00
def LoadSurvexRef ( self , survexblock , args ) :
2023-08-02 11:41:12 +01:00
""" Interpret the *ref record, and all the many variants
the ' letter ' was X for electronic wallets but we no longer do this . So the code
that handles ' letter ' can be removed .
"""
2023-01-19 21:18:42 +00:00
# print(self.insp+ "*REF ---- '"+ args +"'")
url = get_offending_filename ( survexblock . survexfile . path )
2020-06-24 22:46:18 +01:00
# *REF but also ; Ref years from 1960 to 2039
2020-07-04 01:10:17 +01:00
refline = self . rx_ref_text . match ( args )
if refline :
# a textual reference such as "1996-1999 Not-KH survey book pp 92-95"
2023-01-19 21:18:42 +00:00
print ( f " { self . insp } *REF quoted text so ignored: { args } in { survexblock . survexfile . path } " )
2020-07-04 01:10:17 +01:00
return
2023-01-19 21:18:42 +00:00
if len ( args ) < 4 :
2022-11-23 10:41:14 +00:00
message = f " ! Empty or BAD *REF statement ' { args } ' in ' { survexblock . survexfile . path } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2020-06-24 22:46:18 +01:00
return
2020-06-24 14:10:13 +01:00
2020-06-24 22:46:18 +01:00
argsgps = self . rx_argsref . match ( args )
if argsgps :
yr , letterx , wallet = argsgps . groups ( )
else :
2022-09-18 21:53:04 +01:00
perps = get_people_on_trip ( survexblock )
2022-10-06 19:02:15 +01:00
message = f " ! Wallet *REF bad in ' { survexblock . survexfile . path } ' malformed id ' { args } ' { perps } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2020-06-24 22:46:18 +01:00
return
2020-06-24 14:10:13 +01:00
2020-06-24 14:49:39 +01:00
if not letterx :
letterx = " "
else :
letterx = " X "
2023-08-02 11:41:12 +01:00
message = f " ! Wallet *REF has LETTER in ' { survexblock . survexfile . path } ' malformed id ' { args } ' { perps } "
print ( self . insp + message )
stash_data_issue ( parser = " survex " , message = message , url = url )
2023-01-19 21:18:42 +00:00
if len ( wallet ) < 2 :
2020-06-24 14:49:39 +01:00
wallet = " 0 " + wallet
2023-01-19 21:18:42 +00:00
if not ( int ( yr ) > 1960 and int ( yr ) < 2050 ) :
message = " ! Wallet year out of bounds {yr} ' {refscan} ' {survexblock.survexfile.path} "
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2023-01-19 21:18:42 +00:00
2022-11-23 10:41:14 +00:00
refscan = f " { yr } # { letterx } { wallet } "
2020-06-24 22:46:18 +01:00
try :
2023-01-19 21:18:42 +00:00
if int ( wallet ) > 99 :
2022-11-23 10:41:14 +00:00
message = f " ! Wallet *REF { refscan } - very big (more than 99) so probably wrong in ' { survexblock . survexfile . path } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2020-06-24 22:46:18 +01:00
except :
2022-11-23 10:41:14 +00:00
message = f " ! Wallet *REF { refscan } - not numeric in ' { survexblock . survexfile . path } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2023-01-19 21:18:42 +00:00
manywallets = Wallet . objects . filter (
walletname = refscan
) # assumes all wallets found in earlier pass of data import
2021-04-26 19:22:29 +01:00
if manywallets :
if len ( manywallets ) > 1 :
2022-11-23 10:41:14 +00:00
message = f " ! Wallet *REF { refscan } - more than one found { len ( manywallets ) } wallets in db with same id { survexblock . survexfile . path } "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2023-01-19 21:18:42 +00:00
2022-10-06 19:02:15 +01:00
if survexblock . scanswallet :
if survexblock . scanswallet . walletname != refscan :
message = f " ! Wallet *REF { refscan } in { survexblock . survexfile . path } - Already a DIFFERENT wallet is set for this block ' { survexblock . scanswallet . walletname } ' "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2022-10-06 19:02:15 +01:00
else :
2023-01-19 21:18:42 +00:00
survexblock . scanswallet = manywallets [ 0 ] # this is a ForeignKey field
2022-10-06 19:02:15 +01:00
survexblock . save ( )
2022-12-20 00:07:55 +00:00
# This is where we should check that the wallet JSON contains a link to the survexfile
# and that the JSON date and walletdate are set correctly to the survexblock date.
2023-02-01 23:43:05 +00:00
set_walletdate ( survexblock . scanswallet )
2020-06-24 14:49:39 +01:00
else :
2022-09-18 21:53:04 +01:00
perps = get_people_on_trip ( survexblock )
2022-10-06 19:02:15 +01:00
message = f " ! Wallet *REF bad in ' { survexblock . survexfile . path } ' ' { refscan } ' NOT in database i.e. wallet does not exist { perps } . "
2023-01-19 21:18:42 +00:00
print ( self . insp + message )
2023-01-28 15:10:39 +00:00
stash_data_issue ( parser = " survex " , message = message , url = url )
2020-06-24 14:49:39 +01:00
2023-01-19 21:18:42 +00:00
def LoadSurvexDataNormal ( self , survexblock , args ) :
2020-07-02 16:25:51 +01:00
""" Sets the order for data elements in this and following blocks, e.g.
* data normal from to compass clino tape
* data normal from to tape compass clino
We are only collecting length data so we are disinterested in from , to , LRUD etc .
"""
2020-07-03 17:22:15 +01:00
# datastardefault = { # included here as reference to help understand the code
2023-01-19 21:18:42 +00:00
# "type":"normal",
# "t":"leg",
# "from":0,
# "to":1,
# "tape":2,
# "compass":3,
# "clino":4}
2020-07-03 17:22:15 +01:00
datastar = copy . deepcopy ( self . datastardefault )
2020-07-02 16:25:51 +01:00
if args == " " :
# naked '*data' which is relevant only for passages. Ignore. Continue with previous settings.
return
2023-01-19 21:18:42 +00:00
# DEFAULT | NORMAL | CARTESIAN| NOSURVEY |PASSAGE | TOPOFIL | CYLPOLAR | DIVING
ls = args . lower ( ) . split ( )
2020-07-03 14:53:36 +01:00
if ls [ 0 ] == " default " :
2020-07-03 17:22:15 +01:00
self . datastar = copy . deepcopy ( self . datastardefault )
2020-07-03 14:53:36 +01:00
elif ls [ 0 ] == " normal " or ls [ 0 ] == " topofil " :
2020-07-03 17:22:15 +01:00
if not ( " from " in datastar and " to " in datastar ) :
2023-01-19 21:18:42 +00:00
message = (
f " ! - Unrecognised *data normal statement ' { args } ' { survexblock . name } | { survexblock . survexpath } "
)
2020-07-02 16:25:51 +01:00
print ( message )
2023-01-19 21:18:42 +00:00
print ( message , file = sys . stderr )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2020-07-02 16:25:51 +01:00
return
else :
2020-07-03 17:22:15 +01:00
datastar = self . datastardefault
2020-07-02 16:25:51 +01:00
# ls = ["normal", "from", "to", "tape", "compass", "clino" ]
2023-01-19 21:18:42 +00:00
for i in range ( 1 , len ( ls ) ) : # len[0] is "normal"
if ls [ i ] . lower ( ) == " newline " :
2022-07-23 18:05:58 +01:00
message = f " ! - ABORT *data statement has NEWLINE in it in { survexblock . survexfile . path } . Not parsed by troggle. ' { args } ' "
2022-07-23 17:26:47 +01:00
print ( message )
2023-01-19 21:18:42 +00:00
print ( message , file = sys . stderr )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-07-23 17:26:47 +01:00
return False
2023-01-19 21:18:42 +00:00
if ls [ i ] in [ " bearing " , " compass " ] :
datastar [ " compass " ] = i - 1
if ls [ i ] in [ " clino " , " gradient " ] :
datastar [ " clino " ] = i - 1
if ls [ i ] in [ " tape " , " length " ] :
datastar [ " tape " ] = i - 1
2020-07-03 17:22:15 +01:00
self . datastar = copy . deepcopy ( datastar )
2020-07-02 16:25:51 +01:00
return
2022-10-05 19:11:18 +01:00
elif ls [ 0 ] == " passage " or ls [ 0 ] == " nosurvey " or ls [ 0 ] == " diving " or ls [ 0 ] == " cylpolar " :
2023-01-19 21:18:42 +00:00
# message = " ! - *data {} blocks ignored. {}|{}" '{}' .format(ls[0].upper(), survexblock.name, survexblock.survexpath, args)
2020-07-04 01:10:17 +01:00
# print(message)
2023-01-19 21:18:42 +00:00
# print(message,file=sys.stderr)
2023-01-28 15:10:39 +00:00
# stash_data_issue(parser='survex', message=message)
2022-10-05 19:11:18 +01:00
self . datastar [ " type " ] = ls [ 0 ]
2023-01-19 21:18:42 +00:00
elif ls [ 0 ] == " cartesian " : # We should not ignore this ?! Default for Germans ?
# message = " ! - *data {} blocks ignored. {}|{}" '{}' .format(ls[0].upper(), survexblock.name, survexblock.survexpath, args)
2022-10-05 19:11:18 +01:00
# print(message)
2023-01-19 21:18:42 +00:00
# print(message,file=sys.stderr)
2023-01-28 15:10:39 +00:00
# stash_data_issue(parser='survex', message=message)
2020-07-03 17:22:15 +01:00
self . datastar [ " type " ] = ls [ 0 ]
2020-06-27 17:55:59 +01:00
else :
2022-11-23 10:41:14 +00:00
message = f " ! - Unrecognised *data statement ' { args } ' { survexblock . name } | { survexblock . survexpath } "
2020-07-02 16:25:51 +01:00
print ( message )
2023-01-19 21:18:42 +00:00
print ( message , file = sys . stderr )
2023-01-28 15:10:39 +00:00
stash_data_issue (
2023-01-28 21:00:38 +00:00
parser = " survex " , message = message , url = None , sb = ( survexblock . survexfile . path )
2023-01-19 21:18:42 +00:00
)
2022-10-05 19:11:18 +01:00
self . datastar [ " type " ] = ls [ 0 ]
2020-06-27 17:55:59 +01:00
2020-07-03 14:53:36 +01:00
def LoadSurvexFlags(self, args):
    """Interpret a survex '*flags' statement.

    Valid flags are DUPLICATE, SPLAY, and SURFACE, and a flag may be preceded
    with NOT to turn it off. Default values are NOT any of them.
    Resets self.flagsstar from the defaults, applies the flags named in args,
    and sets 'skiplegs' when surface or splay legs are in force.
    """
    self.flagsstar = copy.deepcopy(self.flagsdefault)
    # Normalise "not <flag>" into the single token "not<flag>"
    args = self.rx_flagsnot.sub("not", args)
    # args.split() already yields the token list; the original copied it
    # element-by-element into a second list for no reason.
    flags = args.split()
    if debugprint:
        print(
            f" ^ flagslist:{flags}",
        )

    # Fixed-order membership tests preserve the original precedence exactly:
    # a 'notX' token always overrides an 'X' token in the same statement,
    # regardless of the order they appear in args.
    for token, key, value in (
        ("duplicate", "duplicate", True),
        ("surface", "surface", True),
        ("splay", "splay", True),
        ("notduplicate", "duplicate", False),
        ("notsurface", "surface", False),
        ("notsplay", "splay", False),
    ):
        if token in flags:
            self.flagsstar[key] = value

    # actually we do want to count duplicates as this is for "effort expended in surveying underground"
    if self.flagsstar["surface"] is True or self.flagsstar["splay"] is True:
        self.flagsstar["skiplegs"] = True
    if debugprint:
        print(
            f" $ flagslist:{flags}",
        )
2020-06-27 17:55:59 +01:00
2020-06-29 21:16:13 +01:00
def GetSurvexDirectory(self, headpath):
    """Return the primary survex file for the directory 'headpath'.

    The first file seen in a directory becomes that directory's primary
    survex file and is cached (case-insensitively) in self.svxprim. 'First'
    is defined by the *include ordering, which is the order we process files
    in, so this is correct. An empty headpath is normal for a .svx file in
    the root of the :loser: repo and simply yields the current file.
    """
    if not headpath:
        return self.currentsurvexfile
    # setdefault: insert the current file as primary only on first sighting,
    # then always hand back whatever is cached for this directory.
    return self.svxprim.setdefault(headpath.lower(), self.currentsurvexfile)
2020-06-29 21:16:13 +01:00
2022-07-23 17:26:47 +01:00
def ReportNonCaveIncludes(self, headpath, includelabel, depth):
    """Ignore surface, kataser and gpx *include survex files.

    Called when an included survex file is not in a recognised cave
    directory. Lazily loads the pending-caves list on first call, silently
    ignores known non-cave paths, and otherwise creates a new Cave and
    reports a warning for the unknown cave identifier.
    """
    if not self.pending:
        # First call: load the known-pending cave ids from expoweb.
        self.pending = set()
        fpending = Path(settings.CAVEDESCRIPTIONS, "pendingcaves.txt")
        if fpending.is_file():
            with open(fpending, "r") as fo:
                cids = fo.readlines()
                for cid in cids:
                    id = cid.strip().rstrip("\n").upper()
                    # NOTE(review): this tests the raw line 'cid', not the
                    # normalised 'id' -- a line with leading whitespace would
                    # be given the "1623-" prefix even if it already names an
                    # area. Looks like it should be id.startswith -- confirm.
                    if cid.startswith("162"):
                        self.pending.add(id)
                    else:
                        self.pending.add("1623-" + id)

    if headpath in self.ignorenoncave:
        # Exact-match list of paths known not to be caves: silently ignore.
        message = f" - {headpath} is <ignorenoncave> (while creating '{includelabel}' sfile & sdirectory)"
        # print("\n"+message)
        # print("\n"+message,file=sys.stderr)
        return
    for i in self.ignoreprefix:
        if headpath.startswith(i):
            # Path prefix (e.g. surface surveys) known not to be a cave.
            message = (
                f" - {headpath} starts with <ignoreprefix> (while creating '{includelabel}' sfile & sdirectory)"
            )
            # print("\n"+message)
            # print("\n"+message,file=sys.stderr)
            return
    # Guess a cave id of the form <area>-<name> from the directory path,
    # e.g. "caves-1623/290/..." -> "1623-290" (slice offsets assume the
    # "caves-" prefix layout -- TODO confirm for other area trees).
    caveid = f"{headpath[6:10]}-{headpath[11:]}".upper()
    if caveid in self.pending:
        # Yes we didn't find this cave, but we know it is a pending one. So not an error.
        # print(f'! ALREADY PENDING {caveid}',file=sys.stderr)
        return
    id = caveid[5:]  # the part after the "1623-" area prefix
    if id in self.pending:
        print(f"! ALREADY PENDING {id}", file=sys.stderr)
        return
    # It is too late to add it to the pending caves list here, they were already
    # processed in parsers/caves.py. So we have to do a bespoke creation.
    cave = create_new_cave(includelabel)

    message = f"! Warning: cave identifier '{caveid}' or {id} (guessed from file path) is not a known cave. Need to add to expoweb/cave_data/pendingcaves.txt ? In '{includelabel}.svx' at depth:[{len(depth)}]."
    print("\n" + message)
    print("\n" + message, file=sys.stderr)
    print(f"{self.pending}", end="", file=sys.stderr)
    stash_data_issue(parser="survex", message=message, url=None, sb=(includelabel))
    # It is too late to add it to pending caves here, they were already processed in parsers/caves.py
    # and something else is creating them...
    # cave = create_new_cave(includelabel)
2020-07-01 22:49:38 +01:00
def LoadSurvexFile(self, svxid):
    """Creates SurvexFile in the database, and SurvexDirectory if needed.

    Creates a new current survexfile and valid .survexdirectory.
    Inspects the parent folder of the survexfile and uses that to decide if
    this is a cave we know.

    If we see a duplicate cave, this is TOO LATE. It has already been included
    into the long linear file. We prevent duplication when the long linear
    file is created, so if we see a duplicate here, it is a serious error.

    The survexblock passed-in is not necessarily the parent. FIX THIS.
    """
    global dup_includes
    if debugprint:
        print(f" # datastack in LoadSurvexFile:{svxid} 'type':", end="")
        for dict in self.datastack:
            print(f"'{dict['type'].upper()}'", end="")
        print("")

    depth = " " * self.depthbegin
    print("{:2}{} - NEW survexfile: '{}'".format(self.depthbegin, depth, svxid))
    headpath = os.path.dirname(svxid)

    # 'created' is False when a SurvexFile with this path already exists,
    # i.e. this file has been *included twice -- see docstring.
    newfile, created = SurvexFile.objects.update_or_create(path=svxid)
    if not created:
        dup_includes += 1
        message = f" ! DUPLICATE SurvexFile '{svxid}' create attempt in LoadSurvexFile()"
        print(message)
        # print(message, file=sys.stderr)
        stash_data_issue(parser="survex", message=message, url=f"/survexfile/{svxid}")
        self.currentsurvexfile = newfile
        return  # abort as everything already done for object creation

    newfile.save()  # until we do this there is no internal id so no foreign key works
    self.currentsurvexfile = newfile
    # primary = first survex file seen in this directory (in *include order)
    primary = self.GetSurvexDirectory(headpath)
    newfile.primary = primary

    # REPLACE all this IdentifyCave() stuff with GCaveLookup ?
    cave = IdentifyCave(headpath)  # cave already exists in db
    if not cave:
        # probably a surface survey, or a cave in a new area
        # e.g. 1624 not previously managed, and not in the pending list
        self.ReportNonCaveIncludes(headpath, svxid, depth)
        # try again: ReportNonCaveIncludes() may have created the cave
        cave = IdentifyCave(headpath)
    if cave:
        newfile.cave = cave
        # print(f"\n - New directory '{newdirectory}' for cave '{cave}'",file=sys.stderr)

    if not newfile.primary:
        message = f" ! .primary NOT SET in new SurvexFile {svxid} "
        print(message)
        print(message, file=sys.stderr)
        stash_data_issue(parser="survex", message=message)
    self.currentsurvexfile.save()  # django insists on this although it is already saved !?
2020-06-28 14:42:26 +01:00
def ProcessIncludeLine(self, included):
    """Handle a ';*include' marker in the long linear file: everything from
    this point on comes from the named included file, so make that file the
    current one and remember the old one on the stack."""
    global debugprint
    svxid = included.group(1)
    if debugprinttrigger.lower() == svxid.lower():
        debugprint = True
    self.LoadSurvexFile(svxid)
    self.stacksvxfiles.append(self.currentsurvexfile)
def ProcessEdulcniLine(self, edulcni):
    """Handle a ';*edulcni' marker in the long linear file: we are popping
    back out of the contents of an included file. Saves the current
    survexfile object in the db to include the data parsed from it."""
    global debugprint
    svxid = edulcni.group(1)
    if debugprint:
        depth = " " * self.depthbegin
        print(f"{self.depthbegin:2}{depth} - Edulcni survexfile: '{svxid}'")
    if debugprinttrigger.lower() == svxid.lower():
        debugprint = False
    self.currentsurvexfile.save()
    self.currentsurvexfile = self.stacksvxfiles.pop()
2023-03-14 03:27:05 +00:00
2023-03-13 19:01:30 +00:00
def TickSurvexQM(self, survexblock, qmtick):
    """Interpret the specially formatted comment which is a QM TICKED statement.

    Finds the QM object in the same block with the same number, marks it
    ticked and records the completion description. qmtick groups are:
    (1) QM number, (2) tick date, (3) completion description.
    """
    # Now we need to find the correct QM object. It will be in the same block and have the same number.
    try:
        # could try to search on blockname instead?
        # but the QMn TICK has to be in the same block anyway
        qm = QM.objects.filter(block=survexblock, number=int(qmtick.group(1)))
    except Exception:
        # BUGFIX: the original message interpolated the unbound name 'qm'
        # here (NameError inside the handler) and then fell through to qm[0].
        message = f'! QM TICK find FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + completion_description:"{qmtick.group(3)}"'
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
        return
    if len(qm) == 0:
        # BUGFIX: previously fell through to qm[0] and crashed with IndexError.
        message = f'! QM TICK not found QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + completion_description:"{qmtick.group(3)}"'
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
        return
    if len(qm) > 1:
        # Report duplicates but still tick the first one, as before.
        message = f'! QM TICK MULTIPLE found FAIL QM{qmtick.group(1)} date:"{qmtick.group(2)}" in "{survexblock.survexfile.path}" + completion_description:"{qmtick.group(3)}"'
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
    ticked_qm = qm[0]
    ticked_qm.ticked = True
    # ticked_qm.ticked_date = qmtick.group(2) # not in data model yet
    ticked_qm.completion_description = qmtick.group(3)
    ticked_qm.save()
def LoadSurvexQM(self, survexblock, qmline):
    """Interpret the specially formatted comment which is a QM definition.

    qmline groups: (1) number, (2) grade (TICK or [A-DX]), (3) nearest
    station, (4) resolution station or '-', (5) description.
    Creates a QM object attached to this survexblock; a TICK line is
    delegated to TickSurvexQM().
    """
    # Spec of QM in SVX files:
    # ;Serial number  grade(A/B/C/D/V/X)  nearest-station  resolution-station  description
    # ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
    # ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
    # ;QM1 A B6 - see plan drawing there is definitely a QM
    insp = self.insp

    # create a short, hopefully-unique name for this block to be used in the QM id
    if len(survexblock.name) < 7:
        blockname = survexblock.name
    else:
        blockname = survexblock.name[:6] + survexblock.name[-1:]

    qm_no = qmline.group(1)  # this is NOT unique across multiple survex files
    qm_grade = qmline.group(2).strip().upper()  # TICK or [a-dA-DvVxX?]
    if qm_grade == "TICK":
        self.TickSurvexQM(survexblock, qmline)
        return

    if qm_grade not in ["A", "B", "C", "D", "X"]:  # "V", "?" not allowed in survex file QMs
        message = f"! QM{qm_no} INVALID code '{qm_grade}' [{blockname}] '{survexblock.survexfile.path}'"
        print(insp + message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )

    qm_nearest = qmline.group(3)  # usual closest survey station
    resolution_station_name = qmline.group(4)
    # a real resolution station (anything but "-") means this QM is already ticked off
    qm_ticked = resolution_station_name != "-"

    qm_notes = qmline.group(5)

    # NB none of the SurveyStations are in the DB now, so if we want to link to a SurvexStation
    # we would have to create one. But that is not obligatory and no QMs loaded from CSVs have one.
    # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date.
    # We don't need this anymore so we don't need to create a placeholder logbook entry.
    if survexblock.survexfile.cave:
        survexblock.survexfile.cave.slug()

    self.fix_undated(survexblock)  # null-op if already set
    try:
        expoyear = str(survexblock.date.year)
    except Exception:  # narrowed from bare 'except:'; survexblock.date may be unset
        print(f">> why is survexblock not set ?! in LoadSurvexQM()/n{survexblock.survexfile.path}")
        expoyear = "1970"

    try:
        QM.objects.create(
            number=qm_no,
            # nearest_station=a_survex_station_object, # can be null
            resolution_station_name=resolution_station_name,
            nearest_station_name=qm_nearest,
            ticked=qm_ticked,
            grade=qm_grade.upper(),
            location_description=qm_notes,
            block=survexblock,  # only set for survex-imported QMs
            blockname=blockname,  # only set for survex-imported QMs
            expoyear=expoyear,
            cave=survexblock.survexfile.cave,
        )
        # BUGFIX: QM.objects.create() already saves the object; the original
        # statement here was 'qm.save' with MISSING parentheses -- a no-op
        # attribute access -- so it has been removed.
    except Exception:  # narrowed from bare 'except:'
        qms = QM.objects.filter(
            number=qm_no,
            # nearest_station=a_survex_station_object, # can be null
            resolution_station_name=resolution_station_name,
            nearest_station_name=qm_nearest,
            ticked=qm_ticked,
            grade=qm_grade.upper(),
            location_description=qm_notes,
            block=survexblock,  # only set for survex-imported QMs
            blockname=blockname,  # only set for survex-imported QMs
            expoyear=expoyear,
            cave=survexblock.survexfile.cave,
        )
        message = f"! QM{qm_no} FAIL to create {qm_nearest} in '{survexblock.survexfile.path}' found {len(qms)}: {qms}"
        print(insp + message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
2020-06-24 14:49:39 +01:00
2023-03-13 16:31:42 +00:00
def ProcessQM(self, survexblock, qml, comment):
    """Process the line beginning ';QM', which is a QM new declaration or a
    QM TICK closing declaration.

    It _should_ recognise a non-numeric survey station ID, but currently
    doesn't. Valid QM types are [a-dA-DvVxX?]: A-D, V for Vertical, X for
    horrible and ? for unknown.
    """
    qmline = self.rx_qm.match(comment)
    if qmline is None:
        message = f'! QM Unrecognised as valid in "{survexblock.survexfile.path}" QM{qml.group(1)} "{qml.group(2)}": regex failure typo?'
        print(message)
        stash_data_issue(
            parser="survex", message=message, url=None, sb=(survexblock.survexfile.path)
        )
        return
    self.LoadSurvexQM(survexblock, qmline)
2023-03-13 16:31:42 +00:00
2020-06-24 22:46:18 +01:00
def LoadSurvexComment(self, survexblock, comment):
    """Dispatch on the content of a survex comment line.

    Only ;ref, ;team (Messteam/Zeichner), ;QM and the collation markers
    ;*include / ;*edulcni are acted upon; all other comments are ignored.
    This _should_ also check that the QM survey point exists in the block.
    """
    depth = " " * self.depthbegin

    if self.rx_commref.match(comment):
        # strip the 'ref' prefix before handing over the wallet reference
        comment = self.rx_ref2.sub("", comment.strip())
        print(f"{self.depthbegin:2}{depth} - rx_ref2 -- {comment=} in {survexblock.survexfile.path} :: {survexblock}")
        self.LoadSurvexRef(survexblock, comment)

    # ; Messteam: ... / ; Zeichner: ... lines from German surveys are
    # recognised but deliberately ignored: none of those people are valid
    # teammembers because they are not actually on our expo.
    if self.rx_commteam.match(comment):
        pass

    if (qml := self.rx_qm0.match(comment)):
        self.ProcessQM(survexblock, qml, comment)

    # ;|*include means 'we have been included'; whereas *include means
    # 'proceed to include'. No test here that this file has not already
    # been included. Ouch.
    if (included := self.rx_comminc.match(comment)):
        self.ProcessIncludeLine(included)

    # ;*edulcni means we are returning from an included file
    if (edulcni := self.rx_commcni.match(comment)):
        self.ProcessEdulcniLine(edulcni)
2023-03-23 19:05:25 +00:00
def get_cave(self, path):
    """Read the file path to a survex file and guesses the cave.

    Looks for a 'caves-<area>/<cave-id>/' component in the path; returns
    the looked-up cave, or None when the path does not name a cave.
    """
    path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", path)
    if not path_match:
        return None
    return getCaveByReference(f"{path_match.group(1)}-{path_match.group(2)}")
2020-06-24 22:46:18 +01:00
2022-10-07 08:57:30 +01:00
def LinearLoad ( self , survexblock , path , collatefilename ) :
2020-06-27 17:55:59 +01:00
""" Loads a single survex file. Usually used to import all the survex files which have been collated
2023-03-06 16:37:54 +00:00
into a single file : either the tree or the _unseens .
Also used for loading a single file which has been edited online .
Loads the begin / end blocks using a stack for labels .
2022-10-07 21:48:41 +01:00
Uses the python generator idiom to avoid loading the whole file ( 21 MB ) into memory .
2020-06-27 17:55:59 +01:00
"""
2020-07-04 01:10:17 +01:00
blkid = None
pathlist = None
args = None
oldflags = None
blockcount = 0
self . lineno = 0
slengthtotal = 0.0
nlegstotal = 0
2020-06-27 17:55:59 +01:00
self . relativefilename = path
2023-03-23 19:05:25 +00:00
IdentifyCave ( path ) # this will produce null for survex files which are geographic collections
2023-01-19 21:18:42 +00:00
2020-06-28 14:42:26 +01:00
self . currentsurvexfile = survexblock . survexfile
2023-01-19 21:18:42 +00:00
self . currentsurvexfile . save ( ) # django insists on this although it is already saved !?
2020-07-03 14:53:36 +01:00
2020-07-03 17:22:15 +01:00
self . datastar = copy . deepcopy ( self . datastardefault )
self . flagsstar = copy . deepcopy ( self . flagsdefault )
2020-07-03 18:08:59 +01:00
2020-06-28 14:42:26 +01:00
def tickle ( ) :
nonlocal blockcount
2023-01-19 21:18:42 +00:00
blockcount + = 1
2023-07-24 12:24:39 +01:00
if blockcount % 40 == 0 :
2023-01-19 21:18:42 +00:00
print ( " . " , file = sys . stderr , end = " " )
2023-07-24 12:24:39 +01:00
if blockcount % 1600 == 0 :
2023-01-19 21:18:42 +00:00
print ( " \n " , file = sys . stderr , end = " " )
mem = get_process_memory ( )
print ( f " - MEM: { mem : 7.2f } MB in use " , file = sys . stderr )
print ( " " , file = sys . stderr , end = " " )
2020-07-01 22:49:38 +01:00
sys . stderr . flush ( )
2020-06-28 14:42:26 +01:00
2020-07-03 18:08:59 +01:00
def printbegin ( ) :
nonlocal blkid
nonlocal pathlist
depth = " " * self . depthbegin
2020-07-04 01:10:17 +01:00
self . insp = depth
2020-07-07 01:35:58 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " { self . depthbegin : 2 } { depth } - Begin for : ' { blkid } ' " )
2020-07-03 18:08:59 +01:00
pathlist = " "
for id in self . stackbegin :
if len ( id ) > 0 :
pathlist + = " . " + id
def printend ( ) :
nonlocal args
depth = " " * self . depthbegin
2020-07-07 01:35:58 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " { self . depthbegin : 2 } { depth } - End from: ' { args } ' " )
2023-01-19 21:18:42 +00:00
print (
" {:2} {} - LEGS: {} (n: {} , length: {} units: {} ) " . format (
self . depthbegin , depth , self . slength , self . slength , self . legsnumber , self . units
)
)
2020-07-03 18:08:59 +01:00
def pushblock ( ) :
nonlocal blkid
2020-07-06 21:46:19 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " # datastack at 1 *begin { blkid } ' type ' : " , end = " " )
2020-07-06 21:46:19 +01:00
for dict in self . datastack :
2022-11-23 10:41:14 +00:00
print ( f " ' { dict [ ' type ' ] . upper ( ) } ' " , end = " " )
2020-07-06 21:46:19 +01:00
print ( " " )
2022-11-23 10:41:14 +00:00
print ( f " ' { self . datastar [ ' type ' ] . upper ( ) } ' self.datastar " )
2020-07-03 18:08:59 +01:00
# ------------ * DATA
self . datastack . append ( copy . deepcopy ( self . datastar ) )
# ------------ * DATA
2020-07-06 21:46:19 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " # datastack at 2 *begin { blkid } ' type ' : " , end = " " )
2020-07-06 21:46:19 +01:00
for dict in self . datastack :
2022-11-23 10:41:14 +00:00
print ( f " ' { dict [ ' type ' ] . upper ( ) } ' " , end = " " )
2020-07-06 21:46:19 +01:00
print ( " " )
2022-11-23 10:41:14 +00:00
print ( f " ' { self . datastar [ ' type ' ] . upper ( ) } ' self.datastar " )
2023-01-19 21:18:42 +00:00
2020-07-03 18:08:59 +01:00
# ------------ * FLAGS
self . flagsstack . append ( copy . deepcopy ( self . flagsstar ) )
# ------------ * FLAGS
2020-07-04 13:31:46 +01:00
pass
2020-07-03 18:08:59 +01:00
def popblock ( ) :
nonlocal blkid
nonlocal oldflags
2020-07-06 21:46:19 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " # datastack at *end ' { blkid } ' type ' : " , end = " " )
2020-07-06 21:46:19 +01:00
for dict in self . datastack :
2022-11-23 10:41:14 +00:00
print ( f " ' { dict [ ' type ' ] . upper ( ) } ' " , end = " " )
2020-07-06 21:46:19 +01:00
print ( " " )
2022-11-23 10:41:14 +00:00
print ( f " ' { self . datastar [ ' type ' ] . upper ( ) } ' self.datastar " )
2020-07-03 18:08:59 +01:00
# ------------ * DATA
2023-01-19 21:18:42 +00:00
self . datastar = copy . deepcopy ( self . datastack . pop ( ) )
2020-07-03 18:08:59 +01:00
# ------------ * DATA
2020-07-06 21:46:19 +01:00
if debugprint :
2022-11-23 10:41:14 +00:00
print ( f " # datastack after *end ' { blkid } ' type ' : " , end = " " )
2020-07-06 21:46:19 +01:00
for dict in self . datastack :
2022-11-23 10:41:14 +00:00
print ( f " ' { dict [ ' type ' ] . upper ( ) } ' " , end = " " )
2020-07-06 21:46:19 +01:00
print ( " " )
2022-11-23 10:41:14 +00:00
print ( f " ' { self . datastar [ ' type ' ] . upper ( ) } ' self.datastar " )
2023-01-19 21:18:42 +00:00
2020-07-03 18:08:59 +01:00
# ------------ * FLAGS
2023-01-19 21:18:42 +00:00
self . flagsstar = copy . deepcopy ( self . flagsstack . pop ( ) )
2020-07-03 18:08:59 +01:00
# ------------ * FLAGS
2020-07-06 21:46:19 +01:00
if debugprint :
if oldflags [ " skiplegs " ] != self . flagsstar [ " skiplegs " ] :
2022-11-23 10:41:14 +00:00
print ( f " # POP ' any ' flag now: ' { self . flagsstar [ ' skiplegs ' ] } ' was: { oldflags [ ' skiplegs ' ] } " )
2020-07-03 18:08:59 +01:00
2020-07-04 13:31:46 +01:00
def starstatement(star):
    """Interprets a survex command where * is the first character on the line, e.g. *begin.

    star: the re match object from rx_star; its two groups are the command word
    and the remainder of the line (args).

    Dispatches on the command: *begin/*end push/pop the whole parser state
    (team, date, units, legs, length, *data and *flags via pushblock()/popblock())
    and create/close SurvexBlock database objects; the other commands delegate
    to the self.LoadSurvex* handlers.  Mutates the closure variables
    survexblock, blkid, args, oldflags, slengthtotal and nlegstotal.
    """
    nonlocal survexblock
    nonlocal blkid
    nonlocal pathlist
    nonlocal args
    nonlocal oldflags
    nonlocal slengthtotal
    nonlocal nlegstotal

    cmd, args = star.groups()
    cmd = cmd.lower()  # survex commands are case-insensitive
    # ------------------------BEGIN
    if self.rx_begin.match(cmd):
        blkid = args.lower()
        # PUSH state ++++++++++++++
        self.depthbegin += 1
        self.stackbegin.append(blkid)
        self.unitsstack.append((self.units, self.unitsfactor))
        self.legsnumberstack.append(self.legsnumber)
        self.slengthstack.append(self.slength)
        self.teaminheritstack.append(self.inheritteam)
        self.teamcurrentstack.append(self.currentteam)
        self.dateinheritstack.append(self.inheritdate)
        self.datecurrentstack.append(self.currentdate)
        pushblock()
        # PUSH state ++++++++++++++
        # Reset per-block accumulators; the inner block inherits team/date
        # from the enclosing block via the inherit* attributes.
        self.legsnumber = 0
        self.slength = 0.0
        self.units = "metres"
        self.inheritteam = self.currentteam
        self.currentteam = set()  # zero the current team when we start a new block
        self.inheritdate = self.currentdate
        self.currentdate = None  # zero the current date when we start a new block
        printbegin()
        newsurvexblock = SurvexBlock(
            name=blkid,
            parent=survexblock,
            survexpath=pathlist,
            survexfile=self.currentsurvexfile,
            legsall=0,
            legslength=0.0,
        )
        newsurvexblock.save()
        print(f" SB: #{newsurvexblock.id} '{newsurvexblock}' parent:{newsurvexblock.parent} f:{newsurvexblock.survexfile}")
        newsurvexblock.title = (
            "(" + survexblock.title + ")"
        )  # copy parent inititally, overwrite if it has its own
        survexblock = newsurvexblock
        survexblock.save()  # django insists on this , but we want to save at the end !
        tickle()
    # ---------------------------END
    elif self.rx_end.match(cmd):
        # Close the current block: store accumulated legs/length, fix up
        # missing dates and anonymous team members, then POP the saved state.
        survexblock.legsall = self.legsnumber
        survexblock.legslength = self.slength
        printend()
        slengthtotal += self.slength
        nlegstotal += self.legsnumber

        self.fix_undated(survexblock)
        self.fix_anonymous(survexblock)
        try:
            survexblock.parent.save()  # django insists on this although it is already saved !?
        except:
            print(survexblock.parent, file=sys.stderr)
            raise
        try:
            survexblock.save()  # save to db at end of block
        except:
            print(survexblock, file=sys.stderr)
            raise
        confirm_team_on_trip(survexblock)
        # POP state ++++++++++++++
        popblock()
        self.inheritteam = self.teaminheritstack.pop()
        self.currentteam = self.teamcurrentstack.pop()
        self.inheritdate = self.dateinheritstack.pop()
        self.currentdate = self.datecurrentstack.pop()
        self.legsnumber = self.legsnumberstack.pop()
        self.units, self.unitsfactor = self.unitsstack.pop()
        self.slength = self.slengthstack.pop()
        blkid = self.stackbegin.pop()
        self.currentsurvexblock = survexblock.parent
        survexblock = survexblock.parent
        oldflags = self.flagsstar
        self.depthbegin -= 1
        # POP state ++++++++++++++
    # -----------------------------
    elif self.rx_title.match(cmd):
        # *title: prefer the quoted form, else take the raw args.
        quotedtitle = self.rx_quotedtitle.match(args)
        if quotedtitle:
            survexblock.title = quotedtitle.groups()[0]
        else:
            survexblock.title = args
    elif self.rx_ref.match(cmd):
        self.LoadSurvexRef(survexblock, args)
    elif self.rx_flags.match(cmd):
        oldflags = self.flagsstar
        self.LoadSurvexFlags(args)
        if debugprint:
            if oldflags["skiplegs"] != self.flagsstar["skiplegs"]:
                print(f"# CHANGE 'any' flag now: '{self.flagsstar['skiplegs']}' was: {oldflags['skiplegs']}")
    elif self.rx_data.match(cmd):
        if self.LoadSurvexDataNormal(survexblock, args):
            pass
        else:
            # Abort, we do not cope with this *data format
            return
    elif self.rx_alias.match(cmd):
        self.LoadSurvexAlias(survexblock, args)
    elif self.rx_entrance.match(cmd):
        self.LoadSurvexEntrance(survexblock, args)
    elif self.rx_date.match(cmd):
        self.LoadSurvexDate(survexblock, args)
    elif self.rx_units.match(cmd):
        self.LoadSurvexUnits(survexblock, args)
    elif self.rx_team.match(cmd):
        self.LoadSurvexTeam(survexblock, args)
    elif self.rx_set.match(cmd) and self.rx_names.match(cmd):
        # *set names: deliberately ignored.
        pass
    elif self.rx_include.match(cmd):
        # *include should have been expanded away by the collation pass;
        # seeing one here means we are parsing an un-collated file.
        message = f" ! -ERROR *include command not expected here {path}. Re-run a full Survex import."
        print(message)
        print(message, file=sys.stderr)
        stash_data_issue(
            parser="survex",
            message=message,
        )
    else:
        self.LoadSurvexFallThrough(survexblock, args, cmd)
2020-07-03 18:08:59 +01:00
2023-01-19 21:18:42 +00:00
# this is a python generator idiom.
2022-10-07 08:57:30 +01:00
# see https://realpython.com/introduction-to-python-generators/
2023-07-31 13:49:54 +01:00
# this is the first use of generators in troggle (Oct.2022) and saves 21 MB of memory
2022-10-07 08:57:30 +01:00
with open ( collatefilename , " r " ) as fcollate :
for svxline in fcollate :
self . lineno + = 1
sline , comment = self . rx_comment . match ( svxline ) . groups ( )
if comment :
2023-02-28 16:18:29 +00:00
# this catches the ;|*include NEWFILE and ;|*edulcni ENDOFFILE lines too
2023-01-19 21:18:42 +00:00
self . LoadSurvexComment ( survexblock , comment )
2023-07-31 13:49:54 +01:00
else :
# detect a merge failure inserted by version control
mfail = self . rx_badmerge . match ( sline )
if mfail :
message = f " \n ! - ERROR version control merge failure \n - ' { sline } ' \n "
message = (
message + f " - line { self . lineno } in { blkid } in { survexblock } \n - NERD++ needed to fix it "
)
print ( message )
print ( message , file = sys . stderr )
stash_data_issue ( parser = " survex " , message = message )
continue # skip this line
2022-10-07 08:57:30 +01:00
if not sline :
2023-01-19 21:18:42 +00:00
continue # skip blank lines
2022-10-07 08:57:30 +01:00
# detect a star command
star = self . rx_star . match ( sline )
2023-01-19 21:18:42 +00:00
if star :
2022-10-07 08:57:30 +01:00
# yes we are reading a *command
starstatement ( star )
2023-01-19 21:18:42 +00:00
else : # not a *cmd so we are reading data OR a ";" rx_comment failed. We hope.
2022-10-07 08:57:30 +01:00
self . LoadSurvexLeg ( survexblock , sline , comment , svxline )
self . legsnumber = nlegstotal
2023-01-19 21:18:42 +00:00
self . slength = slengthtotal
2022-10-07 09:41:46 +01:00
def PushdownStackScan(self, survexblock, path, finname, flinear, fcollate):
    """Follows the *include links in all the survex files from the root file (usually 1623.svx)
    and reads only the *include and *begin and *end statements. It produces a linearised
    list of the include tree and detects blocks included more than once.

    survexblock: the root SurvexBlock being scanned (used only for messages).
    path:        repo-relative path (no .svx) of the file being read.
    finname:     full filesystem path of the file to open.
    flinear:     open log file receiving the linearised include tree.
    fcollate:    open collation file receiving all non-*include lines.

    Recurses into each *include found; duplicate includes are detected via
    self.svxfileslist and self.never_seen().
    """
    global stop_dup_warning

    def process_line(svxline):
        # Handle one raw line: copy it to the collation file, recurse on
        # *include, and track *begin/*end nesting in the linear log.
        self.lineno += 1
        # detect a merge failure inserted by version control
        mfail = self.rx_badmerge.match(svxline)
        if mfail:
            message = f"\n!! - ERROR version control merge failure\n - '{svxline}'\n"
            message = message + f" - in '{path}' at line {self.lineno}\n"
            message = (
                message + f" - line {self.lineno} {survexblock}\n - Parsing aborted. NERD++ needed to fix it"
            )
            print(message)
            print(message, file=sys.stderr)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            return  # skip this survex file and all things *included in it

        includestmt = self.rx_include.match(svxline)
        if not includestmt:
            # *include lines are replaced by the included file's content,
            # so only non-include lines are copied into the collation file.
            fcollate.write(f"{svxline.strip()}\n")

        sline, comment = self.rx_comment.match(svxline.strip()).groups()
        star = self.rx_star.match(sline)
        if star:  # yes we are reading a *cmd
            cmd, args = star.groups()
            cmd = cmd.lower()
            if self.rx_include2.match(cmd):
                # rx_include2 = re.compile("(?i)include$")
                includepath = os.path.normpath(os.path.join(os.path.split(path)[0], re.sub(r"\.svx$", "", args)))  # normalises path syntax
                if self.never_seen(includepath, path):
                    fullpath = os.path.join(settings.SURVEX_DATA, includepath + ".svx")
                    self.RunSurvexIfNeeded(os.path.join(settings.SURVEX_DATA, includepath), path)
                    self.check_unique_name(os.path.join(settings.SURVEX_DATA, includepath))
                    if os.path.isfile(fullpath):
                        # --------------------------------------------------------
                        self.depthinclude += 1
                        finincludename = fullpath
                        fcollate.write(f";|*include {includepath}\n")
                        flinear.write(f"{self.depthinclude:2} {indent} *include {includepath}\n")
                        push = includepath.lower()
                        self.includestack.append(push)
                        # -----------------
                        self.PushdownStackScan(survexblock, includepath, finincludename, flinear, fcollate)
                        # -----------------
                        pop = self.includestack.pop()
                        if pop != push:
                            # FIX: the old message used one "{}" placeholder with three
                            # .format() args, silently discarding 'push' and the stack.
                            message = f"!! ERROR mismatch *include pop!=push '{pop}'!='{push}'\n{self.includestack}"
                            print(message)
                            print(message, file=flinear)
                            print(message, file=sys.stderr)
                            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                        flinear.write(f"{self.depthinclude:2} {indent} *edulcni {pop}\n")
                        fcollate.write(f";|*edulcni {pop}\n")
                        self.depthinclude -= 1
                        # --------------------------------------------------------
                    else:
                        message = f" ! ERROR *include file '{includepath}' not found, listed in '{fin.name}'"
                        print(message)
                        print(message, file=sys.stderr)
                        stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            elif self.rx_begin2.match(cmd):
                # *begin: push the (lowercased) block name for later matching.
                self.depthbegin += 1
                depth = " " * self.depthbegin
                if args:
                    pushargs = args
                else:
                    pushargs = " "
                self.stackbegin.append(pushargs.lower())
                flinear.write(f"{self.depthbegin:2} {depth} *begin {args}\n")
                pass
            elif self.rx_end2.match(cmd):
                # *end: pop and check it matches the corresponding *begin.
                depth = " " * self.depthbegin
                flinear.write(f"{self.depthbegin:2} {depth} *end {args}\n")
                if not args:
                    args = " "
                popargs = self.stackbegin.pop()
                if popargs != args.lower():
                    message = (
                        f"!! ERROR mismatch in BEGIN/END labels pop!=push '{popargs}'!='{args}'\n{self.stackbegin}"
                    )
                    print(message)
                    print(message, file=flinear)
                    print(message, file=sys.stderr)
                    stash_data_issue(parser="survex", message=message, url=None, sb=(path))
                self.depthbegin -= 1
                pass
            elif self.rx_title2.match(cmd):
                depth = " " * self.depthbegin
                flinear.write(f"{self.depthbegin:2} {depth} *title {args}\n")
                pass

    indent = " " * self.depthinclude
    sys.stderr.flush()
    # Progress ticker on stderr: one dot per 10 files, newline per 500.
    self.callcount += 1
    if self.callcount % 10 == 0:
        print(".", file=sys.stderr, end="")
    if self.callcount % 500 == 0:
        print("\n", file=sys.stderr, end="")

    if path in self.svxfileslist:
        # We have already used os.normpath() so this is OK. "/../" and "//" have been simplified already.
        if stop_dup_warning:
            # print("D",end="", file=sys.stderr)
            pass
        else:
            message = f" * Warning. Duplicate detected. We have already seen this *include '{path}' from another survex file. Detected at callcount:{self.callcount} depth:{self.depthinclude}"
            print(message)
            print(message, file=flinear)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
        if self.svxfileslist.count(path) > 2:
            # Third occurrence: almost certainly an include cycle, so bail out.
            message = f" ! ERROR. Should have been caught before this. Survex file already *included 2x. Probably an infinite loop so fix your *include statements that include this. Aborting. {path}"
            print(message)
            print(message, file=flinear)
            stash_data_issue(parser="survex", message=message, url=None, sb=(path))
            return
        return
    try:
        # python generator idiom again. Not important here as these are small files
        with open(finname, "r") as fin:
            for svxline in fin:
                process_line(svxline)

        self.svxfileslist.append(path)

    except UnicodeDecodeError:
        # some bugger put an umlaut in a non-UTF survex file ?!
        message = f" ! ERROR *include file '{path}' in '{survexblock}' has UnicodeDecodeError. Omitted."
        print(message)
        print(message, file=sys.stderr)
        stash_data_issue(parser="survex", message=message, url=None, sb=(path))
        return  # skip this survex file and all things *included in it
    except:
        message = f" ! ERROR *include file '{path}' in '{survexblock}' has unexpected error on opening or reading file. OMITTED!"
        print(message)
        print(message, file=sys.stderr)
        stash_data_issue(parser="survex", message=message, url=None, sb=(path))
        raise
    return  # skip this survex file and all things *included in it
2023-02-28 16:18:29 +00:00
def never_seen(self, incpath, parent):
    """Record that *parent* includes *incpath* and say whether this is the
    first time the path has been met.

    The _unseen files may include survex files we have already seen, and we do
    not want to process them again.  For the _unseens this is not an error,
    but during the main *include tree pass (self.svxpass == self.TREE) a
    duplicate is reported as a DataIssue.

    Returns True on first encounter, False for every repeat.
    """
    parents = self.uniquefile.get(incpath)
    if parents is None:
        # First encounter: start tracking who includes this file.
        self.uniquefile[incpath] = [parent]
        return True

    # Duplicate: remember this additional includer.
    parents.append(parent)
    if self.svxpass == self.TREE:
        message = (
            f"  DUP: skipping non-unique survex filepath, '{incpath}' - #{len(self.uniquefile[incpath])} '{self.uniquefile[incpath]}'"
        )
        print(message)
        stash_data_issue(parser='survex', message=message)
        for p in parents:
            if p in self.uniquefile:
                print(f"  {p} <- {self.uniquefile[p]}")
    return False
def check_unique_name(self, fullpath):
    """Deliberate no-op, retained so existing call sites keep working.

    This used to check whether the last component of the survex file name
    (e.g. "bigpitch") was unique, as opposed to the whole path of the
    survexfile having been seen before.  We don't care about this any more,
    so nothing is checked and nothing is returned.
    """
    return None
2021-11-05 20:59:54 +00:00
2023-01-19 21:18:42 +00:00
def RunSurvexIfNeeded(self, fullpath, calledpath):
    """Run survex 'cavern' to (re)generate the .log/.3d files for the survex
    file at fullpath (given without the .svx extension) when they look stale.

    fullpath:   filesystem path of the survex file, without ".svx".
    calledpath: the file whose *include led us here (for error messages only).

    Regenerates when: the .log file is missing, the .svx is newer than the
    .log, the cavern binary is newer than the .log, the .log is over 60 days
    old, or at random via chaosmonkey().
    """
    now = time.time()
    # Default all three timestamps to "one year ago"; the real file mtimes
    # overwrite them below once we know the files exist.
    cav_t = now - 365 * 24 * 3600
    log_t = now - 365 * 24 * 3600
    svx_t = now - 365 * 24 * 3600

    def runcavern():
        """regenerates the .3d file from the .svx if it is older than the svx file, or older than the software,
        or randomly using chaosmonkey() just to keep things ticking over.
        """
        try:
            print(
                f" - Regenerating stale (or chaos-monkeyed) cavern .log and .3d for '{fullpath}'\n  at '{logpath}'\n"
            )
            print(
                f"days svx old: {(svx_t - log_t)/(24*3600):.1f}  cav:{(cav_t - log_t)/(24*3600):.1f}  log old: {(now - log_t)/(24*3600):.1f}"
            )
            # Write the .log/.3d next to the .svx file.
            outputdir = Path(str(f"{fullpath}.svx")).parent
            # List-form argv (shell=False): safe against shell injection.
            sp = subprocess.run(
                [settings.CAVERN, "--log", f"--output={outputdir}", f"{fullpath}.svx"],
                capture_output=True,
                check=False,
                text=True,
            )
            if sp.returncode != 0:
                message = f" ! Error running {settings.CAVERN}: {fullpath}"
                url = f"/survexfile{fullpath}.svx".replace(str(settings.SURVEX_DATA), "")
                stash_data_issue(parser="xEntrances", message=message, url=url)
                print(message)
                print(
                    "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
                )
            self.caverncount += 1
            # should also collect all the .err files too and create a DataIssue for each one which
            # - is nonzero in size AND
            # - has Error greater than 5% anywhere, or some other more serious error
            errpath = Path(fullpath + ".err")
            if errpath.is_file():
                if errpath.stat().st_size == 0:
                    errpath.unlink()  # delete empty closure error file
        except:
            # Best-effort: a cavern failure is logged but never aborts the import.
            message = f' ! FAIL running cavern on survex file "{fullpath}" specified in *include in {calledpath}'
            stash_data_issue(parser="survex", message=message)
            print(message)

    svxpath = Path(fullpath + ".svx")
    logpath = Path(fullpath + ".log")
    # NOTE(review): expression statement with no effect — the .parent result
    # is discarded; presumably leftover from an earlier edit. Confirm and remove.
    Path(svxpath).parent

    if not svxpath.is_file():
        message = f' ! BAD. "{fullpath}" is not a file, specified in *include in {calledpath}'
        stash_data_issue(parser="survex", message=message)
        print(message)
        return
    if not logpath.is_file():  # always run if logfile not there
        runcavern()
        return

    # NOTE(review): this unconditional assignment makes self.caverndate always
    # truthy, so the 'if not self.caverndate' branch below (which finds the
    # cavern binary's mtime via 'which') can never run. Looks deliberate —
    # it pins the cavern "age" to two years ago — but confirm.
    self.caverndate = now - 2 * 365 * 24 * 3600

    if not self.caverndate:
        sp = subprocess.run(["which", f"{settings.CAVERN}"], capture_output=True, check=False, text=True)
        if sp.returncode != 0:
            message = f' ! Error running "which" on {settings.CAVERN}'
            stash_data_issue(parser="survex", message=message)
            print(message)
            print(
                "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
            )
        self.caverndate = os.path.getmtime(sp.stdout.strip())
    else:
        self.caverndate = now - 2 * 365 * 24 * 3600
    cav_t = self.caverndate
    log_t = os.path.getmtime(logpath)
    svx_t = os.path.getmtime(svxpath)
    now = time.time()

    if svx_t - log_t > 0:  # stale, svx file is newer than log
        runcavern()
        return
    if now - log_t > 60 * 24 * 60 * 60:  # >60 days, re-run anyway
        runcavern()
        return
    if cav_t - log_t > 0:  # new version of cavern
        runcavern()
        return
    if chaosmonkey(350):  # one in every 350 runs
        runcavern()
2020-06-27 17:55:59 +01:00
2023-01-19 21:18:42 +00:00
2020-06-27 18:00:24 +01:00
def FindAndLoadSurvex(survexblockroot):
    """Follows the *include links successively to find survex files
    This proceeds in 3 phases:
    1. The root survex file is read and all the *include files are found, using PushdownStackScan()
    2. All the other survex files in the :loser: repo are found, and their *includes found,
    using another PushdownStackScan() [duplicates omitted]
    3. The combined expanded file containing all the survex data is parsed as a single file,
    using LinearLoad()

    survexblockroot: the root SurvexBlock whose .survexfile is SURVEX_TOPNAME.
    Returns the total number of survey legs loaded.

    Side effects: redirects sys.stdout to svxblks.log for the duration, writes
    svxlinear.log, the "_<root>.svx" collation file, the UNSEENS collation
    file, and a PushdownStackScan.prof profile.
    """
    global stop_dup_warning

    print(" - redirecting stdout to svxblks.log...")
    stdout_orig = sys.stdout
    # Redirect sys.stdout to the file
    sys.stdout = open("svxblks.log", "w")

    # ---- Phase 1: scan the main *include tree from SURVEX_TOPNAME ----
    print(f" - Scanning Survex Blocks tree from {settings.SURVEX_TOPNAME}.svx ...", file=sys.stderr)
    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only
    collatefilename = "_" + survexfileroot.path + ".svx"

    svx_scan = LoadingSurvex()
    svx_scan.callcount = 0
    svx_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, survexfileroot.path)

    print(f" - RunSurvexIfNeeded cavern on '{fullpathtotop}'", file=sys.stderr)
    svx_scan.RunSurvexIfNeeded(fullpathtotop, fullpathtotop)
    svx_scan.check_unique_name(fullpathtotop)
    # Seed the root file into the duplicates tracker with a dummy parent "0".
    svx_scan.uniquefile[str(survexfileroot)] = ["0"]

    indent = ""
    fcollate = open(collatefilename, "w")

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START '{survexfileroot}'", file=sys.stderr)
    flinear = open("svxlinear.log", "w")
    flinear.write(f" - MEM:{mem0:7.2f} MB START '{survexfileroot.path}'\n")
    print(" ", file=sys.stderr, end="")

    finrootname = Path(settings.SURVEX_DATA, survexfileroot.path + ".svx")
    fcollate.write(f";*include {survexfileroot.path}\n")
    flinear.write(f"{svx_scan.depthinclude:2} {indent} *include {survexfileroot.path}\n")

    # Profile the tree scan; results land in PushdownStackScan.prof.
    import cProfile
    import pstats
    from pstats import SortKey

    pr = cProfile.Profile()
    pr.enable()
    svx_scan.svxpass = svx_scan.TREE
    # ----------------------------------------------------------------
    svx_scan.PushdownStackScan(survexblockroot, survexfileroot.path, finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    svx_scan.svxpass = ""
    pr.disable()
    with open("PushdownStackScan.prof", "w") as f:
        ps = pstats.Stats(pr, stream=f)
        ps.sort_stats(SortKey.CUMULATIVE)
        ps.print_stats()

    flinear.write(f"{svx_scan.depthinclude:2} {indent} *edulcni {survexfileroot.path}\n")
    fcollate.write(f";*edulcni {survexfileroot.path}\n")
    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP {survexfileroot.path}\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED\n")
    flinear.write(f" - {len(svx_scan.svxfileslist):,} survex files in linear include list \n")
    flinear.write(f" - {len(svx_scan.uniquefile):,} unique survex files in linear include list \n")
    # Cross-check the flat file list against the uniqueness tracker.
    for j in svx_scan.svxfileslist:
        if j not in svx_scan.uniquefile:
            flinear.write(f" - '{j}' {type(j)} not in unique list \n")
    for f in svx_scan.uniquefile:
        if len(svx_scan.uniquefile[f]) > 1:
            flinear.write(f" - '{f}' {type(f)} {svx_scan.uniquefile[f]} dup survex files \n")

    print(f"\n - {svx_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files", file=sys.stderr)
    print(f" - {len(svx_scan.svxfileslist):,} survex files from tree in linear include list", file=sys.stderr)
    print(f" - {len(svx_scan.uniquefile):,} unique survex files from tree in linear include list", file=sys.stderr)
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END ", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    #
    # ---- Phase 2: find every .svx in :loser: not reached by the tree scan ----
    # Process all the omitted files in :loser: with some exceptions
    unseens = set()
    b = []
    for p in Path(settings.SURVEX_DATA).rglob("*.svx"):
        if p.is_file():
            po = p.relative_to(Path(settings.SURVEX_DATA))
            pox = po.with_suffix("")
            if str(pox) not in svx_scan.svxfileslist:
                unseens.add(pox)
            else:
                b.append(pox)

    if len(b) != len(svx_scan.svxfileslist):
        print(
            f" ! Mismatch. {len(b)} survex files found which should be {len(svx_scan.svxfileslist)} in main tree)",
            file=sys.stderr,
        )
    unseensroot = re.sub(r"\.svx$", "", UNSEENS)
    # Prefixes excluded from the "unseen" pass (not real cave survey data).
    excpts = ["surface/terrain", "kataster/kataster-boundaries", "template", "docs", "deprecated", "subsections", unseensroot]
    removals = []
    for x in unseens:
        for o in excpts:
            if str(x).strip().startswith(o):
                removals.append(x)
    # special fix for .svx file not actually in survex format
    # NOTE(review): set.remove raises KeyError if this file is ever absent
    # from the repo — .discard() would be safer; confirm before changing.
    unseens.remove(Path("fixedpts/gps/gps00raw"))

    for x in removals:
        unseens.remove(x)
    print(
        f"\n - {len(unseens)} survex files found which were not included in main tree. ({len(svx_scan.svxfileslist)} in main tree)",
        file=sys.stderr,
    )
    check_team_cache()
    print(f" -- Now loading the previously-omitted survex files.", file=sys.stderr)
    print(f" -    (except: {excpts})", file=sys.stderr)

    # Write a synthetic survex file that *includes every unseen file, so a
    # single PushdownStackScan can process them all.
    with open(Path(settings.SURVEX_DATA, UNSEENS), "w") as u:
        u.write(
            f"; {len(unseens):,} survex files not *included by {settings.SURVEX_TOPNAME} (which are {len(svx_scan.svxfileslist):,} files)\n"
        )
        u.write(f"; autogenerated by parser/survex.py from databasereset.py on '{datetime.now(timezone.utc)}'\n")
        u.write(f"; omitting any file beginning with {excpts}\n\n")
        u.write("*begin troggle_unseens\n")
        u.write("*title \"Collated unseen and unlinked survex files\"\n")
        for x in sorted(unseens):
            u.write(f"*include {x}\n")
        u.write("*end troggle_unseens\n")

    survexfileroot = survexblockroot.survexfile  # i.e. SURVEX_TOPNAME only
    omit_scan = LoadingSurvex()
    omit_scan.callcount = 0
    omit_scan.depthinclude = 0
    fullpathtotop = os.path.join(survexfileroot.survexdirectory.path, UNSEENS)

    # copy the list to prime the next pass through the files
    omit_scan.svxfileslist = svx_scan.svxfileslist[:]
    svx_scan.svxfileslist = []  # free memory
    svx_scan = None  # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.?
    omit_scan.check_unique_name(fullpathtotop)
    omit_scan.uniquefile[unseensroot] = ["0"]

    mem0 = get_process_memory()
    print(f" - MEM:{mem0:7.2f} MB START '{unseensroot}'", file=sys.stderr)
    # flinear = open('svxlinear.log', 'w')
    flinear.write(f" - MEM:{mem0:7.2f} MB START '{unseensroot}'\n")
    print(" ", file=sys.stderr, end="")

    # this is a bit tricky as some unseen files will *include files we have already seen, which
    # we should not process again.
    finrootname = fullpathtotop
    fcollate.write(f";*include {UNSEENS}\n")
    flinear.write(f"{omit_scan.depthinclude:2} {indent} *include {unseensroot}\n")
    omit_scan.svxpass = omit_scan.ODDS
    # stop_dup_warning = True
    # ----------------------------------------------------------------
    omit_scan.PushdownStackScan(survexblockroot, unseensroot, finrootname, flinear, fcollate)
    # ----------------------------------------------------------------
    # stop_dup_warning = False
    omit_scan.svxpass = ""

    flinear.write(f"{omit_scan.depthinclude:2} {indent} *edulcni {unseensroot}\n")
    fcollate.write(f";*edulcni {UNSEENS}\n")
    check_team_cache()
    mem1 = get_process_memory()
    flinear.write(f"\n - MEM:{mem1:.2f} MB STOP {UNSEENS} Unseen Oddments\n")
    flinear.write(f" - MEM:{mem1 - mem0:.3f} MB ADDITIONALLY USED Unseen Oddments\n")
    flinear.write(f" - {len(omit_scan.svxfileslist):,} survex files in linear include list Unseen Oddments\n")

    flinear.close()
    fcollate.close()

    print(
        f"\n - {omit_scan.caverncount:,} runs of survex 'cavern' refreshing .3d files in the unseen list",
        file=sys.stderr,
    )
    print(
        f" - {len(omit_scan.svxfileslist):,} survex files in linear include list including previously unseen ones \n",
        file=sys.stderr,
    )
    omit_scan = None  # Hmm. Does this actually delete all the instance variables if they are lists, dicts etc.?
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB END ", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # ---- Phase 3: parse the collated file as one flat survex file ----
    # Before doing this, it would be good to identify the *equate and *entrance we need that are relevant to the
    # entrance locations currently loaded after this by LoadPos(), but could better be done before ?
    # look in MapLocations() for how we find the entrances
    print("\n - Loading All Survex Blocks  (LinearLoad)", file=sys.stderr)
    svx_load = LoadingSurvex()

    #svx_load.survexdict[survexfileroot.survexdirectory] = []
    #svx_load.survexdict[survexfileroot.survexdirectory].append(survexfileroot)
    #svx_load.svxdirs[""] = survexfileroot.survexdirectory

    # pr2 = cProfile.Profile()
    # pr2.enable()
    print(" ", file=sys.stderr, end="")
    # ----------------------------------------------------------------
    svx_load.LinearLoad(survexblockroot, survexfileroot.path, collatefilename)
    # ----------------------------------------------------------------
    # pr2.disable()
    # with open('LinearLoad.prof', 'w') as f:
    #     ps = pstats.Stats(pr2, stream=f)
    #     ps.sort_stats(SortKey.CUMULATIVE)
    #     ps.print_stats()
    mem1 = get_process_memory()
    print(f"\n - MEM:{mem1:7.2f} MB STOP", file=sys.stderr)
    print(f" - MEM:{mem1 - mem0:7.3f} MB ADDITIONALLY USED", file=sys.stderr)

    # Close the logging file, Restore sys.stdout to our old saved file handle
    sys.stdout.close()
    print("+", file=sys.stderr)
    sys.stderr.flush()
    sys.stdout = stdout_orig

    legsnumber = svx_load.legsnumber
    mem1 = get_process_memory()

    print(f" - Number of SurvexDirectories: {len(svx_load.svxprim):,}")
    tf = 0
    # for d in svx_load.survexdict:
    #     tf += len(svx_load.survexdict[d])
    print(f" - Number of SurvexFiles: {tf:,}")
    print(f" - Number of Survex legs: {legsnumber:,}")
    svx_load = None
    return legsnumber
2020-06-23 23:34:08 +01:00
2023-03-06 16:37:54 +00:00
def display_contents(blocks):
    """Debug helper: for each survex block, print the block itself and all
    related SurvexFiles (with their directories), Wallets, QMs and
    SurvexStations. Output goes to stdout; nothing is returned or modified.
    """
    for block in blocks:
        print(f"B   {block} {block.parent=} {block.expedition=}")
        for svxfile in SurvexFile.objects.filter(survexblock=block):
            print(f"  SF {svxfile}")
            # print(f"  SD {svxfile.survexdirectory} {svxfile.survexdirectory.cave}")
            print(f"  SD {svxfile.survexdirectory} {svxfile.survexdirectory.path}")
        for wallet in Wallet.objects.filter(survexblock=block):
            print(f"  W {wallet}")
        for qm in QM.objects.filter(block=block):
            print(f"  QM {qm}")
        for station in SurvexStation.objects.filter(block=block):
            print(f"  SS {station}")
2023-03-03 15:15:17 +00:00
def parse_one_file(fpath):  # --------------------------------------in progress-------------------
    """Parse just one survex file. Use when re-loading after editing.

    Returns True on success, False when aborted (more than one SurvexFile
    object in the database shares the same file-path).

    NOTE: *include lines are ignored.
    In the initial file parsing in databaseReset, the *include expansion is done
    in an earlier stage than LinearLoad(). By the time LinearLoad() is called,
    all the *include expansion has happened.
    """

    def parse_new_svx(fpath, svx_load, svxfileroot=None):
        """We need a dummy survex block which has the survexfile being parsed
        as its .survexfile field. But it is used in two ways, it is also
        set as the parent block for the new blocks being created. This has to be fixed
        later.
        This all needs refactoring.

        We also need to re-plumb the fileroot after importing, so that
        the new survexfile appears in the survexdirectory lists.

        NOTE(review): reads 'fname' from the enclosing scope; parse_one_file()
        sets it before this helper is called.
        """
        if svxfileroot is None:  # fixed: identity test, not '== None'
            svxfileroot = MakeFileRoot(fpath)
            svxfileroot.save()
        # It is vital that the block has attached the survexfile object which is being parsed.
        block_dummy = SurvexBlock(
            name="dummy", survexpath="", survexfile=svxfileroot, legsall=0, legslength=0.0
        )
        svxfileroot.save()
        block_dummy.save()
        # rename the dummy so it is identifiable in debug output by its db id
        newname = f"#{block_dummy.id}_" + str(Path(str(svxfileroot)).name)
        block_dummy.name = newname
        block_dummy.save()
        print(f" - block_dummy now '{block_dummy}' {type(block_dummy)} id={block_dummy.id} f:{block_dummy.survexfile}")

        # ----------------------------------------------------------------
        svx_load.LinearLoad(block_dummy, svxfileroot.path, fname)
        # ----------------------------------------------------------------

        # Now we don't need or want the dummy any more
        block_dummy.delete()

    global svx_load

    print(f"\n - Loading One Survex file '{fpath}'", file=sys.stderr)
    svx_load = LoadingSurvex()

    fname = Path(settings.SURVEX_DATA, (fpath + ".svx"))
    svxs = SurvexFile.objects.filter(path=fpath)
    if svxs:
        if len(svxs) > 1:
            print(f" ! Mistake? More than one survex file object in database with the same file-path {svxs}")
            print(" - Aborting file parsing & import into database.")  # fixed: no-placeholder f-string
            return False
        print(f" - Pre-existing survexfile {svxs}.")
        existingsvx = SurvexFile.objects.get(path=fpath)
        existingcave = existingsvx.cave
        print(f" - survexfile id={existingsvx.id} {existingsvx} {existingcave}")

        # Collect the parent blocks of the existing survex blocks, then delete
        # every block which is not itself a parent. The parents are kept so
        # the re-parsed blocks can be re-attached to them afterwards.
        sbs = existingsvx.survexblock_set.all()
        existingparent = None
        parents = set()
        if sbs:
            for sb in sbs:
                try:
                    if sb.parent:
                        parents.add(sb.parent)
                except Exception:  # fixed: was a bare 'except:'
                    print(f" ! FAILURE to access sb.parent {sb=}\n! {sb.parent_id=}")
                    # even though the parent_id exists.. hmm.
            for sb in sbs:
                if sb not in parents:
                    sb.delete()  # removed dead local 'sb_keep'
        if parents:
            if len(parents) > 1:
                print(" - WARNING more than one parent survex block!")  # fixed: no-placeholder f-string
            existingparent = parents.pop()  # removes it
            parents.add(existingparent)  # restores it

        print(f" - Reloading and parsing this survexfile '{fpath}' Loading...")
        # Logic is that we need an SB which links to the survexfile we are parsing for the parser
        # to work, but we delete all those before we start parsing. Urk.
        # ===========
        parse_new_svx(fpath, svx_load, svxfileroot=existingsvx)
        # ===========

        print(f" - survexfile id={existingsvx.id} update")
        if parents:
            print(f" - parents set {parents}")
            sbs = existingsvx.survexblock_set.all()
            if len(sbs) < 1:
                print(" ! No survex blocks found. Parser failure...")
            for sb in sbs:
                print(f" - {sb.id} re-setting survex block parent {sb=}")
                sb.parent = existingparent  # should be all the same
                sb.save()
    else:
        print(f" - Not seen this survexfile before '{fpath}' Loading...")  # fixed typo 'Loading. ..'
        # ===========
        parse_new_svx(fpath, svx_load)
        # ===========

    svx_load = None
    return True
2023-01-19 21:18:42 +00:00
2020-06-29 21:16:13 +01:00
def MakeSurvexFileRoot():
    """Returns a file_object.path = SURVEX_TOPNAME associated with
    directory_object.path = SURVEX_DATA.

    Creates and saves both objects; they are mutually dependent, so the
    fileroot needs a second save after the directory link is attached.
    """
    # Removed dead code: a Cave queryset filtered on kataster_number "000"
    # was assigned to an unused local and (being lazy) never even evaluated.

    fileroot = SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
    fileroot.save()
    directoryroot = SurvexDirectory(path=settings.SURVEX_DATA, primarysurvexfile=fileroot)
    # MariaDB doesn't like this hack. Complains about non-null cave_id EVEN THOUGH our model file says this is OK:
    # cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
    directoryroot.save()
    fileroot.survexdirectory = directoryroot  # i.e. SURVEX_DATA/SURVEX_TOPNAME
    fileroot.save()  # mutually dependent objects need a double-save like this
    return fileroot
2023-01-19 21:18:42 +00:00
2023-03-23 21:26:16 +00:00
def MakeFileRoot(fn):
    """Returns a file_object.path = _unseens.svx associated with directory_object.path = SURVEX_DATA
    or tries to find the primary survex file for this cave.

    If no cave can be identified from 'fn' (and 'fn' is not the UNSEENS
    pseudo-file) a new cave is created for it.
    """
    cave = IdentifyCave(fn)
    if not cave:
        if fn != UNSEENS:
            cave = create_new_cave(fn)
    print(f" - Making/finding a new root survexfile for this import: {fn}")

    fileroot = SurvexFile(path=fn, cave=cave)
    try:
        fileroot.survexdirectory = SurvexDirectory.objects.get(id=1)  # default
    except Exception:  # narrowed from bare 'except:' — typically DoesNotExist on a fresh db
        fileroot.survexdirectory = None

    fileroot.save()
    # NOTE: cave is already set via the constructor above; the old redundant
    # post-save re-assignment has been removed.
    print(f" - new fileroot {type(fileroot)} for {fn} with cave {cave} - {fileroot}")
    return fileroot
2020-06-23 23:34:08 +01:00
2023-01-19 21:18:42 +00:00
2020-06-27 18:00:24 +01:00
def LoadSurvexBlocks():
    """Top-level survex import: flush all survex-derived rows from the
    database, rebuild the root file/block objects, then walk and parse the
    whole survex tree via FindAndLoadSurvex().

    Side effects: deletes and recreates many database rows; prints progress
    to stdout/stderr; reads/resets module globals (dup_includes, dataissues,
    person_pending_cache).
    """
    global dup_includes
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)
    start = time.time()

    print(" - Flushing All Survex Blocks...")
    # why does this increase memory use by 20 MB ?!
    # We have foreign keys, Django needs to load the related objects
    # in order to resolve how the relation should handle the deletion:
    # https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.ForeignKey.on_delete
    SurvexBlock.objects.all().delete()
    SurvexFile.objects.all().delete()
    SurvexDirectory.objects.all().delete()
    SurvexPersonRole.objects.all().delete()
    SurvexStation.objects.all().delete()
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now. Foreign key objects loaded on deletion. ", file=sys.stderr)
    print(" - Flushing survex Data Issues ")
    # reset the module-level cache used by stash_data_issue()/store_data_issues()
    global dataissues
    dataissues = []
    DataIssue.objects.filter(parser="survex").delete()
    DataIssue.objects.filter(parser="svxdate").delete()
    DataIssue.objects.filter(parser="survexleg").delete()
    DataIssue.objects.filter(parser="survexunits").delete()
    DataIssue.objects.filter(parser="survex team").delete()
    # DataIssue.objects.filter(parser="xEntrances").delete()
    print(" - survex Data Issues flushed")
    mem1 = get_process_memory()
    print(f" - MEM:{mem1:7.2f} MB now ", file=sys.stderr)

    survexfileroot = MakeSurvexFileRoot()
    # this next makes a block_object assciated with a file_object.path = SURVEX_TOPNAME
    survexblockroot = SurvexBlock(
        name=ROOTBLOCK, survexpath="", survexfile=survexfileroot, legsall=0, legslength=0.0
    )
    # crashes here sometimes on MariaDB complaining that cave_id should not be null. But it should be.
    # django.db.utils.IntegrityError: (1048, "Column 'cave_id' cannot be null")
    # fix by restarting db on server
    # sudo service mariadb stop
    # sudo service mariadb start
    survexblockroot.save()

    # second root block for the "unseen oddments" collation file
    omitsfileroot = MakeFileRoot(UNSEENS)
    survexomitsroot = SurvexBlock(
        name=OMITBLOCK, survexpath="", survexfile=omitsfileroot, legsall=0, legslength=0.0
    )
    survexomitsroot.save()

    print(" - Loading Survex Blocks...")
    memstart = get_process_memory()
    # ----------------------------------------------------------------
    FindAndLoadSurvex(survexblockroot)
    # ----------------------------------------------------------------
    memend = get_process_memory()
    print(f" - MEMORY start:{memstart:.3f} MB end:{memend:.3f} MB increase={memend - memstart:.3f} MB")

    survexblockroot.save()

    # Report any *team entries that were never resolved to known people.
    global person_pending_cache
    for sb in person_pending_cache:
        if len(person_pending_cache[sb]) > 0:
            print(f"")
            message = f"! PENDING team list not emptied {sb.survexfile.path} {len(person_pending_cache[sb])} people: {person_pending_cache[sb]}"
            stash_data_issue(parser="survex", message=message, url=None, sb=(sb.survexfile.path))
            print(message)

    # duration = time.time() - start
    # print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
    store_data_issues()
    # duration = time.time() - start
    # print(f" - TIME: {duration:7.2f} s", file=sys.stderr)
    if dup_includes > 0:
        print(f" - ERROR: There are {dup_includes} duplicate *includes in the final list")
    print(" - Loaded All Survex Blocks.")