mirror of https://expo.survex.com/repositories/troggle/.git
synced 2024-11-25 16:51:54 +00:00

adding *ref to troggle svx parser

parent 2918b4b92c
commit e4290c4ab0
@@ -457,7 +457,7 @@ class Cave(TroggleModel):
         return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))

     def __unicode__(self, sep = u": "):
-        return unicode(self.slug())
+        return unicode("slug:"+self.slug())

     def get_QMs(self):
         return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
@@ -266,16 +266,17 @@ class JobQueue():
         print "-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE']

         # but because the user may be expecting to add this to a db with lots of tables already there,
-        # the jobque may not start from scratch so we need to initialise the db properly first.
+        # the jobque may not start from scratch so we need to initialise the db properly first
+        # because we are using an empty :memory: database
         # But initiating twice crashes, so be sure to do it once only.
         if ("reinit",reinit_db) not in self.queue:
             reinit_db()
         if ("dirsredirect",dirsredirect) not in self.queue:
             dirsredirect()
         if ("caves",import_caves) not in self.queue:
-            import_caves()
+            import_caves() # sometime extract the initialising code from this and put in reinit
         if ("people",import_people) not in self.queue:
-            import_people()
+            import_people() # sometime extract the initialising code from this and put in reinit

         django.db.close_old_connections() # maybe not needed here

@@ -290,6 +291,7 @@ class JobQueue():
         settings.DATABASES['default']['NAME'] = dbname
         print "-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE']

+        django.db.close_old_connections() # maybe not needed here
         for j in self.results_order:
             self.results[j].pop() # throw away results from :memory: run
             self.results[j].append(None) # append a placeholder
@@ -96,8 +96,10 @@ stardatadefault = {"type":"normal", "t":"leg", "from":0, "to":1, "tape":2, "comp
 stardataparamconvert = {"length":"tape", "bearing":"compass", "gradient":"clino"}

 regex_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
-regex_ref = re.compile(r'.*?ref.*?(\d+)\s*#\s*(\d+)')
+regex_ref = re.compile(r'.*?ref.*?(\d+)\s*#\s*(X)?\s*(\d+)')
 regex_star = re.compile(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
+# years from 1960 to 2039
+regex_starref = re.compile(r'^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$(?i)')
 regex_team = re.compile(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)")
 regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)")
 regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
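A minimal sketch (not part of the commit; the sample strings are invented for illustration, and it assumes the repository's Python 2 style) of what the widened regex_ref and the new regex_starref capture from typical wallet references. The optional "X" marker is now its own group, so each match yields (year, optional "X", wallet number):

import re

regex_ref     = re.compile(r'.*?ref.*?(\d+)\s*#\s*(X)?\s*(\d+)')
regex_starref = re.compile(r'^\s*\*ref[\s.:]*((?:19[6789]\d)|(?:20[0123]\d))\s*#?\s*(X)?\s*(.*?\d+.*?)$(?i)')

# groups are (year, optional "X", wallet number)
print regex_ref.match("ref 2018#X12").groups()       # ('2018', 'X', '12')
print regex_ref.match("ref: 2018 # 7").groups()      # ('2018', None, '7')
print regex_starref.match("*ref 2018#X12").groups()  # ('2018', 'X', '12')
print regex_starref.match("*ref 1999 # 3").groups()  # ('1999', None, '3')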
@@ -145,14 +147,23 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
         # detect ref line pointing to the scans directory
         mref = comment and regex_ref.match(comment)
         if mref:
-            refscan = "%s#%s" % (mref.group(1), mref.group(2))
+            yr, letterx, wallet = mref.groups()
+            if not letterx:
+                letterx = ""
+            else:
+                letterx = "X"
+            if len(wallet)<2:
+                wallet = "0" + wallet
+            refscan = "%s#%s%s" % (yr, letterx, wallet )
+            #print(' - Wallet ;ref - %s - looking for survexscansfolder' % refscan)
             survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
             if survexscansfolders:
                 survexblock.survexscansfolder = survexscansfolders[0]
                 #survexblock.refscandir = "%s/%s%%23%s" % (mref.group(1), mref.group(1), mref.group(2))
                 survexblock.save()
-                print(' - Wallet *ref - %s' % refscan)
-            continue
+                # print(' - Wallet ; ref - %s - found in survexscansfolders' % refscan)
+            else:
+                print(' - Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet))

         # This whole section should be moved if we can have *QM become a proper survex command
         # Spec of QM in SVX files, currently commented out need to add to survex
@@ -203,6 +214,28 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
         if not sline:
             continue

+        # detect the star ref command
+        mstar = regex_starref.match(sline)
+        if mstar:
+            yr,letterx,wallet = mstar.groups()
+            if not letterx:
+                letterx = ""
+            else:
+                letterx = "X"
+            if len(wallet)<2:
+                wallet = "0" + wallet
+            assert (int(yr)>1960 and int(yr)<2039), "Wallet year out of bounds: %s" % yr
+            assert (int(wallet)<100), "Wallet number more than 100: %s" % wallet
+            refscan = "%s#%s%s" % (yr, letterx, wallet)
+            survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
+            if survexscansfolders:
+                survexblock.survexscansfolder = survexscansfolders[0]
+                survexblock.save()
+                # print(' - Wallet *REF - %s - found in survexscansfolders' % refscan)
+            else:
+                print(' - Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet))
+            continue
+
         # detect the star command
         mstar = regex_star.match(sline)
         if not mstar:
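Both new blocks (the ;ref comment handler in the earlier hunk and this *ref star-command handler) normalise the wallet name the same way before the SurvexScansFolder lookup. A stand-alone sketch of that normalisation, using an invented helper name purely for illustration:

def wallet_name(yr, letterx, wallet):
    # pad single-digit wallet numbers and reduce the optional marker to "X" or ""
    letterx = "X" if letterx else ""
    if len(wallet) < 2:
        wallet = "0" + wallet
    assert 1960 < int(yr) < 2039, "Wallet year out of bounds: %s" % yr
    assert int(wallet) < 100, "Wallet number more than 100: %s" % wallet
    return "%s#%s%s" % (yr, letterx, wallet)

print wallet_name("2018", "X", "5")    # 2018#X05
print wallet_name("1999", None, "12")  # 1999#12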
@@ -224,7 +257,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
         cmd = cmd.lower()
         if re.match("include$(?i)", cmd):
             includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
-            print(' - Include file found including - ' + includepath)
+            print(' - Include path found including - ' + includepath)
             # Try to find the cave in the DB if not use the string as before
             path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
             if path_match:
@@ -234,7 +267,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
                 if cave:
                     survexfile.cave = cave
                 else:
-                    print(' - No match (i) for %s' % includepath)
+                    print(' - No match in DB (i) for %s, so loading..' % includepath)
             includesurvexfile = models.SurvexFile(path=includepath)
             includesurvexfile.save()
             includesurvexfile.SetDirectory()
@@ -345,7 +378,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
         else:
             #print(' - Stuff')
             if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
-                  "calibrate", "set", "infer", "alias", "ref", "cs", "declination", "case"]:
+                  "calibrate", "set", "infer", "alias", "cs", "declination", "case"]:
                 print("Unrecognised command in line:", cmd, line, survexblock, survexblock.survexfile.path)
     endstamp = datetime.now()
     timetaken = endstamp - stamp
@@ -414,14 +447,17 @@ def LoadPos():
     cachefile = settings.SURVEX_DATA + "posnotfound.cache"
     notfoundbefore = {}
     if os.path.isfile(cachefile):
+        # this is not a good test. 1623.svx may never change but *included files may have done.
+        # When the *include is unrolled, we will have a proper timestamp to use
+        # and can increase the timeout from 3 days to 30 days.
         updtsvx = os.path.getmtime(topdata + ".svx")
         updtcache = os.path.getmtime(cachefile)
         age = updtcache - updtsvx
         print(' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) )))

         now = time.time()
-        if now - updtcache > 30*24*60*60:
-            print " cache is more than 30 days old. Deleting."
+        if now - updtcache > 3*24*60*60:
+            print " cache is more than 3 days old. Deleting."
             os.remove(cachefile)
         if age < 0 :
             print " cache is stale."
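For context, a small sketch (invented helper, not from the commit) of the freshness rule this hunk tightens: the posnotfound cache is discarded when it is more than 3 days old, or when it is older than the top-level .svx file it summarises:

import os, time
from datetime import timedelta

def cache_is_usable(cachefile, svxfile, max_age_days=3):
    if not os.path.isfile(cachefile):
        return False
    age = os.path.getmtime(cachefile) - os.path.getmtime(svxfile)
    if time.time() - os.path.getmtime(cachefile) > max_age_days*24*60*60:
        return False   # cache is more than 3 days old
    if age < 0:
        return False   # cache is stale: older than the .svx data
    print " not-found cache is fresher by: %s" % timedelta(seconds=age)
    return True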