troggle-unchained/parsers/surveys.py

199 lines
7.6 KiB
Python
Raw Normal View History

2020-05-24 13:30:39 +01:00
import sys
import os
import types
import logging
import stat
2011-07-11 02:10:22 +01:00
import csv
import re
import datetime
2020-05-24 13:30:39 +01:00
2020-05-24 01:57:06 +01:00
from PIL import Image
2011-07-11 02:10:22 +01:00
from utils import save_carefully
2020-05-24 13:30:39 +01:00
from functools import reduce
import settings
#from troggle.core.models import *
#from troggle.core.models_caves import *
from troggle.core.models_survex import SurvexScanSingle, ScansFolder, TunnelFile
2011-07-11 02:10:22 +01:00
def get_or_create_placeholder(year):
    """Return (creating on demand) the placeholder LogbookEntry for *year*.

    Every survey must be related to a logbook entry, but there is no way to
    automatically match surveys to trips, so each year gets a single
    placeholder entry that surveys hang off until re-attached by hand.
    """
    y = int(year)
    lookupAttribs = {'date__year': y, 'title': "placeholder for surveys",}
    nonLookupAttribs = {
        'text': "surveys temporarily attached to this should be re-attached to their actual trips",
        'date': datetime.date(y, 1, 1),
    }
    entry, _newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
    return entry
def listdir(*directories):
    """Return the folder names under settings.SURVEYS/<directories...>.

    Tries the local filesystem first; if that fails (scans hosted on a
    remote server) it fetches a "listdir" URL and parses folder names out
    of the response body, one per line.
    """
    try:
        return os.listdir(os.path.join(settings.SURVEYS, *directories))
    except OSError:
        # Remote fallback: settings.SURVEYS is assumed to be a URL prefix
        # here -- TODO confirm against deployment config.
        import urllib.request, urllib.parse, urllib.error
        url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
        folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
        # readlines() yields bytes with trailing newlines; the old code
        # called bytes.rstrip(str) which raises TypeError on Python 3.
        return [folder.decode("utf-8").strip().rstrip("/") for folder in folders]
# handles url or file, so we can refer to a set of scans on another server
def GetListDir(sdir):
    """Return [(name, fullpath, isdir), ...] for the visible entries of *sdir*.

    Hidden entries (leading ".") are skipped. The http:// branch was never
    implemented -- it previously referenced urllib without importing it.
    """
    res = [ ]
    if sdir.startswith("http://"):
        # Remote listing was never written; fail loudly rather than
        # NameError on the unimported urllib it used to reference.
        assert False, "Not written"
    else:
        for f in os.listdir(sdir):
            if f[0] != ".":
                ff = os.path.join(sdir, f)
                res.append((f, ff, os.path.isdir(ff)))
    return res
def LoadListScansFile(scansfolder):
    """Create a SurvexScanSingle record for every scan image found in the
    wallet folder *scansfolder*, printing a "." every 10 files as progress.
    """
    gld = [ ]
    # flatten out any directories in these wallet folders - should not be any
    for (fyf, ffyf, fisdiryf) in GetListDir(scansfolder.fpath):
        if fisdiryf:
            gld.extend(GetListDir(ffyf))
        else:
            gld.append((fyf, ffyf, fisdiryf))

    c = 0
    for (fyf, ffyf, fisdiryf) in gld:
        #assert not fisdiryf, ffyf
        # Only image/document files become scan records.  The original
        # pattern ended with an inline (?i), which is an error from
        # Python 3.11 on; use the IGNORECASE flag instead.
        if re.search(r"\.(?:png|jpg|jpeg|pdf|svg|gif)$", fyf, re.IGNORECASE):
            survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, scansfolder=scansfolder)
            survexscansingle.save()
            c += 1
            if c >= 10:
                print(".", end='')
                c = 0
# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():
    """Wipe and rebuild every ScansFolder/SurvexScanSingle record by walking
    the survey-scans directories on disk (or remote server via GetListDir)."""
    print(' - Loading Survey Scans')

    SurvexScanSingle.objects.all().delete()
    ScansFolder.objects.all().delete()
    print(' - deleting all scansFolder and scansSingle objects')

    # first do the smkhs (large kh survey scans) directory
    manyscansfoldersmkhs = ScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "../surveys/smkhs"), walletname="smkhs")
    print("smkhs", end=' ')
    if os.path.isdir(manyscansfoldersmkhs.fpath):
        manyscansfoldersmkhs.save()
        LoadListScansFile(manyscansfoldersmkhs)

    # iterate into the surveyscans directory
    print(' - ', end=' ')
    for f, ff, fisdir in GetListDir(settings.SURVEY_SCANS):
        if not fisdir:
            continue

        # do the year folders: each subfolder is a wallet
        if re.match(r"\d\d\d\d$", f):
            print("%s" % f, end=' ')
            for fy, ffy, fisdiry in GetListDir(ff):
                if fisdiry:
                    assert fisdiry, ffy  # always true inside this branch; kept from original
                    scansfolder = ScansFolder(fpath=ffy, walletname=fy)
                    scansfolder.save()
                    LoadListScansFile(scansfolder)

        # do the non-year folders (skipping the thumbnail cache)
        elif f != "thumbs":
            scansfolder = ScansFolder(fpath=ff, walletname=f)
            scansfolder.save()
            LoadListScansFile(scansfolder)
def FindTunnelScan(tunnelfile, path):
    """Link *tunnelfile* to whatever *path* references.

    *path* is a bytes sfsketch value extracted from the tunnel xml.  If it
    looks like "<wallet>/<image>" the matching ScansFolder/SurvexScanSingle
    records are added to the tunnelfile's relations; otherwise, if it is not
    an image at all, it is treated as a reference to another tunnel xml file.
    """
    scansfolder, scansfile = None, None
    mscansdir = re.search(rb"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
    if mscansdir:
        # NOTE(review): group(1)/group(2) are bytes (bytes pattern on a bytes
        # path); filtering a text field with bytes relies on implicit
        # coercion -- confirm these filters actually match.
        scansfolderl = ScansFolder.objects.filter(walletname=mscansdir.group(1))
        if len(scansfolderl):
            assert len(scansfolderl) == 1
            scansfolder = scansfolderl[0]
        if scansfolder:
            scansfilel = scansfolder.survexscansingle_set.filter(name=mscansdir.group(2))
            if len(scansfilel):
                if len(scansfilel) > 1:
                    print("BORK more than one image filename matches filter query. ", scansfilel[0])
                    print("BORK ", tunnelfile.tunnelpath, path)
                    print("BORK ", mscansdir.group(1), mscansdir.group(2), len(scansfilel))
                    #assert len(scansfilel) == 1
                scansfile = scansfilel[0]
        if scansfolder:
            tunnelfile.manyscansfolders.add(scansfolder)
        if scansfile:
            tunnelfile.survexscans.add(scansfile)

    # Not an image file: maybe a reference to another tunnel drawing.
    # The original pattern ended with an inline (?i), which is an error
    # from Python 3.11 on; use the IGNORECASE flag instead.
    elif path and not re.search(rb"\.(?:png|jpg|pdf|jpeg)$", path, re.IGNORECASE):
        name = os.path.split(path)[1]
        #print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name)
        rtunnelfilel = TunnelFile.objects.filter(tunnelname=name)
        if len(rtunnelfilel):
            assert len(rtunnelfilel) == 1, ("two paths with name of", path, "need more discrimination coded")
            rtunnelfile = rtunnelfilel[0]
            #print "ttt", tunnelfile.tunnelpath, path, name, rtunnelfile.tunnelpath
            tunnelfile.tunnelcontains.add(rtunnelfile)

    tunnelfile.save()
def SetTunnelfileInfo(tunnelfile):
    """Read the tunnel xml file from disk, fill in filesize/bfontcolours/
    npaths on *tunnelfile*, and wire up links to the scans it references."""
    ff = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
    tunnelfile.filesize = os.stat(ff)[stat.ST_SIZE]

    # Context manager so the handle is closed even if parsing fails.
    with open(ff, 'rb') as fin:
        ttext = fin.read()

    if tunnelfile.filesize <= 0:
        print("DEBUG - zero length xml file", ff)
        return

    mtype = re.search(rb"<(fontcolours|sketch)", ttext)
    assert mtype, ff
    # BUG FIX: ttext is bytes so group(1) is bytes; comparing it against the
    # str "fontcolours" was always False.  Compare bytes to bytes.
    tunnelfile.bfontcolours = (mtype.group(1) == b"fontcolours")
    tunnelfile.npaths = len(re.findall(rb"<skpath", ttext))
    tunnelfile.save()

    # Example of the xml being matched:
    # <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
    # <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
    for path, style in re.findall(rb'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
        FindTunnelScan(tunnelfile, path)

    # should also scan and look for survex blocks that might have been included
    # and also survex titles as well.
    tunnelfile.save()
def LoadTunnelFiles():
    """Rebuild every TunnelFile record by walking settings.TUNNEL_DATA for
    .xml drawings, then parse each one to fill in its details."""
    tunneldatadir = settings.TUNNEL_DATA
    TunnelFile.objects.all().delete()

    # Walk the tree with an explicit stack of directory paths relative to
    # the data root (depth-first, same traversal as before).
    pending = [""]
    while pending:
        reldir = pending.pop()
        for entry in os.listdir(os.path.join(tunneldatadir, reldir)):
            # skip hidden files and editor backup files
            if entry[0] == "." or entry[-1] == "~":
                continue
            relpath = os.path.join(reldir, entry)
            abspath = os.path.join(tunneldatadir, relpath)
            if os.path.isdir(abspath):
                pending.append(relpath)
            elif entry[-4:] == ".xml":
                TunnelFile(tunnelpath=relpath, tunnelname=os.path.split(entry[:-4])[1]).save()

    for tunnelfile in TunnelFile.objects.all():
        SetTunnelfileInfo(tunnelfile)