unused code commented out

Philip Sargent 2020-05-24 13:30:39 +01:00
parent a3e564855a
commit 40ad04b79f


@@ -1,16 +1,21 @@
-import sys, os, types, logging, stat
-#sys.path.append('C:\\Expo\\expoweb')
-#from troggle import *
-#os.environ['DJANGO_SETTINGS_MODULE']='troggle.settings'
-import settings
-from troggle.core.models import *
-from PIL import Image
-#import settings
-#import core.models as models
+from __future__ import (absolute_import, division,
+                        print_function, unicode_literals)
+import sys
+import os
+import types
+import logging
+import stat
 import csv
 import re
 import datetime
+#from PIL import Image
 from utils import save_carefully
+from functools import reduce
+import settings
+from troggle.core.models import *
 
 def get_or_create_placeholder(year):
     """ All surveys must be related to a logbookentry. We don't have a way to
@@ -24,146 +29,146 @@ def get_or_create_placeholder(year):
     placeholder_logbook_entry, newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
     return placeholder_logbook_entry
-# dead
-def readSurveysFromCSV():
-    try: # could probably combine these two
-        surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
-    except IOError:
-        import cStringIO, urllib
-        surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
-    dialect=csv.Sniffer().sniff(surveytab.read())
-    surveytab.seek(0,0)
-    surveyreader = csv.reader(surveytab,dialect=dialect)
-    headers = surveyreader.next()
-    header = dict(zip(headers, range(len(headers)))) #set up a dictionary where the indexes are header names and the values are column numbers
-    # test if the expeditions have been added yet
-    if Expedition.objects.count()==0:
-        print("There are no expeditions in the database. Please run the logbook parser.")
-        sys.exit()
-    logging.info("Deleting all scanned images")
-    ScannedImage.objects.all().delete()
-    logging.info("Deleting all survey objects")
-    Survey.objects.all().delete()
-    logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")
-    for survey in surveyreader:
-        #I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
-        walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
-        # print(walletNumberLetter.groups())
-        year=survey[header['Year']]
-        surveyobj = Survey(
-            expedition = Expedition.objects.filter(year=year)[0],
-            wallet_number = walletNumberLetter.group('number'),
-            logbook_entry = get_or_create_placeholder(year),
-            comments = survey[header['Comments']],
-            location = survey[header['Location']]
-            )
-        surveyobj.wallet_letter = walletNumberLetter.group('letter')
-        if survey[header['Finished']]=='Yes':
-            #try and find the sketch_scan
-            pass
-        surveyobj.save()
-        logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
+# obsolete surveys.csv does not exist.
+# def readSurveysFromCSV():
+#     try: # could probably combine these two
+#         surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
+#     except IOError:
+#         import io, urllib.request, urllib.parse, urllib.error
+#         surveytab = io.StringIO(urllib.request.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
+#     dialect=csv.Sniffer().sniff(surveytab.read())
+#     surveytab.seek(0,0)
+#     surveyreader = csv.reader(surveytab,dialect=dialect)
+#     headers = next(surveyreader)
+#     header = dict(list(zip(headers, list(range(len(headers)))))) #set up a dictionary where the indexes are header names and the values are column numbers
+#     # test if the expeditions have been added yet
+#     if Expedition.objects.count()==0:
+#         print("There are no expeditions in the database. Please run the logbook parser.")
+#         sys.exit()
+#     logging.info("Deleting all scanned images")
+#     ScannedImage.objects.all().delete()
+#     logging.info("Deleting all survey objects")
+#     Survey.objects.all().delete()
+#     logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")
+#     for survey in surveyreader:
+#         #I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
+#         walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
+#         # print(walletNumberLetter.groups())
+#         year=survey[header['Year']]
+#         surveyobj = Survey(
+#             expedition = Expedition.objects.filter(year=year)[0],
+#             wallet_number = walletNumberLetter.group('number'),
+#             logbook_entry = get_or_create_placeholder(year),
+#             comments = survey[header['Comments']],
+#             location = survey[header['Location']]
+#             )
+#         surveyobj.wallet_letter = walletNumberLetter.group('letter')
+#         if survey[header['Finished']]=='Yes':
+#             #try and find the sketch_scan
+#             pass
+#         surveyobj.save()
+#         logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
 # dead
 def listdir(*directories):
     try:
         return os.listdir(os.path.join(settings.SURVEYS, *directories))
     except:
-        import urllib
+        import urllib.request, urllib.parse, urllib.error
         url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
-        folders = urllib.urlopen(url.replace("#", "%23")).readlines()
+        folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
         return [folder.rstrip(r"/") for folder in folders]
 # add survey scans
-def parseSurveyScans(expedition, logfile=None):
-#    yearFileList = listdir(expedition.year)
-    try:
-        yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
-        yearFileList=os.listdir(yearPath)
-        print(yearFileList)
-        for surveyFolder in yearFileList:
-            try:
-                surveyNumber=re.match(r'\d\d\d\d#(X?)0*(\d+)',surveyFolder).groups()
-                #scanList = listdir(expedition.year, surveyFolder)
-                scanList=os.listdir(os.path.join(yearPath,surveyFolder))
-            except AttributeError:
-                print("Ignoring file in year folder: " + surveyFolder + "\r")
-                continue
-            for scan in scanList:
-                # Why does this insist on renaming all the scanned image files?
-                # It produces duplicates names and all images have type .jpg in the scanObj.
-                # It seems to rely on end users being particularly diligent in filenames which is NGtH
-                try:
-                    #scanChopped=re.match(r'(?i).*(notes|elev|plan|extend|elevation)-?(\d*)\.(png|jpg|jpeg|pdf)',scan).groups()
-                    scanChopped=re.match(r'(?i)([a-z_-]*\d?[a-z_-]*)(\d*)\.(png|jpg|jpeg|pdf|top|dxf|svg|tdr|th2|xml|txt)',scan).groups()
-                    scanType,scanNumber,scanFormat=scanChopped
-                except AttributeError:
-                    print("Ignored (bad name format): " + surveyFolder + '/' + scan + "\r")
-                    continue
-                scanTest = scanType
-                scanType = 'notes'
-                match = re.search(r'(?i)(elev|extend)',scanTest)
-                if match:
-                    scanType = 'elevation'
-                match = re.search(r'(?i)(plan)',scanTest)
-                if match:
-                    scanType = 'plan'
-                if scanNumber=='':
-                    scanNumber=1
-                if type(surveyNumber)==types.TupleType:
-                    surveyLetter=surveyNumber[0]
-                    surveyNumber=surveyNumber[1]
-                try:
-                    placeholder=get_or_create_placeholder(year=int(expedition.year))
-                    survey=Survey.objects.get_or_create(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition, defaults={'logbook_entry':placeholder})[0]
-                except Survey.MultipleObjectsReturned:
-                    survey=Survey.objects.filter(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition)[0]
-                file_=os.path.join(yearPath, surveyFolder, scan)
-                scanObj = ScannedImage(
-                    file=file_,
-                    contents=scanType,
-                    number_in_wallet=scanNumber,
-                    survey=survey,
-                    new_since_parsing=False,
-                    )
-                print("Added scanned image at " + str(scanObj))
-                #if scanFormat=="png":
-                #if isInterlacedPNG(os.path.join(settings.SURVEY_SCANS, "surveyscans", file_)):
-                # print file_+ " is an interlaced PNG. No can do."
-                #continue
-                scanObj.save()
-    except (IOError, OSError):
-        yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
-        print(" ! No folder found for " + expedition.year + " at:- " + yearPath)
+# def parseSurveyScans(expedition, logfile=None):
+# #    yearFileList = listdir(expedition.year)
+#     try:
+#         yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
+#         yearFileList=os.listdir(yearPath)
+#         print(yearFileList)
+#         for surveyFolder in yearFileList:
+#             try:
+#                 surveyNumber=re.match(rb'\d\d\d\d#(X?)0*(\d+)',surveyFolder).groups()
+#                 #scanList = listdir(expedition.year, surveyFolder)
+#                 scanList=os.listdir(os.path.join(yearPath,surveyFolder))
+#             except AttributeError:
+#                 print(("Ignoring file in year folder: " + surveyFolder + "\r"))
+#                 continue
+#             for scan in scanList:
+#                 # Why does this insist on renaming all the scanned image files?
+#                 # It produces duplicates names and all images have type .jpg in the scanObj.
+#                 # It seems to rely on end users being particularly diligent in filenames which is NGtH
+#                 try:
+#                     #scanChopped=re.match(rb'(?i).*(notes|elev|plan|extend|elevation)-?(\d*)\.(png|jpg|jpeg|pdf)',scan).groups()
+#                     scanChopped=re.match(rb'(?i)([a-z_-]*\d?[a-z_-]*)(\d*)\.(png|jpg|jpeg|pdf|top|dxf|svg|tdr|th2|xml|txt)',scan).groups()
+#                     scanType,scanNumber,scanFormat=scanChopped
+#                 except AttributeError:
+#                     print(("Ignored (bad name format): " + surveyFolder + '/' + scan + "\r"))
+#                     continue
+#                 scanTest = scanType
+#                 scanType = 'notes'
+#                 match = re.search(rb'(?i)(elev|extend)',scanTest)
+#                 if match:
+#                     scanType = 'elevation'
+#                 match = re.search(rb'(?i)(plan)',scanTest)
+#                 if match:
+#                     scanType = 'plan'
+#                 if scanNumber=='':
+#                     scanNumber=1
+#                 if isinstance(surveyNumber, tuple):
+#                     surveyLetter=surveyNumber[0]
+#                     surveyNumber=surveyNumber[1]
+#                 try:
+#                     placeholder=get_or_create_placeholder(year=int(expedition.year))
+#                     survey=Survey.objects.get_or_create(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition, defaults={'logbook_entry':placeholder})[0]
+#                 except Survey.MultipleObjectsReturned:
+#                     survey=Survey.objects.filter(wallet_number=surveyNumber, wallet_letter=surveyLetter, expedition=expedition)[0]
+#                 file_=os.path.join(yearPath, surveyFolder, scan)
+#                 scanObj = ScannedImage(
+#                     file=file_,
+#                     contents=scanType,
+#                     number_in_wallet=scanNumber,
+#                     survey=survey,
+#                     new_since_parsing=False,
+#                     )
+#                 print(("Added scanned image at " + str(scanObj)))
+#                 #if scanFormat=="png":
+#                 #if isInterlacedPNG(os.path.join(settings.SURVEY_SCANS, "surveyscans", file_)):
+#                 # print file_+ " is an interlaced PNG. No can do."
+#                 #continue
+#                 scanObj.save()
+#     except (IOError, OSError):
+#         yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
+#         print((" ! No folder found for " + expedition.year + " at:- " + yearPath))
 # dead
-def parseSurveys(logfile=None):
-    try:
-        readSurveysFromCSV()
-    except (IOError, OSError):
-        print(" ! Survey CSV not found..")
-        pass
-    print " - Loading scans by expedition year"
-    for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
-        print "%s" % expedition,
-        parseSurveyScans(expedition)
+# def parseSurveys(logfile=None):
+#     try:
+#         readSurveysFromCSV()
+#     except (IOError, OSError):
+#         print(" ! Survey CSV not found..")
+#         pass
+#     print(" - Loading scans by expedition year")
+#     for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
+#         print("%s" % expedition, end=' ')
+#         parseSurveyScans(expedition)
 # dead
 def isInterlacedPNG(filePath): #We need to check for interlaced PNGs because the thumbnail engine can't handle them (uses PIL)
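The wallet-number regex in the commented-out readSurveysFromCSV above splits survey numbers such as 2000#34a into a numeric part and a trailing letter via named groups. A minimal standalone sketch of just that regex, kept separate from troggle's models (split_wallet is a hypothetical helper for illustration, not part of the codebase):

import re

# Same named-group pattern used by readSurveysFromCSV above.
WALLET_RE = re.compile(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)')

def split_wallet(raw):
    # Hypothetical helper: return the digits and any trailing letter separately.
    m = WALLET_RE.match(raw)
    return m.group('number'), m.group('letter')

print(split_wallet('34a'))   # -> ('34', 'a')
print(split_wallet('112'))   # -> ('112', '')
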
@@ -180,7 +185,7 @@ def GetListDir(sdir):
     res = [ ]
     if sdir[:7] == "http://":
         assert False, "Not written"
-        s = urllib.urlopen(sdir)
+        s = urllib.request.urlopen(sdir)
     else:
         for f in os.listdir(sdir):
             if f[0] != ".":
@@ -223,14 +228,14 @@ def LoadListScans():
     # iterate into the surveyscans directory
-    print ' - ',
+    print(' - ', end=' ')
     for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
         if not fisdir:
             continue
         # do the year folders
         if re.match(r"\d\d\d\d$", f):
-            print "%s" % f,
+            print("%s" % f, end=' ')
             for fy, ffy, fisdiry in GetListDir(ff):
                 if fisdiry:
                     assert fisdiry, ffy
@@ -257,9 +262,9 @@ def FindTunnelScan(tunnelfile, path):
             scansfilel = scansfolder.survexscansingle_set.filter(name=mscansdir.group(2))
             if len(scansfilel):
                 if len(scansfilel) > 1:
-                    print "BORK more than one image filename matches filter query. ", scansfilel[0]
-                    print "BORK ", tunnelfile.tunnelpath, path
-                    print "BORK ", mscansdir.group(1), mscansdir.group(2), len(scansfilel)
+                    print("BORK more than one image filename matches filter query. ", scansfilel[0])
+                    print("BORK ", tunnelfile.tunnelpath, path)
+                    print("BORK ", mscansdir.group(1), mscansdir.group(2), len(scansfilel))
                     #assert len(scansfilel) == 1
                 scansfile = scansfilel[0]
@@ -284,22 +289,22 @@ def FindTunnelScan(tunnelfile, path):
 def SetTunnelfileInfo(tunnelfile):
     ff = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
     tunnelfile.filesize = os.stat(ff)[stat.ST_SIZE]
-    fin = open(ff)
+    fin = open(ff,'rb')
     ttext = fin.read()
     fin.close()
     if tunnelfile.filesize <= 0:
-        print "DEBUG - zero length xml file", ff
+        print("DEBUG - zero length xml file", ff)
         return
-    mtype = re.search("<(fontcolours|sketch)", ttext)
+    mtype = re.search(r"<(fontcolours|sketch)", ttext)
     assert mtype, ff
     tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
-    tunnelfile.npaths = len(re.findall("<skpath", ttext))
+    tunnelfile.npaths = len(re.findall(r"<skpath", ttext))
     tunnelfile.save()
     # <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
     # <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
-    for path, style in re.findall('<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
+    for path, style in re.findall(r'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
         FindTunnelScan(tunnelfile, path)
     # should also scan and look for survex blocks that might have been included
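The same handful of Python 3 idioms recurs throughout this commit: next(reader) instead of reader.next(), urllib.request.urlopen and io.StringIO instead of urllib.urlopen and cStringIO, and print(..., end=' ') instead of a trailing-comma print statement. A self-contained sketch of that combination, with a placeholder URL rather than the expo SURVEY_SCANS path (read_survey_rows is illustrative only, not part of the codebase):

import csv
import io
import urllib.request

def read_survey_rows(url):
    # Fetch the CSV over HTTP and decode to text (io.StringIO takes str, not bytes).
    with urllib.request.urlopen(url) as resp:
        surveytab = io.StringIO(resp.read().decode('utf-8'))
    dialect = csv.Sniffer().sniff(surveytab.read())
    surveytab.seek(0, 0)
    surveyreader = csv.reader(surveytab, dialect=dialect)
    headers = next(surveyreader)                       # builtin next(), not .next()
    header = dict(zip(headers, range(len(headers))))   # header name -> column index
    for row in surveyreader:
        print("%s" % row[header['Year']], end=' ')     # print function; end=' ' replaces the trailing comma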