import sys, os, logging
#sys.path.append('C:\\Expo\\expoweb')
#from troggle import *
#os.environ['DJANGO_SETTINGS_MODULE']='troggle.settings'
import settings
from core.models import *
from PIL import Image
#import settings
#import core.models as models
import csv
import re
import datetime
from utils import save_carefully


def get_or_create_placeholder(year):
    """ All surveys must be related to a logbookentry. We don't have a way to
    automatically figure out which survey went with which logbookentry,
    so we create a survey placeholder logbook entry for each year. This
    function always returns such a placeholder, and creates it if it doesn't
    exist yet.
    """
    lookupAttribs = {'date__year':int(year), 'title':"placeholder for surveys"}
    nonLookupAttribs = {'text':"surveys temporarily attached to this should be re-attached to their actual trips", 'date':datetime.date(int(year), 1, 1)}
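    # save_carefully (as used here) looks up an existing LogbookEntry by
    # lookupAttribs and updates it with nonLookupAttribs, creating it from
    # both if absent, so repeated imports don't duplicate the placeholder.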
    placeholder_logbook_entry, newly_created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
    return placeholder_logbook_entry
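
# Example (a sketch): repeated calls for the same year are idempotent, e.g.
#   entry = get_or_create_placeholder(2004)
#   assert get_or_create_placeholder(2004).pk == entry.pk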


# dead
def readSurveysFromCSV():
    try: # open the CSV locally; fall back to fetching it over HTTP (these two could probably be combined)
        surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
    except IOError:
        import cStringIO, urllib
        surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
    dialect = csv.Sniffer().sniff(surveytab.read())
    surveytab.seek(0, 0)
    surveyreader = csv.reader(surveytab, dialect=dialect)
    headers = surveyreader.next()
    header = dict(zip(headers, range(len(headers)))) # map each header name to its column number
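    # e.g. headers of ['Year', 'Survey Number', 'Comments', ...] give
    # header == {'Year': 0, 'Survey Number': 1, 'Comments': 2, ...},
    # so a cell in a row is read as survey[header['Year']]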

    # test if the expeditions have been added yet
    if Expedition.objects.count() == 0:
        print "There are no expeditions in the database. Please run the logbook parser."
        sys.exit()

    logging.info("Deleting all scanned images")
    ScannedImage.objects.all().delete()

    logging.info("Deleting all survey objects")
    Survey.objects.all().delete()

    logging.info("Beginning to import surveys from " + str(os.path.join(settings.SURVEY_SCANS, "Surveys.csv")) + "\n" + "-" * 60 + "\n")

    for survey in surveyreader:
        # I hate this, but some surveys have a letter, e.g. 2000#34a. The next line deals with that.
        walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)', survey[header['Survey Number']])
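        # e.g. '34a' matches as number='34', letter='a'; '113' as number='113', letter=''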
        # print walletNumberLetter.groups()
        year = survey[header['Year']]

        surveyobj = Survey(
            expedition = Expedition.objects.filter(year=year)[0],
            wallet_number = walletNumberLetter.group('number'),
            logbook_entry = get_or_create_placeholder(year),
            comments = survey[header['Comments']],
            location = survey[header['Location']]
            )
        surveyobj.wallet_letter = walletNumberLetter.group('letter')
        if survey[header['Finished']] == 'Yes':
            # try and find the sketch_scan
            pass
        surveyobj.save()

        logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")


# dead
def listdir(*directories):
    try:
        return os.listdir(os.path.join(settings.SURVEYS, *directories))
    except OSError:
        import urllib
        url = settings.SURVEYS + "/".join(["listdir"] + list(directories))
        folders = urllib.urlopen(url.replace("#", "%23")).readlines()
        return [folder.strip().rstrip("/") for folder in folders]
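
# Example (a sketch; the URL is hypothetical): with settings.SURVEYS set to
# "http://expo.example/surveys/", listdir("2000") would fall back to fetching
# http://expo.example/surveys/listdir/2000 and return one folder name per line.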


# add survey scans
def parseSurveyScans(year, logfile=None):
    # yearFileList = listdir(year.year)
    yearPath = os.path.join(settings.SURVEY_SCANS, "years", year.year)
    yearFileList = os.listdir(yearPath)
    print yearFileList
    for surveyFolder in yearFileList:
        try:
            surveyNumber = re.match(r'\d\d\d\d#0*(\d+)', surveyFolder).group(1)
            # scanList = listdir(year.year, surveyFolder)
            scanList = os.listdir(os.path.join(yearPath, surveyFolder))
        except AttributeError:
            print surveyFolder + " ignored",
            continue

        for scan in scanList:
            try:
                scanChopped = re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)', scan).groups()
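                # e.g. 'notes2.png' -> ('notes', '2', 'png'); 'plan.jpg' -> ('plan', '', 'jpg')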
                scanType, scanNumber, scanFormat = scanChopped
            except AttributeError:
                print scan + " ignored \r",
                continue

            if scanType == 'elev' or scanType == 'extend':
                scanType = 'elevation'

            if scanNumber == '':
                scanNumber = 1

            try:
                placeholder = get_or_create_placeholder(year=int(year.year))
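                # Django's get_or_create returns (object, created); the
                # defaults are only applied if a new Survey must be created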
                survey = Survey.objects.get_or_create(wallet_number=surveyNumber, expedition=year, defaults={'logbook_entry':placeholder})[0]
            except Survey.MultipleObjectsReturned:
                survey = Survey.objects.filter(wallet_number=surveyNumber, expedition=year)[0]

            scanPath = os.path.join(year.year, surveyFolder, scan)
            scanObj = ScannedImage(
                file=scanPath,
                contents=scanType,
                number_in_wallet=scanNumber,
                survey=survey,
                new_since_parsing=False,
                )
            # print "Added scanned image at " + str(scanObj)
            if scanFormat == "png":
                if isInterlacedPNG(os.path.join(settings.SURVEY_SCANS, scanPath)):
                    print scanPath + " is an interlaced PNG. No can do."
                    continue
            scanObj.save()


# dead
def parseSurveys(logfile=None):
    readSurveysFromCSV()
    for year in Expedition.objects.filter(year__gte=2000): # expos since 2000, because paths and filenames were nonstandard before then
        parseSurveyScans(year)


# dead
def isInterlacedPNG(filePath): # we need to check for interlaced PNGs because the thumbnail engine (PIL) can't handle them
    im = Image.open(filePath)
    print filePath
    if 'interlace' in im.info:
        return im.info['interlace']
    else:
        return False
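
# Example (a sketch): isInterlacedPNG("notes1.png") returns a truthy interlace
# flag when PIL records one in the image's info dict, and False when no such
# entry is present (the usual case for non-interlaced PNGs).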


# handles url or file, so we can refer to a set of scans on another server
def GetListDir(sdir):
    res = [ ]
    if sdir[:7] == "http://":
        assert False, "Not written"
        s = urllib.urlopen(sdir) # unreachable stub; urllib would also need importing
    else:
        for f in os.listdir(sdir):
            if f[0] != ".":
                ff = os.path.join(sdir, f)
                res.append((f, ff, os.path.isdir(ff)))
    return res
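
# Example (a sketch): for a directory holding "2000#02/" and "notes1.png",
# GetListDir(sdir) returns [("2000#02", sdir + "/2000#02", True),
# ("notes1.png", sdir + "/notes1.png", False)]; dot-files are skipped.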


# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans(surveyscansdir):
    SurvexScanSingle.objects.all().delete()
    SurvexScansFolder.objects.all().delete()

    for f, ff, fisdir in GetListDir(surveyscansdir):
        if not fisdir:
            continue

        # do the year folders
        if re.match(r"\d\d\d\d$", f):
            for fy, ffy, fisdiry in GetListDir(ff):
                assert fisdiry, ffy
                survexscansfolder = SurvexScansFolder(fpath=ffy, walletname=fy)
                survexscansfolder.save()
                for fyf, ffyf, fisdiryf in GetListDir(ffy):
                    assert not fisdiryf, ffyf
                    survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
                    survexscansingle.save()

        elif f != "thumbs":
            survexscansfolder = SurvexScansFolder(fpath=ff, walletname=f)
            survexscansfolder.save()
            gld = [ ]

            # flatten out any directories in these book files
            for (fyf, ffyf, fisdiryf) in GetListDir(ff):
                if fisdiryf:
                    gld.extend(GetListDir(ffyf))
                else:
                    gld.append((fyf, ffyf, fisdiryf))

            for (fyf, ffyf, fisdiryf) in gld:
                assert not fisdiryf, ffyf
                survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
                survexscansingle.save()
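
# Expected layout (a sketch): year folders hold wallet folders of scan files,
# e.g. <surveyscansdir>/2000/2000#02/notes1.png; any other top-level folder
# except "thumbs" is treated as a wallet itself, with subfolders flattened.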


def LoadTunnelFiles(tunneldatadir):
    TunnelFile.objects.all().delete()
    tunneldirs = [ "" ]
    while tunneldirs:
        tunneldir = tunneldirs.pop()
        for f in os.listdir(os.path.join(tunneldatadir, tunneldir)):
            if f[0] == "." or f[-1] == "~":
                continue
            lf = os.path.join(tunneldir, f)
            ff = os.path.join(tunneldatadir, lf)
            if os.path.isdir(ff):
                tunneldirs.append(lf)
            elif f[-4:] == ".xml":
                fin = open(ff)
                mtype = re.search("<(fontcolours|sketch)", fin.read(200))
                assert mtype, lf
                fin.close()
                tunnelfile = TunnelFile(tunnelpath=lf, bfontcolours=(mtype.group(1)=="fontcolours"))
                tunnelfile.save()
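
# A tunnel XML file is classified by the opening tag found in its first 200
# bytes: a file starting with "<sketch" is a drawing, while one starting with
# "<fontcolours" gets bfontcolours=True.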


# survexscans = models.ManyToManyField("SurvexScanSingle")
# survexblocks = models.ManyToManyField("SurvexBlock")
# tunnelcontains = models.ManyToManyField("TunnelFile") # case when it's a frame type