troggle-unchained/databaseReset.py

344 lines
13 KiB
Python
Raw Normal View History

2011-07-11 02:10:22 +01:00
import os
import time
import timeit
2011-07-11 02:10:22 +01:00
import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
2011-07-11 02:10:22 +01:00
from django.core import management
from django.db import connection
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models
2020-04-16 20:36:42 +01:00
import json
2011-07-11 02:10:22 +01:00
# Configuration pulled from Django settings: the database name and the
# credentials/email for the "expo" site user recreated after a database reset.
databasename=settings.DATABASES['default']['NAME']
expouser=settings.EXPOUSER
expouserpass=settings.EXPOUSERPASS
expouseremail=settings.EXPOUSER_EMAIL
2011-07-11 02:10:22 +01:00
def reload_db():
    """Wipe and recreate the database, then make empty tables and the expo user.

    For the sqlite backend the database file is simply deleted; for other
    engines (e.g. MySQL) the database is dropped and recreated via raw SQL.
    """
    if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
        try:
            os.remove(databasename)
        except OSError:
            # Database file did not exist yet - nothing to remove.
            pass
    else:
        cursor = connection.cursor()
        # Identifiers cannot be parameterised; databasename comes from
        # trusted settings, not from user input.
        cursor.execute("DROP DATABASE %s" % databasename)
        cursor.execute("CREATE DATABASE %s" % databasename)
        cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
        cursor.execute("USE %s" % databasename)
    # Recreate the empty tables and the expo superuser.  This was a verbatim
    # copy of syncuser()'s body; delegate so the two cannot drift apart.
    syncuser()
def syncuser():
    """Create the database tables and the expo superuser account.

    Needed after reloading the database from an SQL backup.
    """
    management.call_command('syncdb', interactive=False)
    expo = User.objects.create_user(expouser, expouseremail, expouserpass)
    expo.is_staff = True
    expo.is_superuser = True
    expo.save()
2011-07-11 02:10:22 +01:00
def make_dirs():
    """Make directories that troggle requires.

    Uses makedirs so that any missing intermediate directories are created
    too (plain os.mkdir raises OSError if the parent does not exist).
    """
    # TODO: should also deal with permissions here.
    if not os.path.isdir(settings.PHOTOS_ROOT):
        os.makedirs(settings.PHOTOS_ROOT)
def import_caves():
    """Load the cave descriptions into the database."""
    import parsers.caves as caves_parser
    print("Importing Caves")
    caves_parser.readcaves()
2011-07-11 02:10:22 +01:00
def import_people():
    """Load the people (from folk.csv) into the database."""
    import parsers.people as people_parser
    people_parser.LoadPersonsExpos()
def import_logbooks():
    """Load the expedition logbooks into the database."""
    import parsers.logbooks as logbooks_parser
    logbooks_parser.LoadLogbooks()
def import_survex():
    """Parse all the survex blocks, then the station positions."""
    import parsers.survex as survex_parser
    survex_parser.LoadAllSurvexBlocks()
    survex_parser.LoadPos()
def import_QMs():
    """Import the QM (question mark / lead) data.

    NOTE(review): importing parsers.QMs appears to trigger the parse as a
    module-level side effect - there is no explicit call here; confirm.
    """
    import parsers.QMs
    # import process itself runs on qm.csv in only 3 caves, not 264!
2011-07-11 02:10:22 +01:00
def import_surveys():
    """Parse the scanned survey notes, logging to settings.LOGFILE."""
    import parsers.surveys as surveys_parser
    surveys_parser.parseSurveys(logfile=settings.LOGFILE)
def import_surveyscans():
    """Load the list of survey scan files into the database."""
    import parsers.surveys as surveys_parser
    surveys_parser.LoadListScans()
def import_tunnelfiles():
    """Load the Tunnel drawing files into the database."""
    import parsers.surveys as surveys_parser
    surveys_parser.LoadTunnelFiles()
2020-04-14 20:46:45 +01:00
def pageredirects():
    """Create flatpage Redirect records mapping legacy URLs to current pages."""
    redirects = [("indxal.htm", reverse("caveindex"))]
    for legacy_url, target_url in redirects:
        entry = troggle.flatpages.models.Redirect(originalURL=legacy_url, newURL=target_url)
        entry.save()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2011-07-11 02:10:22 +01:00
def import_auto_logbooks():
    """Delete all PersonTrips and LogbookEntries, then re-parse every file
    under <EXPOWEB>/years/<year>/autologbook for each Expedition.

    NOTE(review): relies on troggle.core.models having been imported by the
    caller (the __main__ block does this) - confirm before calling elsewhere.
    """
    import parsers.logbooks
    import os
    # Clear out existing records first so the re-parse does not duplicate them.
    for pt in troggle.core.models.PersonTrip.objects.all():
        pt.delete()
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        lbe.delete()
    for expedition in troggle.core.models.Expedition.objects.all():
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 expedition.year,
                                 "autologbook")
        for root, dirs, filenames in os.walk(directory):
            for filename in filenames:
                print(os.path.join(root, filename))
                parsers.logbooks.parseAutoLogBookEntry(os.path.join(root, filename))
# Temporary function until the definitive source of data is transferred.
2011-07-11 02:10:22 +01:00
from django.template.defaultfilters import slugify
from django.template import Context, loader
def dumplogbooks():
    """Write each LogbookEntry out as an HTML file under
    <EXPOWEB>/years/<year>/autologbook, rendered with the
    'dataformat/logbookentry.html' template.

    NOTE(review): Python 2 only - uses the builtin `unicode`.
    """
    def get_name(pe):
        # Prefer the nickname; fall back to the person's first name.
        if pe.nickname:
            return pe.nickname
        else:
            return pe.person.first_name
    for lbe in troggle.core.models.LogbookEntry.objects.all():
        dateStr = lbe.date.strftime("%Y-%m-%d")
        directory = os.path.join(settings.EXPOWEB,
                                 "years",
                                 lbe.expedition.year,
                                 "autologbook")
        if not os.path.isdir(directory):
            os.mkdir(directory)
        # Filename is the date plus the slugified title, truncated to 50 chars.
        filename = os.path.join(directory,
                                dateStr + "." + slugify(lbe.title)[:50] + ".html")
        if lbe.cave:
            print(lbe.cave.reference())
            trip = {"title": lbe.title, "html":lbe.text, "cave": lbe.cave.reference(), "caveOrLocation": "cave"}
        else:
            trip = {"title": lbe.title, "html":lbe.text, "location":lbe.place, "caveOrLocation": "location"}
        # Only person-trips that have a personexpedition are listed.
        pts = [pt for pt in lbe.persontrip_set.all() if pt.personexpedition]
        persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts]
        # Binary mode because an explicitly utf-8 encoded byte string is written.
        f = open(filename, "wb")
        template = loader.get_template('dataformat/logbookentry.html')
        context = Context({'trip': trip,
                           'persons': persons,
                           'date': dateStr,
                           'expeditionyear': lbe.expedition.year})
        output = template.render(context)
        f.write(unicode(output).encode( "utf-8" ))
        f.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2020-04-16 20:36:42 +01:00
class JobQueue(object):
    """A list of import operations to run.  Always reports times
    in the same order.

    Timing results are persisted in a JSON file (self.tfile) so that
    successive runs can be compared against each other by showprofile().
    """

    def __init__(self, run):
        """run: a label string stored with this run's profiling data."""
        self.runlabel = run
        self.queue = []    # tuples of (jobname, jobfunction)
        self.results = {}
        # Fixed reporting order so successive profile reports line up.
        self.results_order = [
            "date", "runlabel", "reload", "caves", "people",
            "logbooks", "scans", "QMs", "survex",
            "tunnel", "surveys", "test", "makedirs", "redirect", "syncuser"]
        for k in self.results_order:
            self.results[k] = []
        self.tfile = "import_profile.json"
        self.htmlfile = "profile.html"

    def enq(self, label, func):
        """Add an import job (label, callable) to the end of the queue."""
        self.queue.append((label, func))
        return True

    def run(self):
        """Run every queued job in order, timing each, and persist the timings."""
        # Load any previous profile data so new timings are appended to it.
        if os.path.isfile(self.tfile):
            try:
                # 'with' guarantees the handle is closed even when json.load
                # fails, and never references an unbound name if open() raises
                # (the old code called f.close() after a bare except).
                with open(self.tfile, "r") as f:
                    data = json.load(f)
                for j in data:
                    self.results[j] = data[j]
            except (IOError, OSError, ValueError):
                # Corrupt or unreadable profile file: start afresh.
                print("FAILURE parsing JSON file %s" % (self.tfile))

        print("** Running job %s" % self.runlabel)
        jobstart = time.time()
        self.results["date"].append(jobstart)
        self.results["runlabel"].append(self.runlabel)

        for label, func in self.queue:
            start = time.time()
            func()    # invoke the import function queued by enq()
            duration = time.time() - start
            print('\n*- Ended "%s" %.1f seconds' % (label, duration))
            # setdefault: labels that are not in results_order (e.g.
            # "survexpos") previously raised KeyError here.
            self.results.setdefault(label, []).append(duration)

        with open(self.tfile, 'w') as f:
            json.dump(self.results, f)

        jobduration = time.time() - jobstart
        print("** Ended all jobs. %.1f seconds" % jobduration)

        # currently uses django db whatever it was. CHANGE this to explicitly use
        # a new sqlite3 db and then import the sql dump of that into the troggle db
        # instead of loading directly into the troggle sqlite db.
        # in-memory ":memory:" sqlite is ~7x faster and all of troggle can be
        # loaded in 6 minutes that way
        # (use the module-level `connection` import; the old code referenced
        # django.db.connection, and `django` is only imported under __main__)
        djconn = connection
        from dump import _iterdump
        with open('memdump.sql', 'w') as f:
            for line in _iterdump(djconn):
                f.write('%s\n' % line.encode("utf8"))
        # now import the memory image sql into (to do)
        return True

    def showprofile(self):
        """Print the time each import took and the percentage change from
        the previous recorded run."""
        for k in self.results_order:
            lst = self.results[k]
            if k == "runlabel":
                if lst:
                    print('%15s %s' % (k, lst[-1]))
            elif k == "date":
                # Days since the previous run, to one decimal place.
                # (old code subtracted in the wrong order, giving a negative
                # figure, and required >2 entries where 2 suffice)
                if len(lst) > 1:
                    days = (lst[-1] - lst[-2]) / (24 * 60 * 60)
                    print('%15s %8.1f days ago' % (k, days))
            elif len(lst) > 1:
                percen = 100 * (lst[-1] - lst[-2]) / lst[-2]
                if abs(percen) > 0.1:
                    print('%15s %8.1f%%' % (k, percen))
                else:
                    print('%15s ' % (k))
        return True
2011-07-11 02:10:22 +01:00
def usage():
    """Print command-line usage help for databaseReset.py.

    Fixes two defects in the old text: "rom SQL backup" (typo) and
    'import-profile.json' (the file actually written is import_profile.json).
    """
    print("""Usage is 'python databaseReset.py <command> [runlabel]'
         where command is:
         reset - this is normal usage, clear database and reread everything from files - time-consuming
         caves - read in the caves
         logbooks - read in just the logbooks
         people - read in the people from folk.csv
         QMs - read in the QM csv files
         reload_db - clear database (delete everything) and make empty tables
         scans - NOT the scanned surveynotes ?!
         survex - read in the survex files - all the survex blocks
         surveys - read in the scanned surveynotes
         tunnel - read in the Tunnel files - which scans the surveyscans too
         survexpos - just the Pos out of the survex files (not part of reset)
         resetend - (archaic?)
         writecaves - *disabled* (archaic?)
         autologbooks - read in autologbooks (what are these?)
         dumplogbooks - write out autologbooks (not working?)
         syncuser - needed after reloading database from SQL backup
         test - testing...
         and [runlabel] is an optional string identifying this run of the script
         in the stored profiling data 'import_profile.json'
         """)
2011-07-11 02:10:22 +01:00
if __name__ == "__main__":
    import troggle.core.models
    import sys
    import django
    django.setup()

    # The last argument is an optional run label stored with the profiling
    # data (when absent this picks up the command or script name instead).
    runlabel = sys.argv[-1]
    jq = JobQueue(runlabel)

    if "test" in sys.argv:
        jq.enq("reload", reload_db)
        jq.enq("makedirs", make_dirs)
        jq.enq("caves", import_caves)
        jq.enq("survex", import_survex)
        jq.enq("surveys", import_surveys)
    elif "caves" in sys.argv:
        jq.enq("caves", import_caves)
    elif "logbooks" in sys.argv:
        jq.enq("logbooks", import_logbooks)
    elif "people" in sys.argv:
        # Fixed: was enqueued under the label "logbooks", which mis-filed
        # the people-import timing data in the profile.
        jq.enq("people", import_people)
    elif "QMs" in sys.argv:
        jq.enq("QMs", import_QMs)
    elif "reload_db" in sys.argv:
        jq.enq("reload", reload_db)
    elif "reset" in sys.argv:
        jq.enq("reload", reload_db)
        jq.enq("makedirs", make_dirs)
        jq.enq("redirect", pageredirects)
        jq.enq("caves", import_caves)
        jq.enq("people", import_people)
        jq.enq("scans", import_surveyscans)
        jq.enq("logbooks", import_logbooks)
        jq.enq("QMs", import_QMs)
        jq.enq("survex", import_survex)
        jq.enq("tunnel", import_tunnelfiles)
        jq.enq("surveys", import_surveys)
    elif "scans" in sys.argv:
        jq.enq("scans", import_surveyscans)
    elif "survex" in sys.argv:
        jq.enq("survex", import_survex)
    elif "survexpos" in sys.argv:
        import parsers.survex
        jq.enq("survexpos", parsers.survex.LoadPos)
    elif "surveys" in sys.argv:
        jq.enq("surveys", import_surveys)
    elif "tunnel" in sys.argv:
        jq.enq("tunnel", import_tunnelfiles)
    elif "help" in sys.argv:
        usage()
    elif "resetend" in sys.argv:
        jq.enq("QMs", import_QMs)
        jq.enq("tunnel", import_tunnelfiles)
        jq.enq("surveys", import_surveys)
        # import_descriptions() / parse_descriptions() - no longer present
        # elif "writeCaves" in sys.argv: writeCaves() - no longer present
    elif "autologbooks" in sys.argv:
        import_auto_logbooks()
    elif "dumplogbooks" in sys.argv:
        dumplogbooks()
    else:
        print("%s not recognised" % sys.argv)
        usage()

    jq.run()
    jq.showprofile()