(.*)$", text, re.DOTALL)
+ ms = [len(m.groups()[0]) for m in [mstar, munstar, mhash, munhash, mitem] if m]
+ def min_(i, l):
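+ # True when match i captured the shortest leading text of all the candidate matches,
+ # i.e. its list token occurs earliest in the remaining wiki text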
+ try:
+ v = i.groups()[0]
+ l.remove(len(v))
+ return len(v) < min(l + [1000000000])
+ except (AttributeError, ValueError):
+ return False
+ if min_(mstar, ms):
+ lists += "*"
+ pre, val, post = mstar.groups()
+ out += pre + "\n" + lists + " " + val
+ text = post
+ elif min_(mhash, ms):
+ lists += "#"
+ pre, val, post = mhash.groups()
+ out += pre + "\n" + lists + " " + val
+ text = post
+ elif min_(mitem, ms):
+ pre, val, post = mitem.groups()
+ out += "\n" + lists + " " + val
+ text = post
+ elif min_(munstar, ms):
+ lists = lists[:-1]
+ text = munstar.groups()[1]
+ elif min_(munhash, ms):
+ lists = lists[:-1]
+ text = munhash.groups()[1]
+ else:
+ out += text
+ text = ""
+ text2 = out
+ while text2:
+ mtag = re.match("^(.*?)<(.*?)>(.*)$", text2, re.DOTALL)
+ if mtag:
+ text2 = mtag.groups()[2]
+ print mtag.groups()[1]
+ else:
+ text2 = ""
+ return out
+
+for katArea in ['1623', '1626']:
+ if not models.Area.objects.filter(short_name = katArea):
+ newArea = models.Area(short_name = katArea)
+ save(newArea)
+area1626 = models.Area.objects.filter(short_name = '1626')[0]
+area1623 = models.Area.objects.filter(short_name = '1623')[0]
+
+counter=0
+for line in caveReader :
+ if line[Area] == 'nonexistent':
+ continue
+ entranceLetters=[] #Used in caves that have multiple entrances, which are not described on separate lines
+ if line[MultipleEntrances] == 'yes' or line[MultipleEntrances]=='':
+ args = {}
+ def addToArgs(CSVname, modelName):
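+ # copy a CSV column into the model kwargs, converting any HTML markup to wiki syntax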
+ if line[CSVname]:
+ args[modelName] = html_to_wiki(line[CSVname])
+ addToArgs(KatasterNumber, "kataster_number")
+ addToArgs(KatStatusCode, "kataster_code")
+ addToArgs(UnofficialNumber, "unofficial_number")
+ addToArgs(Name, "official_name")
+ addToArgs(Comment, "notes")
+ addToArgs(Explorers, "explorers")
+ addToArgs(UndergroundDescription, "underground_description")
+ addToArgs(Equipment, "equipment")
+ addToArgs(KatasterStatus, "kataster_status")
+ addToArgs(References, "references")
+ addToArgs(UndergroundCentreLine, "underground_centre_line")
+ addToArgs(UndergroundDrawnSurvey, "survey")
+ addToArgs(Length, "length")
+ addToArgs(Depth, "depth")
+ addToArgs(Extent, "extent")
+ addToArgs(SurvexFile, "survex_file")
+ addToArgs(Notes, "notes")
+
+ newCave = models.Cave(**args)
+ save(newCave)
+
+ if line[Area]:
+ if line[Area] == "1626":
+ newCave.area.add(area1626)
+ else:
+ area = models.Area.objects.filter(short_name = line[Area])
+ if area:
+ newArea = area[0]
+ else:
+ newArea = models.Area(short_name = line[Area], parent = area1623)
+ save(newArea)
+ newCave.area.add(newArea)
+ else:
+ newCave.area.add(area1623)
+
+ save(newCave)
+
+ if line[UnofficialName]:
+ newUnofficialName = models.OtherCaveName(cave = newCave, name = line[UnofficialName])
+ save(newUnofficialName)
+ if line[MultipleEntrances] == '' or \
+ line[MultipleEntrances] == 'entrance' or \
+ line[MultipleEntrances] == 'last entrance':
+ args = {}
+ def addToArgs(CSVname, modelName):
+ if line[CSVname]:
+ args[modelName] = html_to_wiki(line[CSVname])
+ def addToArgsViaDict(CSVname, modelName, dictionary):
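+ # like addToArgs, but translate the CSV value to its short code via the given dictionary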
+ if line[CSVname]:
+ args[modelName] = dictionary[html_to_wiki(line[CSVname])]
+ addToArgs(EntranceName, 'name')
+ addToArgs(Explorers, 'explorers')
+ addToArgs(Map, 'map_description')
+ addToArgs(Location, 'location_description')
+ addToArgs(Approach, 'approach')
+ addToArgs(EntranceDescription, 'entrance_description')
+ addToArgs(UndergroundDescription, 'underground_description')
+ addToArgs(PhotoOfLocation, 'photo')
+ addToArgsViaDict(Marking, 'marking', {"Paint": "P",
+ "Paint (?)": "P?",
+ "Tag": "T",
+ "Tag (?)": "T?",
+ "Retagged": "R",
+ "Retag": "R",
+ "Spit": "S",
+ "Spit (?)": "S?",
+ "Unmarked": "U",
+ "": "?",
+ })
+ addToArgs(MarkingComment, 'marking_comment')
+ addToArgsViaDict(Findability, 'findability', {"Surveyed": "S",
+ "Lost": "L",
+ "Refindable": "R",
+ "": "?",
+ "?": "?",
+ })
+ addToArgs(FindabilityComment, 'findability_description')
+ addToArgs(Easting, 'easting')
+ addToArgs(Northing, 'northing')
+ addToArgs(Altitude, 'alt')
+ addToArgs(DescriptionOfOtherPoint, 'other_description')
+ def addToArgsSurveyStation(CSVname, modelName):
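+ # create and save a SurveyStation for this CSV column, then reference it from the entrance kwargs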
+ if line[CSVname]:
+ surveyPoint = models.SurveyStation(name = line[CSVname])
+ save(surveyPoint)
+ args[modelName] = surveyPoint
+ addToArgsSurveyStation(TagPoint, 'tag_station')
+ addToArgsSurveyStation(ExactEntrance, 'exact_station')
+ addToArgsSurveyStation(OtherPoint, 'other_station')
+ addToArgs(OtherPoint, 'other_description')
+ if line[GPSpreSA]:
+ addToArgsSurveyStation(GPSpreSA, 'other_station')
+ args['other_description'] = 'pre selective availability GPS'
+ if line[GPSpostSA]:
+ addToArgsSurveyStation(GPSpostSA, 'other_station')
+ args['other_description'] = 'post selective availability GPS'
+ addToArgs(Bearings, 'bearings')
+ newEntrance = models.Entrance(**args)
+ save(newEntrance)
+
+ if line[Entrances]:
+ entrance_letter = line[Entrances]
+ else:
+ entrance_letter = ''
+
+ newCaveAndEntrance = models.CaveAndEntrance(cave = newCave, entrance = newEntrance, entrance_letter = entrance_letter)
+ save(newCaveAndEntrance)
\ No newline at end of file
diff --git a/troggle/parsers/logbooks.py b/troggle/parsers/logbooks.py
new file mode 100644
index 000000000..5c38d4187
--- /dev/null
+++ b/troggle/parsers/logbooks.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+
+import settings
+import expo.models as models
+import csv
+import sqlite3
+import re
+import os
+import datetime
+
+# Dave Johnson (Stonker) is hacked -- are there two people with this name?
+# Dave Collins (Scout) is hacked
+# Letty ten Harkel has middle name removed
+# they have been removed
+# Dave Milne (Lummat)
+# Ben van Millingen
+# Rebecca Lawson (Becka)
+
+persontab = open(os.path.join(settings.EXPOWEB, "noinfo", "folk.csv"))
+personreader = csv.reader(persontab)
+headers = personreader.next()
+header = dict(zip(headers, range(len(headers))))
+
+
+def LoadExpos():
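+ # wipe and recreate the Expedition objects: a hard-coded 2008 expo plus one per year column in folk.csv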
+ models.Expedition.objects.all().delete()
+ y = models.Expedition(year = "2008", name = "CUCC expo2008")
+ y.save()
+ for year in headers[5:]:
+ y = models.Expedition(year = year, name = "CUCC expo%s" % year)
+ y.save()
+
+def LoadPersons():
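+ # rebuild Person and PersonExpedition records from folk.csv, topping up 2008 attendance from the hard-coded expoers2008 list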
+ models.Person.objects.all().delete()
+ models.PersonExpedition.objects.all().delete()
+ expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
+ expomissing = set(expoers2008)
+
+ for person in personreader:
+ name = person[header["Name"]]
+ name = re.sub("<.*?>", "", name)
+ lname = name.split()
+ if len(lname) >= 2:
+ firstname, lastname = lname[0], lname[1]
+ else:
+ firstname, lastname = lname[0], ""
+ print firstname, lastname
+ #assert lastname == person[header[""]], person
+ pObject = models.Person(first_name = firstname,
+ last_name = lastname,
+ is_guest = person[header["Guest"]] == "1",
+ is_vfho = person[header["VfHO member"]],
+ mug_shot = person[header["Mugshot"]])
+ pObject.save()
+
+ for year, attended in zip(headers, person)[5:]:
+ yo = models.Expedition.objects.filter(year = year)[0]
+ if attended == "1" or attended == "-1":
+ pyo = models.PersonExpedition(person = pObject, expedition = yo)
+ pyo.save()
+
+ if name in expoers2008:
+ print "2008:", name
+ expomissing.discard(name)
+ yo = models.Expedition.objects.filter(year = "2008")[0]
+ pyo = models.PersonExpedition(person = pObject, expedition = yo)
+ pyo.save()
+
+
+ print expomissing
+ for name in expomissing:
+ firstname, lastname = name.split()
+ pObject = models.Person(first_name = firstname,
+ last_name = lastname,
+ is_guest = name in ["Eeva Makiranta", "Keith Curtis"],
+ is_vfho = False,
+ mug_shot = "")
+ pObject.save()
+ yo = models.Expedition.objects.filter(year = "2008")[0]
+ pyo = models.PersonExpedition(person = pObject, expedition = yo)
+ pyo.save()
+
+
+#
+# the logbook loading section
+#
+def GetTripPersons(trippeople, expedition):
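+ # resolve the comma/plus/ampersand-separated people string into PersonExpedition objects;
+ # names starting with '*' are skipped, and the last resolved person is taken as the author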
+ res = [ ]
+ author = None
+ for tripperson in re.split(",|\+|&| and ", trippeople):
+ tripperson = tripperson.strip()
+ mul = re.match("(.*?)$", tripperson)
+ if mul:
+ tripperson = mul.group(1)
+ if tripperson and tripperson[0] != '*':
+ #assert tripperson in personyearmap, "'%s' << %s\n\n %s" % (tripperson, trippeople, personyearmap)
+ personyear = expedition.GetPersonExpedition(tripperson)
+ print personyear
+ res.append(personyear)
+ if mul:
+ author = personyear
+ if not author:
+ author = res[-1]
+ return res, author
+
+def Parselogwikitxt(year, personyearmap, txt):
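+ # wiki-format logbooks: each trip is a "=== date | place | people ===" heading followed by
+ # free text, with an optional "T/U: n hrs" line giving the time underground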
+ trippara = re.findall("===(.*?)===([\s\S]*?)(?====)", txt)
+ for triphead, triptext in trippara:
+ tripheadp = triphead.split("|")
+ assert len(tripheadp) == 3, tripheadp
+ tripdate, tripplace, trippeople = tripheadp
+ tripsplace = tripplace.split(" - ")
+ tripcave = tripsplace[0]
+
+ tul = re.findall("T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
+ if tul:
+ #assert len(tul) <= 1, (triphead, triptext)
+ #assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
+ triptime = tul[0][0]
+ else:
+ triptime = ""
+ #assert tripcave == "Journey", (triphead, triptext)
+
+ assert re.match("\d\d\d\d-\d\d-\d\d", tripdate), tripdate
+ ldate = datetime.date(int(tripdate[:4]), int(tripdate[5:7]), int(tripdate[8:10]))
+ lbo = models.LogbookEntry(date = ldate, cave = tripcave, title = tripsplace[-1], text = triptext, tu = triptime)
+ lbo.save()
+
+ trippersons, author = GetTripPersons(trippeople, personyearmap)
+ for tripperson in trippersons:
+ lbo.cavers.add(tripperson)
+ # add the author
+
+def Parseloghtmltxt(year, expedition, txt):
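+ # html-format logbooks: each trip is a block of tripdate/trippeople/triptitle divs plus free
+ # text and an optional timeug div, with the blocks separated by <hr/> tags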
+ tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt)
+ for trippara in tripparas:
+ s = re.match('''(?x)\s*(?:<a\s+id="(.*?)"\s*/>)?
+ \s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>
+ \s*<div\s+class="trippeople">(.*?)</div>
+ \s*<div\s+class="triptitle">(.*?)</div>
+ ([\s\S]*?)
+ \s*(?:<div\s+class="timeug">(.*?)</div>)?
+ \s*$
+ ''', trippara)
+ assert s, trippara
+
+ tripid, tripid1, tripdate, trippeople, triptitle, triptext, timeug = s.groups()
+ mdatestandard = re.match("(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
+ mdategoof = re.match("(\d\d?)/(\d)/(\d\d)", tripdate)
+ if mdatestandard:
+ year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
+ elif mdategoof:
+ day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(3)) + 2000
+ else:
+ assert False, tripdate
+ ldate = datetime.date(year, month, day)
+ #assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
+ trippersons, author = GetTripPersons(trippeople, expedition)
+ tripcave = ""
+ lbo = models.LogbookEntry(date = ldate, place = tripcave, title = triptitle, text = triptext, author=author)
+ lbo.save()
+ tu = timeug or ""
+
+ for tripperson in trippersons:
+ pto = models.PersonTrip(personexpedition = tripperson, place=tripcave, date=ldate, timeunderground=tu, logbookentry=lbo)
+ pto.save()
+
+
+
+def LoadLogbooks():
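+ # wipe and reload all logbook entries; the wiki-text parser handles 2007 onwards, the html parser earlier years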
+ models.LogbookEntry.objects.all().delete()
+ expowebbase = os.path.join(settings.EXPOWEB, "years") # this could be a url
+ yearlinks = [
+# ("2008", "2008/logbook/2008logbook.txt"),
+# ("2007", "2007/logbook/2007logbook.txt"),
+# ("2005", "2005/logbook.html"),
+ ("2004", "2004/logbook.html"),
+# ("2003", "2003/logbook.html"),
+ ]
+
+ for year, lloc in yearlinks:
+ expedition = models.Expedition.objects.filter(year = year)[0]
+ fin = open(os.path.join(expowebbase, lloc))
+ txt = fin.read()
+ fin.close()
+ #print personyearmap
+ if year >= "2007":
+ Parselogwikitxt(year, personyearmap, txt)
+ else:
+ Parseloghtmltxt(year, expedition, txt)
+
+# command line run through the loading stages
+LoadExpos()
+LoadPersons()
+LoadLogbooks()
+
+
diff --git a/troggle/parsers/survex.py b/troggle/parsers/survex.py
new file mode 100644
index 000000000..0f75e068e
--- /dev/null
+++ b/troggle/parsers/survex.py
@@ -0,0 +1,31 @@
+import settings
+import expo.models as models
+import re
+import os
+
+def readFile(filename):
+ for line in fileIterator(settings.SURVEX_DATA, filename):
+ print line
+
+re_include_extension = re.compile(r"^\s*\*include\s+([^\s]*)\.svx$", re.IGNORECASE)
+re_include_no_extension = re.compile(r"^\s*\*include\s+([^\s]*)$", re.IGNORECASE)
+
+def fileIterator(directory, filename):
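+ # yield the lines of a survex file, recursing into any *include'd files (with or without the .svx extension)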
+ f = open(os.path.join(directory, filename + ".svx"), "rb")
+ for line in f.readlines():
+ include_extension = re_include_extension.match(line)
+ include_no_extension = re_include_no_extension.match(line)
+ def a(include):
+ link = re.split(r"/|\\", include)
+ print os.path.join(directory, *link[:-1]), link[-1]
+ return fileIterator(os.path.join(directory, *link[:-1]), link[-1])
+ if include_extension:
+ for b in a(include_extension.groups()[0]):
+ yield b
+ elif include_no_extension:
+ for b in a(include_no_extension.groups()[0]):
+ yield b
+ else:
+ yield line
+
+readFile("all")
\ No newline at end of file
diff --git a/troggle/settings.py b/troggle/settings.py
new file mode 100644
index 000000000..7a830502f
--- /dev/null
+++ b/troggle/settings.py
@@ -0,0 +1,84 @@
+from localsettings import *
+# Django settings for troggle2 project.
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+ADMINS = (
+ # ('Your Name', 'your_email@domain.com'),
+)
+
+MANAGERS = ADMINS
+
+# Local time zone for this installation. Choices can be found here:
+# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
+# although not all choices may be available on all operating systems.
+# If running in a Windows environment this must be set to the same as your
+# system time zone.
+TIME_ZONE = 'Europe/London'
+
+# Language code for this installation. All choices can be found here:
+# http://www.i18nguy.com/unicode/language-identifiers.html
+LANGUAGE_CODE = 'en-uk'
+
+SITE_ID = 1
+
+# If you set this to False, Django will make some optimizations so as not
+# to load the internationalization machinery.
+USE_I18N = True
+
+# Absolute path to the directory that holds media.
+# Example: "/home/media/media.lawrence.com/"
+MEDIA_ROOT = '/media-admin/'
+
+# URL that handles the media served from MEDIA_ROOT. Make sure to use a
+# trailing slash if there is a path component (optional in other cases).
+# Examples: "http://media.lawrence.com", "http://example.com/media/"
+MEDIA_URL = 'http://127.0.0.1:8000/site_media/'
+
+SVX_URL = 'http://127.0.0.1:8000/troggle/survex/'
+
+# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
+# trailing slash.
+# Examples: "http://foo.com/media/", "/media/".
+ADMIN_MEDIA_PREFIX = '/media-admin/'
+
+APPEND_SLASH = False
+SMART_APPEND_SLASH = True
+
+# Make this unique, and don't share it with anybody.
+SECRET_KEY = 'a#vaeozn0)uz_9t_%v5n#tj)m+%ace6b_0(^fj!355qki*v)j2'
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ 'django.template.loaders.filesystem.load_template_source',
+ 'django.template.loaders.app_directories.load_template_source',
+# 'django.template.loaders.eggs.load_template_source',
+)
+
+MIDDLEWARE_CLASSES = (
+ 'django.middleware.common.CommonMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
+ 'troggle.middleware.SmartAppendSlashMiddleware'
+)
+
+ROOT_URLCONF = 'troggle.urls'
+
+TEMPLATE_DIRS = (
+ "templates"
+ # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
+ # Always use forward slashes, even on Windows.
+ # Don't forget to use absolute paths, not relative paths.
+)
+
+INSTALLED_APPS = (
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.sites',
+ 'django.contrib.redirects',
+ 'troggle.expo'
+)
diff --git a/troggle/templates/base.html b/troggle/templates/base.html
new file mode 100644
index 000000000..cb84590e9
--- /dev/null
+++ b/troggle/templates/base.html
@@ -0,0 +1,12 @@
+<html>
+<head>
+ <title>{% block title %}{% endblock %}</title>
+</head>
+<body>
+ {% block content %}{% endblock %}
+ {% block footer %}{% endblock %}
+</body>
+</html>
\ No newline at end of file
diff --git a/troggle/templates/cave.html b/troggle/templates/cave.html
new file mode 100644
index 000000000..a043e7f1a
--- /dev/null
+++ b/troggle/templates/cave.html
@@ -0,0 +1,76 @@
+{% extends "base.html" %}
+{% load wiki_markup %}
+
+{% block title %}{{ cave.official_name|wiki_to_html }}{% endblock %}
+
+{% block content %}
+