Commits

Author: Sam Wenham
SHA1: 6984f66794
Message: Updates required to move to django 1.8
Date: 2019-03-02 14:10:51 +00:00
65 changed files with 1294 additions and 2234 deletions

.gitignore (vendored)

@@ -1,16 +0,0 @@
# use glob syntax
syntax: glob
*.pyc
db*
localsettings.py
*~
parsing_log.txt
troggle
troggle_log.txt
.idea/*
*.orig
media/images/*
.vscode/*
.swp
imagekit-off/


@@ -1,13 +1,13 @@
Troggle is an application for caving expedition data management, originally created for use on Cambridge University Caving Club (CUCC) expeditions and licensed under the GNU Lesser General Public License.
Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.survex.com/troggle.
Troggle has been forked into two projects. The original one is maintained by Aron Curtis and is used for Erebus caves. The CUCC variant uses files as the definitive data, not the database and lives at expo.sruvex.com/troggle.
Troggle setup
=============
Python, Django, and Database setup
-----------------------------------
Troggle requires Django 1.10, and Python 2.7.
Troggle requires Django 1.4 or greater, and any version of Python that works with it.
Install Django with the following command:
apt-get install python-django (on debian/ubuntu)
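If the packaged version does not match what this branch expects, a specific release can instead be pinned with pip; this is only a sketch, assuming a virtualenv and the Django 1.8 series that this commit targets:

pip install "Django>=1.8,<1.9"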
@@ -18,18 +18,12 @@ If you want to use MySQL or Postgresql, download and install them. However, you
Troggle itself
-------------
Choose a directory where you will keep troggle, and git clone Troggle into it using the following command:
Choose a directory where you will keep troggle, and svn check out Troggle into it using the following command:
git clone git://expo.survex.com/~/troggle
or more reliably
git clone ssh://expo@expo.survex.com/home/expo/troggle
svn co http://troggle.googlecode.com/svn/
Running in development
----------------------
The simplest way to run Troggle in development is through the docker-compose setup
See the docker folder in the repo for details
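For illustration, and assuming the compose file lives in that docker folder, bringing the stack up is roughly:

cd docker
docker-compose up --build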
If you want to work on the source code and be able to commit, your account will need to be added to the troggle project members list. Contact wookey at wookware dot org to get this set up.
If you want to work on the source code and be able to commit, you will need to use https instead of http, and your google account will need to be added to the troggle project members list. Contact aaron dot curtis at cantab dot net to get this set up.
Next, you need to fill in your local settings. Copy either localsettingsubuntu.py or localsettingsserver.py to a new file called localsettings.py. Follow the instructions contained in the file to fill out your settings.
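As a rough illustration of what ends up in localsettings.py (the setting names below appear elsewhere in this codebase, but the values are invented examples):

DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': '/home/expo/troggle.sqlite'}}
EXPOUSER = 'expo'
URL_ROOT = 'http://localhost:8000/'
LOGFILE = '/home/expo/troggle/parsing_log.txt'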
@@ -41,7 +35,7 @@ Run "python databaseReset.py reset" from the troggle directory.
Once troggle is running, you can also log in and then go to "Import / export" data under "admin" on the menu.
Adding a new year/expedition requires adding a column to the
folk/folk.csv table - a year doesn't exist until that is done.
noinfo/folk.csv table - a year doesn't exist until that is done.
Running a Troggle server


@@ -9,12 +9,12 @@ from troggle.core.views_other import downloadLogbook
class TroggleModelAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
"""overriding admin save to fill the new_since parsing_field"""
obj.new_since_parsing=True
obj.save()
class Media:
js = ('jquery/jquery.min.js','js/QM_helper.js')
@@ -28,10 +28,6 @@ class SurvexBlockAdmin(TroggleModelAdmin):
inlines = (RoleInline,)
class SurvexStationAdmin(TroggleModelAdmin):
search_fields = ('name', 'block__name')
class ScannedImageInline(admin.TabularInline):
model = ScannedImage
extra = 4
@@ -44,7 +40,7 @@ class OtherCaveInline(admin.TabularInline):
class SurveyAdmin(TroggleModelAdmin):
inlines = (ScannedImageInline,)
search_fields = ('expedition__year','wallet_number')
search_fields = ('expedition__year','wallet_number')
class QMsFoundInline(admin.TabularInline):
@@ -52,7 +48,7 @@ class QMsFoundInline(admin.TabularInline):
fk_name='found_by'
fields=('number','grade','location_description','comment')#need to add foreignkey to cave part
extra=1
class PhotoInline(admin.TabularInline):
model = DPhoto
@@ -68,7 +64,7 @@ class PersonTripInline(admin.TabularInline):
#class LogbookEntryAdmin(VersionAdmin):
class LogbookEntryAdmin(TroggleModelAdmin):
prepopulated_fields = {'slug':("title",)}
prepopulated_fields = {'slug':("title",)}
search_fields = ('title','expedition__year')
date_heirarchy = ('date')
inlines = (PersonTripInline, PhotoInline, QMsFoundInline)
@@ -77,12 +73,12 @@ class LogbookEntryAdmin(TroggleModelAdmin):
"all": ("css/troggleadmin.css",)
}
actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt')
def export_logbook_entries_as_html(self, modeladmin, request, queryset):
def export_logbook_entries_as_html(modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='html')
return response
def export_logbook_entries_as_txt(self, modeladmin, request, queryset):
def export_logbook_entries_as_txt(modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='txt')
return response
@@ -99,11 +95,11 @@ class PersonAdmin(TroggleModelAdmin):
class QMAdmin(TroggleModelAdmin):
search_fields = ('found_by__cave__kataster_number','number','found_by__date')
list_display = ('__unicode__','grade','found_by','ticked_off_by','nearest_station')
list_display = ('__unicode__','grade','found_by','ticked_off_by')
list_display_links = ('__unicode__',)
list_editable = ('found_by','ticked_off_by','grade','nearest_station')
list_editable = ('found_by','ticked_off_by','grade')
list_per_page = 20
raw_id_fields=('found_by','ticked_off_by','nearest_station')
raw_id_fields=('found_by','ticked_off_by')
class PersonExpeditionAdmin(TroggleModelAdmin):
@@ -122,41 +118,37 @@ class EntranceAdmin(TroggleModelAdmin):
admin.site.register(DPhoto)
admin.site.register(Cave, CaveAdmin)
admin.site.register(CaveSlug)
admin.site.register(Area)
#admin.site.register(OtherCaveName)
admin.site.register(CaveAndEntrance)
admin.site.register(NewSubCave)
admin.site.register(CaveDescription)
admin.site.register(Entrance, EntranceAdmin)
admin.site.register(SurvexBlock, SurvexBlockAdmin)
admin.site.register(Expedition)
admin.site.register(Person,PersonAdmin)
admin.site.register(SurvexPersonRole)
admin.site.register(PersonExpedition,PersonExpeditionAdmin)
admin.site.register(LogbookEntry, LogbookEntryAdmin)
#admin.site.register(PersonTrip)
admin.site.register(QM, QMAdmin)
admin.site.register(Survey, SurveyAdmin)
admin.site.register(ScannedImage)
admin.site.register(SurvexStation)
admin.site.register(SurvexDirectory)
admin.site.register(SurvexFile)
admin.site.register(SurvexStation, SurvexStationAdmin)
admin.site.register(SurvexBlock)
admin.site.register(SurvexPersonRole)
admin.site.register(SurvexScansFolder)
admin.site.register(SurvexScanSingle)
admin.site.register(DataIssue)
def export_as_json(modeladmin, request, queryset):
response = HttpResponse(content_type="text/json")
response = HttpResponse(mimetype="text/json")
response['Content-Disposition'] = 'attachment; filename=troggle_output.json'
serializers.serialize("json", queryset, stream=response)
return response
def export_as_xml(modeladmin, request, queryset):
response = HttpResponse(content_type="text/xml")
response = HttpResponse(mimetype="text/xml")
response['Content-Disposition'] = 'attachment; filename=troggle_output.xml'
serializers.serialize("xml", queryset, stream=response)
return response
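The mimetype keyword seen above was deprecated in Django 1.5 and removed in 1.7, so on 1.8 only the content_type form works. A minimal, self-contained sketch of such an export action (the action and filename are illustrative, not part of this changeset):

from django.contrib import admin
from django.core import serializers
from django.http import HttpResponse

def export_selected_as_json(modeladmin, request, queryset):
    # Django >= 1.7: pass content_type, not mimetype
    response = HttpResponse(content_type="text/json")
    response['Content-Disposition'] = 'attachment; filename=export.json'
    serializers.serialize("json", queryset, stream=response)
    return response

admin.site.add_action(export_selected_as_json)  # register for all ModelAdmins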


@@ -15,7 +15,7 @@ def listdir(*path):
for p in os.listdir(root):
if os.path.isdir(os.path.join(root, p)):
l += p + "/\n"
elif os.path.isfile(os.path.join(root, p)):
l += p + "\n"
#Ignore non-files and non-directories
@@ -28,7 +28,7 @@ def listdir(*path):
c = c.replace("#", "%23")
print("FILE: ", settings.FILES + "listdir/" + c)
return urllib.urlopen(settings.FILES + "listdir/" + c).read()
def dirsAsList(*path):
return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"]


@@ -16,7 +16,7 @@ class CaveForm(ModelForm):
underground_centre_line = forms.CharField(required = False, widget=forms.Textarea())
notes = forms.CharField(required = False, widget=forms.Textarea())
references = forms.CharField(required = False, widget=forms.Textarea())
url = forms.CharField(required = True)
url = forms.CharField(required = True)
class Meta:
model = Cave
exclude = ("filename",)
@@ -24,9 +24,9 @@ class CaveForm(ModelForm):
def clean(self):
if self.cleaned_data.get("kataster_number") == "" and self.cleaned_data.get("unofficial_number") == "":
self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "":
self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
if self.cleaned_data.get("area") == []:
self._errors["area"] = self.error_class(["This field is required."])
if self.cleaned_data.get("url") and self.cleaned_data.get("url").startswith("/"):
@@ -46,12 +46,12 @@ class EntranceForm(ModelForm):
#underground_centre_line = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
#notes = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
#references = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
other_station = forms.CharField(required=False) # Trying to change this to a single line entry
tag_station = forms.CharField(required=False) # Trying to change this to a single line entry
exact_station = forms.CharField(required=False) # Trying to change this to a single line entry
northing = forms.CharField(required=False) # Trying to change this to a single line entry
easting = forms.CharField(required=False) # Trying to change this to a single line entry
alt = forms.CharField(required=False) # Trying to change this to a single line entry
other_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
tag_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
exact_station = forms.CharField(required=False) # Trying to change this to a singl;e line entry
northing = forms.CharField(required=False) # Trying to change this to a singl;e line entry
easting = forms.CharField(required=False) # Trying to change this to a singl;e line entry
alt = forms.CharField(required=False) # Trying to change this to a singl;e line entry
class Meta:
model = Entrance
exclude = ("cached_primary_slug", "filename",)
@@ -82,11 +82,11 @@ class EntranceLetterForm(ModelForm):
# This function returns html-formatted paragraphs for each of the
# wikilink types that are related to this logbookentry. Each paragraph
# contains a list of all of the related wikilinks.
#
#
# Perhaps an admin javascript solution would be better.
# """
# res = ["Please use the following wikilinks, which are related to this logbook entry:"]
#
#
# res.append(r'</p><p style="float: left;"><b>QMs found:</b>')
# for QM in LogbookEntry.instance.QMs_found.all():
# res.append(QM.wiki_link())
@@ -94,12 +94,12 @@ class EntranceLetterForm(ModelForm):
# res.append(r'</p><p style="float: left;"><b>QMs ticked off:</b>')
# for QM in LogbookEntry.instance.QMs_ticked_off.all():
# res.append(QM.wiki_link())
# res.append(r'</p><p style="float: left; "><b>People</b>')
# for persontrip in LogbookEntry.instance.persontrip_set.all():
# res.append(persontrip.wiki_link())
# res.append(r'</p>')
# return string.join(res, r'<br />')
# def __init__(self, *args, **kwargs):
@@ -107,7 +107,7 @@ class EntranceLetterForm(ModelForm):
# self.fields['text'].help_text=self.wikiLinkHints()#
#class CaveForm(forms.Form):
# html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
# html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
def getTripForm(expedition):
@@ -118,18 +118,18 @@ def getTripForm(expedition):
caves.sort()
caves = ["-----"] + caves
cave = forms.ChoiceField([(c, c) for c in caves], required=False)
location = forms.CharField(max_length=200, required=False)
location = forms.CharField(max_length=200, required=False)
caveOrLocation = forms.ChoiceField([("cave", "Cave"), ("location", "Location")], widget = forms.widgets.RadioSelect())
html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
def clean(self):
print(dir(self))
print dir(self)
if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"):
self._errors["cave"] = self.error_class(["This field is required"])
self._errors["cave"] = self.error_class(["This field is required"])
if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"):
self._errors["location"] = self.error_class(["This field is required"])
self._errors["location"] = self.error_class(["This field is required"])
return self.cleaned_data
class PersonTripForm(forms.Form):
names = [get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)]
names.sort()
@@ -141,7 +141,7 @@ def getTripForm(expedition):
PersonTripFormSet = formset_factory(PersonTripForm, extra=1)
return PersonTripFormSet, TripForm
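For context, a view would use the factory above roughly as follows (a hedged sketch, not code from this changeset):

PersonTripFormSet, TripForm = getTripForm(expedition)
tripform = TripForm(request.POST)
formset = PersonTripFormSet(request.POST)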
def get_name(pe):
if pe.nickname:
return pe.nickname
@@ -162,18 +162,18 @@ def get_name(pe):
# caves = ["-----"] + caves
# cave = forms.ChoiceField([(c, c) for c in caves], required=False)
# entrance = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# qm = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# entrance = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# qm = forms.ChoiceField([("-----", "Please select a cave"), ], required=False)
# expeditions = [e.year for e in Expedition.objects.all()]
# expeditions.sort()
# expeditions = ["-----"] + expeditions
# expedition = forms.ChoiceField([(e, e) for e in expeditions], required=False)
# expedition = forms.ChoiceField([(e, e) for e in expeditions], required=False)
# logbookentry = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# person = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# logbookentry = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# person = forms.ChoiceField([("-----", "Please select an expedition"), ], required=False)
# survey_point = forms.CharField()


@@ -1,21 +1,21 @@
from imagekit.specs import ImageSpec
from imagekit import processors
class ResizeThumb(processors.Resize):
width = 100
class ResizeThumb(processors.Resize):
width = 100
crop = False
class ResizeDisplay(processors.Resize):
width = 600
#class EnhanceThumb(processors.Adjustment):
width = 600
#class EnhanceThumb(processors.Adjustment):
#contrast = 1.2
#sharpness = 2
class Thumbnail(ImageSpec):
access_as = 'thumbnail_image'
pre_cache = True
processors = [ResizeThumb]
class Thumbnail(ImageSpec):
access_as = 'thumbnail_image'
pre_cache = True
processors = [ResizeThumb]
class Display(ImageSpec):
increment_count = True


@@ -2,14 +2,6 @@ from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from troggle.core.models import Cave
import settings
import os
from django.db import connection
from django.core import management
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models
databasename=settings.DATABASES['default']['NAME']
expouser=settings.EXPOUSER
@@ -20,13 +12,22 @@ class Command(BaseCommand):
help = 'This is normal usage, clear database and reread everything'
option_list = BaseCommand.option_list + (
make_option('--reset',
make_option('--foo',
action='store_true',
dest='reset',
dest='foo',
default=False,
help='Reset the entier DB from files'),
help='test'),
)
def add_arguments(self, parser):
parser.add_argument(
'--foo',
action='store_true',
dest='foo',
help='Help text',
)
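Django 1.8 deprecates the optparse-based option_list/make_option pattern in favour of the argparse hook shown here (option_list is removed entirely in 1.10). A minimal self-contained sketch of the new style, with an illustrative --reset flag rather than the real command:

from django.core.management.base import BaseCommand

class Command(BaseCommand):
    help = 'Example command using the Django 1.8+ argparse hook'

    def add_arguments(self, parser):
        # argparse replaces optparse's make_option/option_list
        parser.add_argument('--reset', action='store_true', dest='reset',
                            default=False, help='Reset the entire DB from files')

    def handle(self, *args, **options):
        if options['reset']:
            self.stdout.write(self.style.WARNING('Resetting...'))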
def handle(self, *args, **options):
print(args)
print(options)
@@ -45,8 +46,8 @@ class Command(BaseCommand):
self.import_QMs()
elif "tunnel" in args:
self.import_tunnelfiles()
elif options['reset']:
self.reset(self)
elif "reset" in args:
self.reset()
elif "survex" in args:
self.import_survex()
elif "survexpos" in args:
@@ -60,15 +61,13 @@ class Command(BaseCommand):
self.dumplogbooks()
elif "writeCaves" in args:
self.writeCaves()
elif options['foo']:
self.stdout.write(self.style.WARNING('Tesing....'))
elif "foo" in args:
self.stdout.write('Tesing....')
else:
#self.stdout.write("%s not recognised" % args)
#self.usage(options)
self.stdout.write("poo")
#print(args)
self.stdout.write("%s not recognised" % args)
self.usage(options)
def reload_db(obj):
def reload_db():
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
try:
os.remove(databasename)
@@ -87,22 +86,22 @@ class Command(BaseCommand):
user.is_superuser = True
user.save()
def make_dirs(obj):
def make_dirs():
"""Make directories that troggle requires"""
# should also deal with permissions here.
if not os.path.isdir(settings.PHOTOS_ROOT):
os.mkdir(settings.PHOTOS_ROOT)
def import_caves(obj):
def import_caves():
import parsers.caves
print("Importing Caves")
print("importing caves")
parsers.caves.readcaves()
def import_people(obj):
def import_people():
import parsers.people
parsers.people.LoadPersonsExpos()
def import_logbooks(obj):
def import_logbooks():
# The below line was causing errors I didn't understand (it said LOGFILE was a string), and I couldn't be bothered to figure
# what was going on so I just catch the error with a try. - AC 21 May
try:
@@ -113,57 +112,57 @@ class Command(BaseCommand):
import parsers.logbooks
parsers.logbooks.LoadLogbooks()
def import_survex(obj):
def import_survex():
import parsers.survex
parsers.survex.LoadAllSurvexBlocks()
parsers.survex.LoadPos()
def import_QMs(obj):
def import_QMs():
import parsers.QMs
def import_surveys(obj):
def import_surveys():
import parsers.surveys
parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
def import_surveyscans(obj):
def import_surveyscans():
import parsers.surveys
parsers.surveys.LoadListScans()
def import_tunnelfiles(obj):
def import_tunnelfiles():
import parsers.surveys
parsers.surveys.LoadTunnelFiles()
def reset(self, mgmt_obj):
def reset():
""" Wipe the troggle database and import everything from legacy data
"""
self.reload_db()
self.make_dirs()
self.pageredirects()
self.import_caves()
self.import_people()
self.import_surveyscans()
self.import_survex()
self.import_logbooks()
self.import_QMs()
reload_db()
make_dirs()
pageredirects()
import_caves()
import_people()
import_surveyscans()
import_survex()
import_logbooks()
import_QMs()
try:
self.import_tunnelfiles()
import_tunnelfiles()
except:
print("Tunnel files parser broken.")
self.import_surveys()
import_surveys()
def pageredirects(obj):
def pageredirects():
for oldURL, newURL in [("indxal.htm", reverse("caveindex"))]:
f = troggle.flatpages.models.Redirect(originalURL=oldURL, newURL=newURL)
f.save()
def writeCaves(obj):
def writeCaves():
for cave in Cave.objects.all():
cave.writeDataFile()
for entrance in Entrance.objects.all():
entrance.writeDataFile()
def troggle_usage(obj):
def usage(self, parser):
print("""Usage is 'manage.py reset_db <command>'
where command is:
reset - this is normal usage, clear database and reread everything


@@ -1,575 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-18 16:01
from __future__ import unicode_literals
from django.conf import settings
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
import troggle.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('short_name', models.CharField(max_length=100)),
('name', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Area')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Cave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('official_name', models.CharField(max_length=160)),
('kataster_code', models.CharField(blank=True, max_length=20, null=True)),
('kataster_number', models.CharField(blank=True, max_length=10, null=True)),
('unofficial_number', models.CharField(blank=True, max_length=60, null=True)),
('explorers', models.TextField(blank=True, null=True)),
('underground_description', models.TextField(blank=True, null=True)),
('equipment', models.TextField(blank=True, null=True)),
('references', models.TextField(blank=True, null=True)),
('survey', models.TextField(blank=True, null=True)),
('kataster_status', models.TextField(blank=True, null=True)),
('underground_centre_line', models.TextField(blank=True, null=True)),
('notes', models.TextField(blank=True, null=True)),
('length', models.CharField(blank=True, max_length=100, null=True)),
('depth', models.CharField(blank=True, max_length=100, null=True)),
('extent', models.CharField(blank=True, max_length=100, null=True)),
('survex_file', models.CharField(blank=True, max_length=100, null=True)),
('description_file', models.CharField(blank=True, max_length=200, null=True)),
('url', models.CharField(blank=True, max_length=200, null=True)),
('filename', models.CharField(max_length=200)),
('area', models.ManyToManyField(blank=True, to='core.Area')),
],
options={
'ordering': ('kataster_code', 'unofficial_number'),
},
),
migrations.CreateModel(
name='CaveAndEntrance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('entrance_letter', models.CharField(blank=True, max_length=20, null=True)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='CaveDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('short_name', models.CharField(max_length=50, unique=True)),
('long_name', models.CharField(blank=True, max_length=200, null=True)),
('description', models.TextField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CaveSlug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(unique=True)),
('primary', models.BooleanField(default=False)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='DataIssue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateTimeField(auto_now_add=True)),
('parser', models.CharField(blank=True, max_length=50, null=True)),
('message', models.CharField(blank=True, max_length=400, null=True)),
],
options={
'ordering': ['date'],
},
),
migrations.CreateModel(
name='DPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('caption', models.CharField(blank=True, max_length=1000, null=True)),
('file', models.ImageField(storage=django.core.files.storage.FileSystemStorage(base_url=b'http://127.0.0.1:8000/photos/', location=b'/expo/expoweb/photos'), upload_to=b'.')),
('is_mugshot', models.BooleanField(default=False)),
('lon_utm', models.FloatField(blank=True, null=True)),
('lat_utm', models.FloatField(blank=True, null=True)),
('contains_cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Entrance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(blank=True, max_length=100, null=True)),
('entrance_description', models.TextField(blank=True, null=True)),
('explorers', models.TextField(blank=True, null=True)),
('map_description', models.TextField(blank=True, null=True)),
('location_description', models.TextField(blank=True, null=True)),
('approach', models.TextField(blank=True, null=True)),
('underground_description', models.TextField(blank=True, null=True)),
('photo', models.TextField(blank=True, null=True)),
('marking', models.CharField(choices=[(b'P', b'Paint'), (b'P?', b'Paint (?)'), (b'T', b'Tag'), (b'T?', b'Tag (?)'), (b'R', b'Needs Retag'), (b'S', b'Spit'), (b'S?', b'Spit (?)'), (b'U', b'Unmarked'), (b'?', b'Unknown')], max_length=2)),
('marking_comment', models.TextField(blank=True, null=True)),
('findability', models.CharField(blank=True, choices=[(b'?', b'To be confirmed ...'), (b'S', b'Coordinates'), (b'L', b'Lost'), (b'R', b'Refindable')], max_length=1, null=True)),
('findability_description', models.TextField(blank=True, null=True)),
('alt', models.TextField(blank=True, null=True)),
('northing', models.TextField(blank=True, null=True)),
('easting', models.TextField(blank=True, null=True)),
('tag_station', models.TextField(blank=True, null=True)),
('exact_station', models.TextField(blank=True, null=True)),
('other_station', models.TextField(blank=True, null=True)),
('other_description', models.TextField(blank=True, null=True)),
('bearings', models.TextField(blank=True, null=True)),
('url', models.CharField(blank=True, max_length=200, null=True)),
('filename', models.CharField(max_length=200)),
('cached_primary_slug', models.CharField(blank=True, max_length=200, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='EntranceSlug',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(unique=True)),
('primary', models.BooleanField(default=False)),
('entrance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance')),
],
),
migrations.CreateModel(
name='Expedition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('year', models.CharField(max_length=20, unique=True)),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ('-year',),
'get_latest_by': 'year',
},
),
migrations.CreateModel(
name='ExpeditionDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateField()),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
],
options={
'ordering': ('date',),
},
),
migrations.CreateModel(
name='LogbookEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('date', models.DateTimeField()),
('title', models.CharField(max_length=200)),
('cave_slug', models.SlugField()),
('place', models.CharField(blank=True, help_text=b"Only use this if you haven't chosen a cave", max_length=100, null=True)),
('text', models.TextField()),
('slug', models.SlugField()),
('filename', models.CharField(max_length=200, null=True)),
('entry_type', models.CharField(choices=[(b'wiki', b'Wiki style logbook'), (b'html', b'Html style logbook')], default=b'wiki', max_length=50, null=True)),
('expedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
],
options={
'ordering': ('-date',),
'verbose_name_plural': 'Logbook Entries',
},
),
migrations.CreateModel(
name='NewSubCave',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(max_length=200, unique=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='OtherCaveName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('name', models.CharField(max_length=160)),
('cave', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('fullname', models.CharField(max_length=200)),
('is_vfho', models.BooleanField(default=False, help_text=b'VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.')),
('mug_shot', models.CharField(blank=True, max_length=100, null=True)),
('blurb', models.TextField(blank=True, null=True)),
('orderref', models.CharField(max_length=200)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('orderref',),
'verbose_name_plural': 'People',
},
),
migrations.CreateModel(
name='PersonExpedition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('slugfield', models.SlugField(blank=True, null=True)),
('is_guest', models.BooleanField(default=False)),
('expo_committee_position', models.CharField(blank=True, choices=[(b'leader', b'Expo leader'), (b'medical', b'Expo medical officer'), (b'treasurer', b'Expo treasurer'), (b'sponsorship', b'Expo sponsorship coordinator'), (b'research', b'Expo research coordinator')], max_length=200, null=True)),
('nickname', models.CharField(blank=True, max_length=100, null=True)),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
],
options={
'ordering': ('-expedition',),
},
),
migrations.CreateModel(
name='PersonTrip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('time_underground', models.FloatField(help_text=b'In decimal hours')),
('is_logbook_entry_author', models.BooleanField(default=False)),
('logbook_entry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry')),
('personexpedition', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonExpedition')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='QM',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('number', models.IntegerField(help_text=b'this is the sequential number in the year')),
('grade', models.CharField(choices=[(b'A', b'A: Large obvious lead'), (b'B', b'B: Average lead'), (b'C', b'C: Tight unpromising lead'), (b'D', b'D: Dig'), (b'X', b'X: Unclimbable aven')], max_length=1)),
('location_description', models.TextField(blank=True)),
('nearest_station_description', models.CharField(blank=True, max_length=400, null=True)),
('nearest_station_name', models.CharField(blank=True, max_length=200, null=True)),
('area', models.CharField(blank=True, max_length=100, null=True)),
('completion_description', models.TextField(blank=True, null=True)),
('comment', models.TextField(blank=True, null=True)),
('found_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='QMs_found', to='core.LogbookEntry')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ScannedImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('file', models.ImageField(storage=django.core.files.storage.FileSystemStorage(base_url=b'/survey_scans/', location=b'/expo/expofiles/'), upload_to=troggle.core.models.get_scan_path)),
('scanned_on', models.DateField(null=True)),
('contents', models.CharField(choices=[(b'notes', b'notes'), (b'plan', b'plan_sketch'), (b'elevation', b'elevation_sketch')], max_length=20)),
('number_in_wallet', models.IntegerField(null=True)),
('lon_utm', models.FloatField(blank=True, null=True)),
('lat_utm', models.FloatField(blank=True, null=True)),
('scanned_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='SurvexBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('text', models.TextField()),
('date', models.DateTimeField(blank=True, null=True)),
('begin_char', models.IntegerField()),
('survexpath', models.CharField(max_length=200)),
('totalleglength', models.FloatField()),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('expedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='SurvexDirectory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
options={
'ordering': ('path',),
},
),
migrations.CreateModel(
name='SurvexEquate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
],
),
migrations.CreateModel(
name='SurvexFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('survexdirectory', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexDirectory')),
],
options={
'ordering': ('id',),
},
),
migrations.CreateModel(
name='SurvexLeg',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tape', models.FloatField()),
('compass', models.FloatField()),
('clino', models.FloatField()),
('block', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='SurvexPersonRole',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nrole', models.CharField(blank=True, choices=[(b'insts', b'Instruments'), (b'dog', b'Other'), (b'notes', b'Notes'), (b'pics', b'Pictures'), (b'tape', b'Tape measure'), (b'useless', b'Useless'), (b'helper', b'Helper'), (b'disto', b'Disto'), (b'consultant', b'Consultant')], max_length=200, null=True)),
('personname', models.CharField(max_length=100)),
('expeditionday', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.ExpeditionDay')),
('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Person')),
('personexpedition', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonExpedition')),
('persontrip', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.PersonTrip')),
('survexblock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='SurvexScansFolder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fpath', models.CharField(max_length=200)),
('walletname', models.CharField(max_length=200)),
],
options={
'ordering': ('walletname',),
},
),
migrations.CreateModel(
name='SurvexScanSingle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ffile', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('survexscansfolder', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexScansFolder')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='SurvexStation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('x', models.FloatField(blank=True, null=True)),
('y', models.FloatField(blank=True, null=True)),
('z', models.FloatField(blank=True, null=True)),
('block', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
('equate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexEquate')),
],
),
migrations.CreateModel(
name='SurvexTitle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('cave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Cave')),
('survexblock', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('new_since_parsing', models.BooleanField(default=False, editable=False)),
('non_public', models.BooleanField(default=False)),
('wallet_number', models.IntegerField(blank=True, null=True)),
('wallet_letter', models.CharField(blank=True, max_length=1, null=True)),
('comments', models.TextField(blank=True, null=True)),
('location', models.CharField(blank=True, max_length=400, null=True)),
('centreline_printed_on', models.DateField(blank=True, null=True)),
('tunnel_file', models.FileField(blank=True, null=True, upload_to=b'surveyXMLfiles')),
('integrated_into_main_sketch_on', models.DateField(blank=True, null=True)),
('rendered_image', models.ImageField(blank=True, null=True, upload_to=b'renderedSurveys')),
('centreline_printed_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='centreline_printed_by', to='core.Person')),
('expedition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Expedition')),
('integrated_into_main_sketch_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='integrated_into_main_sketch_by', to='core.Person')),
('logbook_entry', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry')),
('subcave', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.NewSubCave')),
('survex_block', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexBlock')),
('tunnel_main_sketch', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Survey')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TunnelFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tunnelpath', models.CharField(max_length=200)),
('tunnelname', models.CharField(max_length=200)),
('bfontcolours', models.BooleanField(default=False)),
('filesize', models.IntegerField(default=0)),
('npaths', models.IntegerField(default=0)),
('survexblocks', models.ManyToManyField(to='core.SurvexBlock')),
('survexscans', models.ManyToManyField(to='core.SurvexScanSingle')),
('survexscansfolders', models.ManyToManyField(to='core.SurvexScansFolder')),
('survextitles', models.ManyToManyField(to='core.SurvexTitle')),
('tunnelcontains', models.ManyToManyField(to='core.TunnelFile')),
],
options={
'ordering': ('tunnelpath',),
},
),
migrations.AddField(
model_name='survexleg',
name='stationfrom',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stationfrom', to='core.SurvexStation'),
),
migrations.AddField(
model_name='survexleg',
name='stationto',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stationto', to='core.SurvexStation'),
),
migrations.AddField(
model_name='survexdirectory',
name='primarysurvexfile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='primarysurvexfile', to='core.SurvexFile'),
),
migrations.AddField(
model_name='survexblock',
name='survexfile',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexFile'),
),
migrations.AddField(
model_name='survexblock',
name='survexscansfolder',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexScansFolder'),
),
migrations.AddField(
model_name='scannedimage',
name='survey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Survey'),
),
migrations.AddField(
model_name='qm',
name='nearest_station',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SurvexStation'),
),
migrations.AddField(
model_name='qm',
name='ticked_off_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='QMs_ticked_off', to='core.LogbookEntry'),
),
migrations.AddField(
model_name='dphoto',
name='contains_entrance',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='photo_file', to='core.Entrance'),
),
migrations.AddField(
model_name='dphoto',
name='contains_logbookentry',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.LogbookEntry'),
),
migrations.AddField(
model_name='dphoto',
name='contains_person',
field=models.ManyToManyField(blank=True, to='core.Person'),
),
migrations.AddField(
model_name='dphoto',
name='nearest_QM',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.QM'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_entrances',
field=models.ManyToManyField(blank=True, to='core.Entrance'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_qms',
field=models.ManyToManyField(blank=True, to='core.QM'),
),
migrations.AddField(
model_name='cavedescription',
name='linked_subcaves',
field=models.ManyToManyField(blank=True, to='core.NewSubCave'),
),
migrations.AddField(
model_name='caveandentrance',
name='entrance',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance'),
),
]
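If this initial migration is deleted, the usual way to recreate one for the app is makemigrations (a sketch, assuming the 'core' app label used throughout the file above and a Django version with built-in migrations, i.e. 1.7+):

python manage.py makemigrations core
python manage.py migrate core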


@@ -17,6 +17,7 @@ getcontext().prec=2 #use 2 significant figures for decimal calculations
from troggle.core.models_survex import *
def get_related_by_wikilinks(wiki_text):
found=re.findall(settings.QM_PATTERN,wiki_text)
res=[]
@@ -27,10 +28,10 @@ def get_related_by_wikilinks(wiki_text):
qm=QM.objects.get(found_by__cave_slug__in = cave_slugs,
found_by__date__year = qmdict['year'],
number = qmdict['number'])
res.append(qm)
res.append(qm)
except QM.DoesNotExist:
print('fail on '+str(wikilink))
return res
try:
@@ -38,7 +39,7 @@ try:
filename=settings.LOGFILE,
filemode='w')
except:
subprocess.call(settings.FIX_PERMISSIONS)
subprocess.call(settings.FIX_PERMISSIONS)
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
@@ -58,7 +59,7 @@ class TroggleModel(models.Model):
class TroggleImageModel(models.Model):
new_since_parsing = models.BooleanField(default=False, editable=False)
def object_name(self):
return self._meta.object_name
@@ -69,23 +70,23 @@ class TroggleImageModel(models.Model):
class Meta:
abstract = True
#
#
# single Expedition, usually seen by year
#
class Expedition(TroggleModel):
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
def __unicode__(self):
return self.year
class Meta:
ordering = ('-year',)
get_latest_by = 'year'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
# construction function. should be moved out
def get_expedition_day(self, date):
expeditiondays = self.expeditionday_set.filter(date=date)
@@ -95,81 +96,77 @@ class Expedition(TroggleModel):
res = ExpeditionDay(expedition=self, date=date)
res.save()
return res
def day_min(self):
res = self.Expeditionday_set.all()
res = self.expeditionday_set.all()
return res and res[0] or None
def day_max(self):
res = self.Expeditionday_set.all()
res = self.expeditionday_set.all()
return res and res[len(res) - 1] or None
class ExpeditionDay(TroggleModel):
expedition = models.ForeignKey("Expedition")
date = models.DateField()
class Meta:
ordering = ('date',)
ordering = ('date',)
def GetPersonTrip(self, personexpedition):
personexpeditions = self.Persontrip_set.filter(expeditionday=self)
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None
def __unicode__(self):
return str(self.expedition) + ' ' + str(self.date)
#
# single Person, can go on many years
#
class Person(TroggleModel):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
fullname = models.CharField(max_length=200)
is_vfho = models.BooleanField(help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
mug_shot = models.CharField(max_length=100, blank=True,null=True)
blurb = models.TextField(blank=True,null=True)
#href = models.CharField(max_length=200)
orderref = models.CharField(max_length=200) # for alphabetic
orderref = models.CharField(max_length=200) # for alphabetic
#the below have been removed and made methods. I'm not sure what the b in bisnotable stands for. - AC 16 Feb
#notability = models.FloatField() # for listing the top 20 people
#bisnotable = models.BooleanField(default=False)
user = models.OneToOneField(User, null=True, blank=True)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
class Meta:
verbose_name_plural = "People"
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
def __unicode__(self):
if self.last_name:
return "%s %s" % (self.first_name, self.last_name)
return self.first_name
def notability(self):
notability = Decimal(0)
max_expo_val = 0
max_expo_year = Expedition.objects.all().aggregate(Max('year'))
max_expo_val = int(max_expo_year['year__max']) + 1
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
# print(personexpedition.expedition.year)
notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
notability += Decimal(1) / (2012 - int(personexpedition.expedition.year))
return notability
def bisnotable(self):
return self.notability() > Decimal(1)/Decimal(3)
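As a worked illustration of the weighting above (years invented): if the most recent expedition is 2018, max_expo_val is 2019, so a person who went in 2017 and 2014 scores 1/2 + 1/5 = 0.7, comfortably over the 1/3 threshold used by bisnotable(); someone whose only trip was in 2010 scores 1/9 ≈ 0.11 and is not counted as notable.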
def surveyedleglength(self):
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
def first(self):
return self.personexpedition_set.order_by('-expedition')[0]
def last(self):
return self.personexpedition_set.order_by('expedition')[0]
#def Sethref(self):
#if self.last_name:
#self.href = self.first_name.lower() + "_" + self.last_name.lower()
@@ -178,7 +175,7 @@ class Person(TroggleModel):
# self.href = self.first_name.lower()
#self.orderref = self.first_name
#self.notability = 0.0 # set temporarily
#
# Person's attenance to one Expo
@@ -187,8 +184,8 @@ class PersonExpedition(TroggleModel):
expedition = models.ForeignKey(Expedition)
person = models.ForeignKey(Person)
slugfield = models.SlugField(max_length=50,blank=True,null=True)
is_guest = models.BooleanField(default=False)
is_guest = models.BooleanField(default=False)
COMMITTEE_CHOICES = (
('leader','Expo leader'),
('medical','Expo medical officer'),
@@ -198,7 +195,7 @@ class PersonExpedition(TroggleModel):
)
expo_committee_position = models.CharField(blank=True,null=True,choices=COMMITTEE_CHOICES,max_length=200)
nickname = models.CharField(max_length=100,blank=True,null=True)
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('survexblock'):
@@ -214,8 +211,8 @@ class PersonExpedition(TroggleModel):
def __unicode__(self):
return "%s: (%s)" % (self.person, self.expedition)
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
@@ -226,11 +223,11 @@ class PersonExpedition(TroggleModel):
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
def surveyedleglength(self):
survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
return sum([survexblock.totalleglength for survexblock in set(survexblocks)])
# would prefer to return actual person trips so we could link to first and last ones
def day_min(self):
res = self.persontrip_set.aggregate(day_min=Min("expeditionday__date"))
@@ -242,30 +239,26 @@ class PersonExpedition(TroggleModel):
#
# Single parsed entry from Logbook
#
#
class LogbookEntry(TroggleModel):
LOGBOOK_ENTRY_TYPES = (
("wiki", "Wiki style logbook"),
("html", "Html style logbook")
)
date = models.DateTimeField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.ld()
date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.
expeditionday = models.ForeignKey("ExpeditionDay", null=True)#MJG wants to KILL THIS (redundant information)
expedition = models.ForeignKey(Expedition,blank=True,null=True) # yes this is double-
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
entry_type = models.CharField(default="wiki",null=True,choices=LOGBOOK_ENTRY_TYPES,max_length=50)
#author = models.ForeignKey(PersonExpedition,blank=True,null=True) # the person who writes it up doesn't have to have been on the trip.
# Re: the above- so this field should be "typist" or something, not "author". - AC 15 jun 09
#MJG wants to KILL THIS, as it is typically redundant with PersonTrip.is_logbook_entry_author, in the rare it was not redundanty and of actually interest it could be added to the text.
title = models.CharField(max_length=settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH)
cave_slug = models.SlugField(max_length=50)
place = models.CharField(max_length=100,blank=True,null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
filename = models.CharField(max_length=200,null=True)
class Meta:
verbose_name_plural = "Logbook Entries"
# several PersonTrips point in to this object
ordering = ('-date',)
def __getattribute__(self, item):
if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
return CaveSlug.objects.get(slug = self.cave_slug).cave
@@ -298,7 +291,7 @@ class LogbookEntry(TroggleModel):
if self.cave:
nextQMnumber=self.cave.new_QM_number(self.date.year)
else:
return None
return none
return nextQMnumber
def new_QM_found_link(self):
@@ -308,24 +301,23 @@ class LogbookEntry(TroggleModel):
def DayIndex(self):
return list(self.expeditionday.logbookentry_set.all()).index(self)
#
# Single Person going on a trip, which may or may not be written up (accounts for different T/U for people in same logbook entry)
#
class PersonTrip(TroggleModel):
personexpedition = models.ForeignKey("PersonExpedition",null=True)
#expeditionday = models.ForeignKey("ExpeditionDay")#MJG wants to KILL THIS (redundant information)
#date = models.DateField() #MJG wants to KILL THIS (redundant information)
time_underground = models.FloatField(help_text="In decimal hours")
logbook_entry = models.ForeignKey(LogbookEntry)
is_logbook_entry_author = models.BooleanField(default=False)
# sequencing by person (difficult to solve locally)
#persontrip_next = models.ForeignKey('PersonTrip', related_name='pnext', blank=True,null=True)#MJG wants to KILL THIS (and use funstion persontrip_next_auto)
#persontrip_prev = models.ForeignKey('PersonTrip', related_name='pprev', blank=True,null=True)#MJG wants to KILL THIS(and use funstion persontrip_prev_auto)
def persontrip_next(self):
futurePTs = PersonTrip.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__gt = self.logbook_entry.date).order_by('logbook_entry__date').all()
if len(futurePTs) > 0:
@@ -345,7 +337,7 @@ class PersonTrip(TroggleModel):
def __unicode__(self):
return "%s (%s)" % (self.personexpedition, self.logbook_entry.date)
##########################################
@@ -372,22 +364,19 @@ class CaveAndEntrance(models.Model):
cave = models.ForeignKey('Cave')
entrance = models.ForeignKey('Entrance')
entrance_letter = models.CharField(max_length=20,blank=True,null=True)
def __unicode__(self):
return unicode(self.cave) + unicode(self.entrance_letter)
class CaveSlug(models.Model):
cave = models.ForeignKey('Cave')
slug = models.SlugField(max_length=50, unique = True)
primary = models.BooleanField(default=False)
def __unicode__(self):
return self.slug
class Cave(TroggleModel):
# too much here perhaps,
# too much here perhaps,
official_name = models.CharField(max_length=160)
area = models.ManyToManyField(Area, blank=True)
area = models.ManyToManyField(Area, blank=True, null=True)
kataster_code = models.CharField(max_length=20,blank=True,null=True)
kataster_number = models.CharField(max_length=10,blank=True, null=True)
unofficial_number = models.CharField(max_length=60,blank=True, null=True)
@@ -411,13 +400,13 @@ class Cave(TroggleModel):
#class Meta:
# unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
# FIXME Kataster Areas and CUCC defined sub areas need separating
# FIXME Kataster Areas and CUCC defined sub areas need separating
#href = models.CharField(max_length=100)
class Meta:
ordering = ('kataster_code', 'unofficial_number')
ordering = ('kataster_code', 'unofficial_number')
def hassurvey(self):
if not self.underground_centre_line:
@@ -432,7 +421,7 @@ class Cave(TroggleModel):
if self.survex_file:
return "Yes"
return "Missing"
def slug(self):
primarySlugs = self.caveslug_set.filter(primary = True)
if primarySlugs:
@@ -450,14 +439,14 @@ class Cave(TroggleModel):
return "%s-%s" % (self.kat_area(), self.kataster_number)
else:
return "%s-%s" % (self.kat_area(), self.unofficial_number)
def get_absolute_url(self):
if self.kataster_number:
href = self.kataster_number
elif self.unofficial_number:
href = self.unofficial_number
else:
href = self.official_name.lower()
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
@@ -465,7 +454,7 @@ class Cave(TroggleModel):
return unicode(self.slug())
def get_QMs(self):
return QM.objects.filter(nearest_station__block__cave__caveslug=self.caveslug_set.all())
return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
def new_QM_number(self, year=datetime.date.today().year):
"""Given a cave and the current year, returns the next QM number."""
@@ -479,13 +468,13 @@ class Cave(TroggleModel):
for a in self.area.all():
if a.kat_area():
return a.kat_area()
def entrances(self):
return CaveAndEntrance.objects.filter(cave=self)
def singleentrance(self):
return len(CaveAndEntrance.objects.filter(cave=self)) == 1
def entrancelist(self):
rs = []
res = ""
@@ -513,12 +502,12 @@ class Cave(TroggleModel):
else:
res += "&ndash;" + prevR
return res
def writeDataFile(self):
try:
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.CAVEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/cave.xml')
c = Context({'cave': self})
@@ -526,7 +515,7 @@ class Cave(TroggleModel):
u8 = u.encode("utf-8")
f.write(u8)
f.close()
def getArea(self):
areas = self.area.all()
lowestareas = list(areas)
@@ -540,22 +529,20 @@ class Cave(TroggleModel):
def getCaveByReference(reference):
areaname, code = reference.split("-", 1)
#print(areaname, code)
print(areaname, code)
area = Area.objects.get(short_name = areaname)
#print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
print(list(foundCaves))
if len(foundCaves) == 1:
return foundCaves[0]
else:
return False
assert len(foundCaves) == 1
return foundCaves[0]
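# A minimal standalone sketch (not from the codebase) of the reference format handled by
# getCaveByReference above, assuming references look like "<area short_name>-<number>";
# "1623-204" is only an illustrative value, not necessarily a real entry.
reference = "1623-204"
areaname, code = reference.split("-", 1)
print("%s / %s" % (areaname, code))   # -> 1623 / 204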
class OtherCaveName(TroggleModel):
name = models.CharField(max_length=160)
cave = models.ForeignKey(Cave)
def __unicode__(self):
return unicode(self.name)
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance')
slug = models.SlugField(max_length=50, unique = True)
@@ -606,35 +593,31 @@ class Entrance(TroggleModel):
def exact_location(self):
return SurvexStation.objects.lookup(self.exact_station)
def other_location(self):
return SurvexStation.objects.lookup(self.other_station)
def find_location(self):
r = {'': 'To be entered ',
'?': 'To be confirmed:',
'?': 'To be confirmed:',
'S': '',
'L': 'Lost:',
'R': 'Refindable:'}[self.findability]
if self.tag_station:
try:
s = SurvexStation.objects.filter(name=self.tag_station)[:1]
s = s[0]
s = SurvexStation.objects.lookup(self.tag_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Tag Station not in dataset" % self.tag_station
if self.exact_station:
try:
s = SurvexStation.objects.filter(name=self.exact_station)[:1]
s = s[0]
s = SurvexStation.objects.lookup(self.exact_station)
return r + "%0.0fE %0.0fN %0.0fAlt" % (s.x, s.y, s.z)
except:
return r + "%s Exact Station not in dataset" % self.tag_station
if self.other_station:
try:
s = SurvexStation.objects.filter(name=self.other_station)[:1]
s = s[0]
s = SurvexStation.objects.lookup(self.other_station)
return r + "%0.0fE %0.0fN %0.0fAlt %s" % (s.x, s.y, s.z, self.other_description)
except:
return r + "%s Other Station not in dataset" % self.tag_station
@@ -669,28 +652,28 @@ class Entrance(TroggleModel):
for f in self.FINDABLE_CHOICES:
if f[0] == self.findability:
return f[1]
def tag(self):
return SurvexStation.objects.lookup(self.tag_station)
def needs_surface_work(self):
return self.findability != "S" or not self.has_photo or self.marking != "T"
def get_absolute_url(self):
ancestor_titles='/'.join([subcave.title for subcave in self.get_ancestors()])
if ancestor_titles:
res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
else:
res = '/'.join((self.get_root().cave.get_absolute_url(), self.title))
return res
def slug(self):
if not self.cached_primary_slug:
primarySlugs = self.entranceslug_set.filter(primary = True)
if primarySlugs:
if primarySlugs:
self.cached_primary_slug = primarySlugs[0].slug
self.save()
else:
@@ -704,7 +687,7 @@ class Entrance(TroggleModel):
try:
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
except:
subprocess.call(settings.FIX_PERMISSIONS)
subprocess.call(settings.FIX_PERMISSIONS)
f = open(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename), "w")
t = loader.get_template('dataformat/entrance.xml')
c = Context({'entrance': self})
@@ -717,19 +700,19 @@ class CaveDescription(TroggleModel):
short_name = models.CharField(max_length=50, unique = True)
long_name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True,null=True)
linked_subcaves = models.ManyToManyField("NewSubCave", blank=True)
linked_entrances = models.ManyToManyField("Entrance", blank=True)
linked_qms = models.ManyToManyField("QM", blank=True)
linked_subcaves = models.ManyToManyField("NewSubCave", blank=True,null=True)
linked_entrances = models.ManyToManyField("Entrance", blank=True,null=True)
linked_qms = models.ManyToManyField("QM", blank=True,null=True)
def __unicode__(self):
if self.long_name:
return unicode(self.long_name)
else:
return unicode(self.short_name)
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
def save(self):
"""
Overridden save method which stores wikilinks in text as links in database.
@@ -746,26 +729,26 @@ class NewSubCave(TroggleModel):
return unicode(self.name)
class QM(TroggleModel):
# based on qm.csv in trunk/expoweb/1623/204 which has the fields:
# "Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
#based on qm.csv in trunk/expoweb/1623/204 which has the fields:
#"Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
found_by = models.ForeignKey(LogbookEntry, related_name='QMs_found',blank=True, null=True )
ticked_off_by = models.ForeignKey(LogbookEntry, related_name='QMs_ticked_off',null=True,blank=True)
# cave = models.ForeignKey(Cave)
# expedition = models.ForeignKey(Expedition)
#cave = models.ForeignKey(Cave)
#expedition = models.ForeignKey(Expedition)
number = models.IntegerField(help_text="this is the sequential number in the year", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
)
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
#should be a foreignkey to surveystation
nearest_station_description = models.CharField(max_length=400,null=True,blank=True)
nearest_station_name = models.CharField(max_length=200,blank=True,null=True)
nearest_station = models.ForeignKey(SurvexStation,null=True,blank=True)
nearest_station = models.CharField(max_length=200,blank=True,null=True)
area = models.CharField(max_length=100,blank=True,null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
@@ -774,19 +757,11 @@ class QM(TroggleModel):
return u"%s %s" % (self.code(), self.grade)
def code(self):
if self.found_by:
# Old style QMs where found_by is a logbook entry
return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
elif self.nearest_station:
# New style QMs, which are stored in SVX files and whose nearest station is a foreign key
return u"%s-%s-%s" % (self.nearest_station.block.name, self.nearest_station.name, self.number)
else:
# Just give up!!
return u"%s" % (self.number)
return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
def get_absolute_url(self):
#return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'qm_id':self.id}))
return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
@@ -798,10 +773,10 @@ class QM(TroggleModel):
return u"%s%s%s" % ('[[QM:',self.code(),']]')
photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
class DPhoto(TroggleImageModel):
class DPhoto(TroggleImageModel):
caption = models.CharField(max_length=1000,blank=True,null=True)
contains_logbookentry = models.ForeignKey(LogbookEntry,blank=True,null=True)
contains_person = models.ManyToManyField(Person,blank=True)
contains_person = models.ManyToManyField(Person,blank=True,null=True)
file = models.ImageField(storage=photoFileStorage, upload_to='.',)
is_mugshot = models.BooleanField(default=False)
contains_cave = models.ForeignKey(Cave,blank=True,null=True)
@@ -810,12 +785,12 @@ class DPhoto(TroggleImageModel):
nearest_QM = models.ForeignKey(QM,blank=True,null=True)
lon_utm = models.FloatField(blank=True,null=True)
lat_utm = models.FloatField(blank=True,null=True)
class IKOptions:
spec_module = 'core.imagekit_specs'
cache_dir = 'thumbs'
image_field = 'file'
#content_type = models.ForeignKey(ContentType)
#object_id = models.PositiveIntegerField()
#location = generic.GenericForeignKey('content_type', 'object_id')
@@ -853,7 +828,7 @@ class ScannedImage(TroggleImageModel):
#This is an ugly hack to deal with the #s in our survey scan paths. The correct thing is to write a custom file storage backend which calls urlencode on the name for making file.url but not file.path.
def correctURL(self):
return string.replace(self.file.url,r'#',r'%23')
def __unicode__(self):
return get_scan_path(self,'')
@@ -875,9 +850,8 @@ class Survey(TroggleModel):
integrated_into_main_sketch_on = models.DateField(blank=True,null=True)
integrated_into_main_sketch_by = models.ForeignKey('Person' ,related_name='integrated_into_main_sketch_by', blank=True,null=True)
rendered_image = models.ImageField(upload_to='renderedSurveys',blank=True,null=True)
def __unicode__(self):
return self.expedition.year+"#" + "%s%02d" % (self.wallet_letter, int(self.wallet_number))
return self.expedition.year+"#"+"%02d" % int(self.wallet_number)
def notes(self):
return self.scannedimage_set.filter(contents='notes')
@@ -887,14 +861,3 @@ class Survey(TroggleModel):
def elevations(self):
return self.scannedimage_set.filter(contents='elevation')
class DataIssue(TroggleModel):
date = models.DateTimeField(auto_now_add=True, blank=True)
parser = models.CharField(max_length=50, blank=True, null=True)
message = models.CharField(max_length=400, blank=True, null=True)
class Meta:
ordering = ['date']
def __unicode__(self):
return u"%s - %s" % (self.parser, self.message)


@@ -9,7 +9,7 @@ from django.core.urlresolvers import reverse
###########################################################
# These will allow browsing and editing of the survex data
###########################################################
# Needs to add:
# Needs to add:
# Equates
# reloading
@@ -18,37 +18,29 @@ class SurvexDirectory(models.Model):
cave = models.ForeignKey('Cave', blank=True, null=True)
primarysurvexfile = models.ForeignKey('SurvexFile', related_name='primarysurvexfile', blank=True, null=True)
# could also include files in directory but not referenced
def __unicode__(self):
return self.path
class Meta:
ordering = ('path',)
ordering = ('id',)
class SurvexFile(models.Model):
path = models.CharField(max_length=200)
survexdirectory = models.ForeignKey("SurvexDirectory", blank=True, null=True)
cave = models.ForeignKey('Cave', blank=True, null=True)
class Meta:
ordering = ('id',)
def __unicode__(self):
return self.path + '.svx' or 'no file'
def exists(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return os.path.isfile(fname)
def OpenFile(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return open(fname)
def SetDirectory(self):
dirpath = os.path.split(self.path)[0]
survexdirectorylist = SurvexDirectory.objects.filter(cave=self.cave, path=dirpath)
# if self.cave is '' or self.cave is None:
# print('No cave set for survex dir %s' % self.path)
if survexdirectorylist:
self.survexdirectory = survexdirectorylist[0]
else:
@@ -67,20 +59,14 @@ class SurvexStationLookUpManager(models.Manager):
name__iexact = stationname)
class SurvexStation(models.Model):
name = models.CharField(max_length=100)
name = models.CharField(max_length=100)
block = models.ForeignKey('SurvexBlock')
equate = models.ForeignKey('SurvexEquate', blank=True, null=True)
objects = SurvexStationLookUpManager()
x = models.FloatField(blank=True, null=True)
y = models.FloatField(blank=True, null=True)
z = models.FloatField(blank=True, null=True)
def __unicode__(self):
if self.block.cave:
# If we haven't got a cave we can't have a slug; this saves returning a NoneType
return self.block.cave.slug() + '/' + self.block.name + '/' + self.name or 'No station name'
else:
return str(self.block.cave) + '/' + self.block.name + '/' + self.name or 'No station name'
def path(self):
r = self.name
b = self.block
@@ -103,15 +89,15 @@ class SurvexLeg(models.Model):
#
# Single SurvexBlock
#
# Single SurvexBlock
#
class SurvexBlockLookUpManager(models.Manager):
def lookup(self, name):
if name == "":
blocknames = []
else:
blocknames = name.split(".")
block = SurvexBlock.objects.get(parent=None, survexfile__path=settings.SURVEX_TOPNAME)
block = SurvexBlock.objects.get(parent=None, survexfile__path="all")
for blockname in blocknames:
block = SurvexBlock.objects.get(parent=block, name__iexact=blockname)
return block
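# A plain-Python sketch of how lookup() walks a dotted block path (no ORM involved here);
# "caves.204.rhinorift" is an assumed example path, not necessarily a real block.
name = "caves.204.rhinorift"
blocknames = name.split(".") if name else []
path = ["<root block>"]
for blockname in blocknames:
    path.append(blockname)        # the real lookup() resolves each step with a case-insensitive query
print(" -> ".join(path))          # -> <root block> -> caves -> 204 -> rhinorift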
@@ -122,20 +108,20 @@ class SurvexBlock(models.Model):
parent = models.ForeignKey('SurvexBlock', blank=True, null=True)
text = models.TextField()
cave = models.ForeignKey('Cave', blank=True, null=True)
date = models.DateTimeField(blank=True, null=True)
date = models.DateField(blank=True, null=True)
expeditionday = models.ForeignKey("ExpeditionDay", null=True)
expedition = models.ForeignKey('Expedition', blank=True, null=True)
survexfile = models.ForeignKey("SurvexFile", blank=True, null=True)
begin_char = models.IntegerField() # code for where in the survex data files this block sits
survexpath = models.CharField(max_length=200) # the path for the survex stations
survexscansfolder = models.ForeignKey("SurvexScansFolder", null=True)
survexscansfolder = models.ForeignKey("SurvexScansFolder", null=True)
#refscandir = models.CharField(max_length=100)
totalleglength = models.FloatField()
class Meta:
ordering = ('id',)
@@ -144,7 +130,7 @@ class SurvexBlock(models.Model):
def __unicode__(self):
return self.name and unicode(self.name) or 'no name'
def GetPersonroles(self):
res = [ ]
for personrole in self.personrole_set.order_by('personexpedition'):
@@ -163,10 +149,10 @@ class SurvexBlock(models.Model):
ss = SurvexStation(name=name, block=self)
ss.save()
return ss
def DayIndex(self):
return list(self.expeditionday.survexblock_set.all()).index(self)
class SurvexTitle(models.Model):
survexblock = models.ForeignKey('SurvexBlock')
@@ -191,45 +177,39 @@ ROLE_CHOICES = (
class SurvexPersonRole(models.Model):
survexblock = models.ForeignKey('SurvexBlock')
nrole = models.CharField(choices=ROLE_CHOICES, max_length=200, blank=True, null=True)
# increasing levels of precision
# increasing levels of precision
personname = models.CharField(max_length=100)
person = models.ForeignKey('Person', blank=True, null=True)
personexpedition = models.ForeignKey('PersonExpedition', blank=True, null=True)
persontrip = models.ForeignKey('PersonTrip', blank=True, null=True)
persontrip = models.ForeignKey('PersonTrip', blank=True, null=True)
expeditionday = models.ForeignKey("ExpeditionDay", null=True)
def __unicode__(self):
return unicode(self.person) + " - " + unicode(self.survexblock) + " - " + unicode(self.nrole)
class SurvexScansFolder(models.Model):
fpath = models.CharField(max_length=200)
walletname = models.CharField(max_length=200)
class Meta:
ordering = ('walletname',)
def __unicode__(self):
return self.walletname or 'no wallet'
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
class SurvexScanSingle(models.Model):
ffile = models.CharField(max_length=200)
name = models.CharField(max_length=200)
survexscansfolder = models.ForeignKey("SurvexScansFolder", null=True)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.survexscansfolder.walletname + '/' + self.name
def get_absolute_url(self):
return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
class TunnelFile(models.Model):
tunnelpath = models.CharField(max_length=200)
tunnelname = models.CharField(max_length=200)
@@ -241,8 +221,8 @@ class TunnelFile(models.Model):
filesize = models.IntegerField(default=0)
npaths = models.IntegerField(default=0)
survextitles = models.ManyToManyField("SurvexTitle")
class Meta:
ordering = ('tunnelpath',)


@@ -47,6 +47,6 @@ def survex_to_html(value, autoescape=None):
if autoescape:
value = conditional_escape(value)
for regex, sub in regexes:
print(sub)
print sub
value = regex.sub(sub, value)
return mark_safe(value)


@@ -7,6 +7,7 @@ from troggle.core.models import QM, DPhoto, LogbookEntry, Cave
import re, urlparse
register = template.Library()
@register.filter()
def plusone(n):
@@ -76,7 +77,7 @@ def wiki_to_html_short(value, autoescape=None):
if number>1:
return '<h'+num+'>'+matchobj.groups()[1]+'</h'+num+'>'
else:
print('morethanone')
print 'morethanone'
return matchobj.group()
value = re.sub(r"(?m)^(=+)([^=]+)(=+)$",headerrepl,value)
@@ -142,13 +143,13 @@ def wiki_to_html_short(value, autoescape=None):
value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL)
#make cave links
value = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub("\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
#make people links
value = re.sub(r"\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub("\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make subcave links
value = re.sub(r"\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub("\[\[\s*subcave:(.+)\|(.+)\]\]",r'<a href="%ssubcave/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
#make cavedescription links
value = re.sub(r"\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
value = re.sub("\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)


@@ -1,6 +1,6 @@
from django.conf import settings
import fileAbstraction
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404
import os, stat
import re
@@ -8,7 +8,7 @@ from troggle.core.models import SurvexScansFolder, SurvexScanSingle, SurvexBlock
import parsers.surveys
import urllib
# inline fileabstraction into here if it's not going to be useful anywhere else
# inline fileabstraction into here if it's not going to be useful anywhere else
# keep things simple and ignore exceptions everywhere for now
@@ -33,7 +33,7 @@ def upload(request, path):
def download(request, path):
#try:
return HttpResponse(fileAbstraction.readFile(path), content_type=getMimeType(path.split(".")[-1]))
#except:
# raise Http404
@@ -49,32 +49,32 @@ extmimetypes = {".txt": "text/plain",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
}
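# A hedged illustration of the lookup getMimeType presumably performs with this table;
# the fallback content type is an assumption, not taken from the elided function body.
extmimetypes_example = {".txt": "text/plain", ".jpg": "image/jpeg", ".jpeg": "image/jpeg"}
ext = "." + "photo_0042.JPG".split(".")[-1].lower()
print(extmimetypes_example.get(ext, "application/octet-stream"))   # -> image/jpeg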
# dead
def jgtfile(request, f):
fp = os.path.join(settings.SURVEYS, f)
# could also surf through SURVEX_DATA
# directory listing
if os.path.isdir(fp):
listdirfiles = [ ]
listdirdirs = [ ]
for lf in sorted(os.listdir(fp)):
hpath = os.path.join(f, lf) # not absolute path
if lf[0] == "." or lf[-1] == "~":
continue
hpath = hpath.replace("\\", "/") # for windows users
href = hpath.replace("#", "%23") # '#' in file name annoyance
flf = os.path.join(fp, lf)
if os.path.isdir(flf):
nfiles = len([sf for sf in os.listdir(flf) if sf[0] != "."])
listdirdirs.append((href, hpath + "/", nfiles))
else:
listdirfiles.append((href, hpath, os.path.getsize(flf)))
upperdirs = [ ]
lf = f
while lf:
@@ -85,9 +85,9 @@ def jgtfile(request, f):
lf = os.path.split(lf)[0]
upperdirs.append((href, hpath))
upperdirs.append(("", "/"))
return render(request, 'listdir.html', {'file':f, 'listdirfiles':listdirfiles, 'listdirdirs':listdirdirs, 'upperdirs':upperdirs, 'settings': settings})
return render_to_response('listdir.html', {'file':f, 'listdirfiles':listdirfiles, 'listdirdirs':listdirdirs, 'upperdirs':upperdirs, 'settings': settings})
# flat output of file when loaded
if os.path.isfile(fp):
ext = os.path.splitext(fp)[1].lower()
@@ -123,16 +123,16 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
print "*** Making directory", fprojdir
os.mkdir(fprojdir)
print "hhh"
fname = os.path.join(fprojdir, name)
print fname, "fff"
fname = UniqueFile(fname)
p2, p1 = os.path.split(fname)
p3, p2 = os.path.split(p2)
p4, p3 = os.path.split(p3)
res = os.path.join(p3, p2, p1)
print "saving file", fname
fout = open(fname, (bbinary and "wb" or "w"))
fout.write(fdata.read())
@@ -163,73 +163,73 @@ def jgtuploadfile(request):
#print ("FFF", request.FILES.values())
message = ""
print "gothere"
return render(request, 'fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})
return render_to_response('fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})
def surveyscansfolder(request, path):
#print [ s.walletname for s in SurvexScansFolder.objects.all() ]
survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
return render(request, 'survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
def surveyscansingle(request, path, file):
survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
survexscansingle = SurvexScanSingle.objects.get(survexscansfolder=survexscansfolder, name=file)
return HttpResponse(content=open(survexscansingle.ffile), content_type=getMimeType(path.split(".")[-1]))
#return render(request, 'survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
#return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
def surveyscansfolders(request):
survexscansfolders = SurvexScansFolder.objects.all()
return render(request, 'survexscansfolders.html', { 'survexscansfolders':survexscansfolders, 'settings': settings })
return render_to_response('survexscansfolders.html', { 'survexscansfolders':survexscansfolders, 'settings': settings })
def tunneldata(request):
tunnelfiles = TunnelFile.objects.all()
return render(request, 'tunnelfiles.html', { 'tunnelfiles':tunnelfiles, 'settings': settings })
return render_to_response('tunnelfiles.html', { 'tunnelfiles':tunnelfiles, 'settings': settings })
def tunnelfile(request, path):
tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
return HttpResponse(content=open(tfile), content_type="text/plain")
def tunnelfileupload(request, path):
tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
print (project, user, tunnelversion)
assert len(request.FILES.values()) == 1, "only one file to upload"
uploadedfile = request.FILES.values()[0]
if uploadedfile.field_name != "sketch":
return HttpResponse(content="Error: non-sketch file uploaded", content_type="text/plain")
if uploadedfile.content_type != "text/plain":
return HttpResponse(content="Error: non-plain content type", content_type="text/plain")
# could use this to add new files
if os.path.split(path)[1] != uploadedfile.name:
if os.path.split(path)[1] != uploadedfile.name:
return HttpResponse(content="Error: name disagrees", content_type="text/plain")
orgsize = tunnelfile.filesize # = os.stat(tfile)[stat.ST_SIZE]
ttext = uploadedfile.read()
# could check that the user and projects agree here
fout = open(tfile, "w")
fout.write(ttext)
fout.close()
# redo its settings of
# redo its settings of
parsers.surveys.SetTunnelfileInfo(tunnelfile)
tunnelfile.save()
uploadedfile.close()
message = "File size %d overwritten with size %d" % (orgsize, tunnelfile.filesize)
return HttpResponse(content=message, content_type="text/plain")


@@ -10,10 +10,11 @@ from troggle.helper import login_required_if_public
from django.forms.models import modelformset_factory
from django import forms
from django.core.urlresolvers import reverse
from utils import render_with_context # see views_logbooks for explanation on this.
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
import re, urlparse
from django.shortcuts import get_object_or_404, render
from django.shortcuts import get_object_or_404
import settings
@@ -29,7 +30,7 @@ def getCave(cave_id):
return cave
def pad5(x):
return "0" * (5 -len(x.group(0))) + x.group(0)
return "0" * (5 -len(x.group(0))) + x.group(0)
def padnumber(x):
return re.sub("\d+", pad5, x)
def numericalcmp(x, y):
@@ -37,7 +38,7 @@ def numericalcmp(x, y):
def caveCmp(x, y):
def caveCmp(x, y):
if x.kataster_number:
if y.kataster_number:
return numericalcmp(x.kataster_number, y.kataster_number) # Note that cave kataster numbers are not generally integers.
@@ -46,22 +47,23 @@ def caveCmp(x, y):
else:
if y.kataster_number:
return 1
else:
else:
return numericalcmp(x.unofficial_number, y.unofficial_number)
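# A standalone rerun of the padding helpers above with example inputs, showing why
# zero-padding gives a natural sort of kataster numbers.
import re
def pad5_demo(x):
    return "0" * (5 - len(x.group(0))) + x.group(0)
def padnumber_demo(x):
    return re.sub("\d+", pad5_demo, x)
print("%s %s" % (padnumber_demo("9a"), padnumber_demo("107")))   # -> 00009a 00107
print(padnumber_demo("9a") < padnumber_demo("107"))              # -> True: "9a" sorts before "107"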
def caveindex(request):
#caves = Cave.objects.all()
caves = Cave.objects.all()
notablecavehrefs = settings.NOTABLECAVESHREFS
notablecaves = [Cave.objects.get(kataster_number=kataster_number) for kataster_number in notablecavehrefs ]
caves1623 = list(Cave.objects.filter(area__short_name = "1623"))
caves1626 = list(Cave.objects.filter(area__short_name = "1626"))
caves1623.sort(caveCmp)
caves1626.sort(caveCmp)
return render(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})
return render_with_context(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':notablecaves, 'cavepage': True})
def millenialcaves(request):
#RW messing around area
return HttpResponse("Test text", content_type="text/plain")
def cave3d(request, cave_id=''):
@@ -81,44 +83,43 @@ def cave3d(request, cave_id=''):
def cave(request, cave_id='', offical_name=''):
cave=getCave(cave_id)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
return render_with_context(request,'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
else:
return render(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})
return render_with_context(request,'cave.html', {'settings': settings, 'cave': cave, 'cavepage': True, 'cave_id': cave_id})
def caveEntrance(request, slug):
cave = Cave.objects.get(caveslug__slug = slug)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave})
return render_with_context(request,'nonpublic.html', {'instance': cave})
else:
return render(request,'cave_entrances.html', {'cave': cave})
return render_with_context(request,'cave_entrances.html', {'cave': cave})
def caveDescription(request, slug):
cave = Cave.objects.get(caveslug__slug = slug)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave})
return render_with_context(request,'nonpublic.html', {'instance': cave})
else:
return render(request,'cave_uground_description.html', {'cave': cave})
return render_with_context(request,'cave_uground_description.html', {'cave': cave})
def caveQMs(request, slug):
cave = Cave.objects.get(caveslug__slug = slug)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave})
return render_with_context(request,'nonpublic.html', {'instance': cave})
else:
return render(request,'cave_qms.html', {'cave': cave})
return render_with_context(request,'cave_qms.html', {'cave': cave})
def caveLogbook(request, slug):
cave = Cave.objects.get(caveslug__slug = slug)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave})
return render_with_context(request,'nonpublic.html', {'instance': cave})
else:
return render(request,'cave_logbook.html', {'cave': cave})
return render_with_context(request,'cave_logbook.html', {'cave': cave})
def caveSlug(request, slug):
cave = Cave.objects.get(caveslug__slug = slug)
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
return render_with_context(request,'nonpublic.html', {'instance': cave, 'cave_editable': slug})
else:
return render(request,'cave.html', {'cave': cave, 'cave_editable': slug})
return render_with_context(request,'cave.html', {'cave': cave, 'cave_editable': slug})
@login_required_if_public
def edit_cave(request, slug=None):
@@ -153,14 +154,14 @@ def edit_cave(request, slug=None):
ceinst.cave = cave
ceinst.save()
cave.writeDataFile()
return HttpResponseRedirect("/" + cave.url)
return HttpResponseRedirect("/" + cave.url)
else:
form = CaveForm(instance=cave)
ceFormSet = CaveAndEntranceFormSet(queryset=cave.caveandentrance_set.all())
versionControlForm = VersionControlCommentForm()
return render(request,
'editcave2.html',
return render_with_context(request,
'editcave2.html',
{'form': form,
'caveAndEntranceFormSet': ceFormSet,
'versionControlForm': versionControlForm
@@ -168,7 +169,7 @@ def edit_cave(request, slug=None):
@login_required_if_public
def editEntrance(request, caveslug, slug=None):
cave = Cave.objects.get(caveslug__slug = caveslug)
cave = Cave.objects.get(caveslug__slug = caveslug)
if slug is not None:
entrance = Entrance.objects.get(entranceslug__slug = slug)
else:
@@ -195,7 +196,7 @@ def editEntrance(request, caveslug, slug=None):
el.entrance = entrance
el.save()
entrance.writeDataFile()
return HttpResponseRedirect("/" + cave.url)
return HttpResponseRedirect("/" + cave.url)
else:
form = EntranceForm(instance = entrance)
versionControlForm = VersionControlCommentForm()
@@ -203,102 +204,104 @@ def editEntrance(request, caveslug, slug=None):
entletter = EntranceLetterForm(request.POST)
else:
entletter = None
return render(request,
'editentrance.html',
return render_with_context(request,
'editentrance.html',
{'form': form,
'versionControlForm': versionControlForm,
'entletter': entletter
})
def qm(request,qm_id):
def qm(request,cave_id,qm_id,year,grade=None):
year=int(year)
try:
qm=QM.objects.get(id=qm_id)
return render(request,'qm.html',locals())
qm=getCave(cave_id).get_QMs().get(number=qm_id,found_by__date__year=year)
return render_with_context(request,'qm.html',locals())
except QM.DoesNotExist:
url=urlparse.urljoin(settings.URL_ROOT, r'/admin/core/qm/add/'+'?'+ r'number=' + qm_id)
if grade:
url += r'&grade=' + grade
return HttpResponseRedirect(url)
def ent(request, cave_id, ent_letter):
cave = Cave.objects.filter(kataster_number = cave_id)[0]
cave_and_ent = CaveAndEntrance.objects.filter(cave = cave).filter(entrance_letter = ent_letter)[0]
return render(request,'entrance.html', {'cave': cave,
return render_with_context(request,'entrance.html', {'cave': cave,
'entrance': cave_and_ent.entrance,
'letter': cave_and_ent.entrance_letter,})
def entranceSlug(request, slug):
entrance = Entrance.objects.get(entranceslug__slug = slug)
if entrance.non_public and not request.user.is_authenticated():
return render(request,'nonpublic.html', {'instance': entrance})
return render_with_context(request,'nonpublic.html', {'instance': entrance})
else:
return render(request,'entranceslug.html', {'entrance': entrance})
return render_with_context(request,'entranceslug.html', {'entrance': entrance})
def survexblock(request, survexpath):
survexpath = re.sub("/", ".", survexpath)
print("jjjjjj", survexpath)
print "jjjjjj", survexpath
survexblock = models.SurvexBlock.objects.get(survexpath=survexpath)
#ftext = survexblock.filecontents()
ftext = survexblock.text
return render(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })
return render_with_context(request,'survexblock.html', {'survexblock':survexblock, 'ftext':ftext, })
def surveyindex(request):
surveys=Survey.objects.all()
expeditions=Expedition.objects.order_by("-year")
return render(request,'survey.html',locals())
return render_with_context(request,'survey.html',locals())
def survey(request,year,wallet_number):
surveys=Survey.objects.all()
expeditions=Expedition.objects.order_by("-year")
current_expedition=Expedition.objects.filter(year=year)[0]
if wallet_number!='':
current_survey=Survey.objects.filter(expedition=current_expedition,wallet_number=wallet_number)[0]
notes=current_survey.scannedimage_set.filter(contents='notes')
planSketches=current_survey.scannedimage_set.filter(contents='plan')
elevationSketches=current_survey.scannedimage_set.filter(contents='elevation')
return render(request,'survey.html', locals())
return render_with_context(request,'survey.html', locals())
def cave_description(request, cavedescription_name):
cave_description = get_object_or_404(CaveDescription, short_name = cavedescription_name)
return render(request,'cave_description.html', locals())
return render_with_context(request,'cave_description.html', locals())
def get_entrances(request, caveslug):
cave = Cave.objects.get(caveslug__slug = caveslug)
return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
def get_qms(request, caveslug):
cave = Cave.objects.get(caveslug__slug = caveslug)
return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
return render_with_context(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
areanames = [
#('', 'Location unclear'),
('1a', '1a &ndash; Plateau: around Top Camp'),
('1b', '1b &ndash; Western plateau near 182'),
('1c', '1c &ndash; Eastern plateau near 204 walk-in path'),
('1d', '1d &ndash; Further plateau around 76'),
('2a', '2a &ndash; Southern Schwarzmooskogel near 201 path and the Nipple'),
('2b', '2b &ndash; Eish&ouml;hle area'),
('2b or 4 (unclear)', '2b or 4 (unclear)'),
('2c', '2c &ndash; Kaninchenh&ouml;hle area'),
('2d', '2d &ndash; Steinbr&uuml;ckenh&ouml;hle area'),
('3', '3 &ndash; Br&auml;uning Alm'),
('4', '4 &ndash; Kratzer valley'),
('5', '5 &ndash; Schwarzmoos-Wildensee'),
('6', '6 &ndash; Far plateau'),
('1626 or 6 (borderline)', '1626 or 6 (borderline)'),
('7', '7 &ndash; Egglgrube'),
('8a', '8a &ndash; Loser south face'),
('8b', '8b &ndash; Loser below Dimmelwand'),
('8c', '8c &ndash; Augst See'),
('8d', '8d &ndash; Loser-Hochganger ridge'),
('9', '9 &ndash; Gschwandt Alm'),
('10', '10 &ndash; Altaussee'),
('11', '11 &ndash; Augstbach')
]
#('', 'Location unclear'),
('1a', '1a &ndash; Plateau: around Top Camp'),
('1b', '1b &ndash; Western plateau near 182'),
('1c', '1c &ndash; Eastern plateau near 204 walk-in path'),
('1d', '1d &ndash; Further plateau around 76'),
('2a', '2a &ndash; Southern Schwarzmooskogel near 201 path and the Nipple'),
('2b', '2b &ndash; Eish&ouml;hle area'),
('2b or 4 (unclear)', '2b or 4 (unclear)'),
('2c', '2c &ndash; Kaninchenh&ouml;hle area'),
('2d', '2d &ndash; Steinbr&uuml;ckenh&ouml;hle area'),
('3', '3 &ndash; Br&auml;uning Alm'),
('4', '4 &ndash; Kratzer valley'),
('5', '5 &ndash; Schwarzmoos-Wildensee'),
('6', '6 &ndash; Far plateau'),
('1626 or 6 (borderline)', '1626 or 6 (borderline)'),
('7', '7 &ndash; Egglgrube'),
('8a', '8a &ndash; Loser south face'),
('8b', '8b &ndash; Loser below Dimmelwand'),
('8c', '8c &ndash; Augst See'),
('8d', '8d &ndash; Loser-Hochganger ridge'),
('9', '9 &ndash; Gschwandt Alm'),
('10', '10 &ndash; Altaussee'),
('11', '11 &ndash; Augstbach')
]
def prospecting(request):
@@ -310,27 +313,27 @@ def prospecting(request):
caves = list(a.cave_set.all())
caves.sort(caveCmp)
areas.append((name, a, caves))
return render(request, 'prospecting.html', {"areas": areas})
return render_with_context(request, 'prospecting.html', {"areas": areas})
# Parameters for big map and zoomed subarea maps:
# big map first (zoom factor ignored)
maps = {
# id left top right bottom zoom
# G&K G&K G&K G&K factor
"all": [33810.4, 85436.5, 38192.0, 81048.2, 0.35,
"All"],
"40": [36275.6, 82392.5, 36780.3, 81800.0, 3.0,
"Eish&ouml;hle"],
"76": [35440.0, 83220.0, 36090.0, 82670.0, 1.3,
"Eislufth&ouml;hle"],
"204": [36354.1, 84154.5, 37047.4, 83300, 3.0,
"Steinbr&uuml;ckenh&ouml;hle"],
"tc": [35230.0, 82690.0, 36110.0, 82100.0, 3.0,
"Near Top Camp"],
# id left top right bottom zoom
# G&K G&K G&K G&K factor
"all": [33810.4, 85436.5, 38192.0, 81048.2, 0.35,
"All"],
"40": [36275.6, 82392.5, 36780.3, 81800.0, 3.0,
"Eish&ouml;hle"],
"76": [35440.0, 83220.0, 36090.0, 82670.0, 1.3,
"Eislufth&ouml;hle"],
"204": [36354.1, 84154.5, 37047.4, 83300, 3.0,
"Steinbr&uuml;ckenh&ouml;hle"],
"tc": [35230.0, 82690.0, 36110.0, 82100.0, 3.0,
"Near Top Camp"],
"grieß":
[36000.0, 86300.0, 38320.0, 84400.0, 4.0,
"Grießkogel Area"],
[36000.0, 86300.0, 38320.0, 84400.0, 4.0,
"Grießkogel Area"],
}
for n in maps.keys():
@@ -341,7 +344,7 @@ for n in maps.keys():
for j in range(2):
maps["%s%i%i" % (n, i, j)] = [L + i * W, T - j * H, L + (i + 1) * W, T - (j + 1) * H, S, name]
# Keys in the order in which we want the maps output
mapcodes = ["all", "grieß","40", "76", "204", "tc"]
mapcodes = ["all", "grieß","40", "76", "204", "tc"]
# Field codes
L = 0
T = 1
@@ -351,54 +354,54 @@ ZOOM = 4
DESC = 5
areacolours = {
'1a' : '#00ffff',
'1b' : '#ff00ff',
'1c' : '#ffff00',
'1d' : '#ffffff',
'2a' : '#ff0000',
'2b' : '#00ff00',
'2c' : '#008800',
'2d' : '#ff9900',
'3' : '#880000',
'4' : '#0000ff',
'6' : '#000000', # doubles for surface fixed pts, and anything else
'7' : '#808080'
}
'1a' : '#00ffff',
'1b' : '#ff00ff',
'1c' : '#ffff00',
'1d' : '#ffffff',
'2a' : '#ff0000',
'2b' : '#00ff00',
'2c' : '#008800',
'2d' : '#ff9900',
'3' : '#880000',
'4' : '#0000ff',
'6' : '#000000', # doubles for surface fixed pts, and anything else
'7' : '#808080'
}
for FONT in [
"/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
"C:\WINNT\Fonts\ARIAL.TTF"
]:
if os.path.isfile(FONT): break
"/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
"C:\WINNT\Fonts\ARIAL.TTF"
]:
if os.path.isfile(FONT): break
TEXTSIZE = 16
CIRCLESIZE =8
LINEWIDTH = 2
myFont = ImageFont.truetype(FONT, TEXTSIZE)
def mungecoord(x, y, mapcode, img):
# Top of Zinken is 73 1201 = dataset 34542 81967
# Top of Hinter is 1073 562 = dataset 36670 83317
# image is 1417 by 2201
# FACTOR1 = 1000.0 / (36670.0-34542.0)
# FACTOR2 = (1201.0-562.0) / (83317 - 81967)
# FACTOR = (FACTOR1 + FACTOR2)/2
# The factors aren't the same as the scanned map's at a slight angle. I
# can't be bothered to fix this. Since we zero on the Hinter it makes
# very little difference for caves in the areas round 76 or 204.
# xoffset = (x - 36670)*FACTOR
# yoffset = (y - 83317)*FACTOR
# return (1073 + xoffset, 562 - yoffset)
m = maps[mapcode]
factorX, factorY = img.size[0] / (m[R] - m[L]), img.size[1] / (m[T] - m[B])
return ((x - m[L]) * factorX, (m[T] - y) * factorY)
# Top of Zinken is 73 1201 = dataset 34542 81967
# Top of Hinter is 1073 562 = dataset 36670 83317
# image is 1417 by 2201
# FACTOR1 = 1000.0 / (36670.0-34542.0)
# FACTOR2 = (1201.0-562.0) / (83317 - 81967)
# FACTOR = (FACTOR1 + FACTOR2)/2
# The factors aren't the same as the scanned map's at a slight angle. I
# can't be bothered to fix this. Since we zero on the Hinter it makes
# very little difference for caves in the areas round 76 or 204.
# xoffset = (x - 36670)*FACTOR
# yoffset = (y - 83317)*FACTOR
# return (1073 + xoffset, 562 - yoffset)
m = maps[mapcode]
factorX, factorY = img.size[0] / (m[R] - m[L]), img.size[1] / (m[T] - m[B])
return ((x - m[L]) * factorX, (m[T] - y) * factorY)
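# A worked example of the pixel mapping above, using the "all" map bounds from the maps
# dict and the 1417 by 2201 image size mentioned in the comments; the station coordinates
# are the Hinterer Schwarzmooskogel figures quoted above.
left, top, right, bottom = 33810.4, 85436.5, 38192.0, 81048.2
width, height = 1417.0, 2201.0
factorX, factorY = width / (right - left), height / (top - bottom)
x, y = 36670.0, 83317.0
print("%.0f %.0f" % ((x - left) * factorX, (top - y) * factorY))   # -> 925 1063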
COL_TYPES = {True: "red",
False: "#dddddd",
"Reference": "#dddddd"}
def plot(surveypoint, number, point_type, label, mapcode, draw, img):
try:
ss = SurvexStation.objects.lookup(surveypoint)
@@ -420,40 +423,40 @@ def prospecting_image(request, name):
m = maps[name]
#imgmaps = []
if name == "all":
img = mainImage
img = mainImage
else:
M = maps['all']
W, H = mainImage.size
l = int((m[L] - M[L]) / (M[R] - M[L]) * W)
t = int((m[T] - M[T]) / (M[B] - M[T]) * H)
r = int((m[R] - M[L]) / (M[R] - M[L]) * W)
b = int((m[B] - M[T]) / (M[B] - M[T]) * H)
img = mainImage.crop((l, t, r, b))
w = int(round(m[ZOOM] * (m[R] - m[L]) / (M[R] - M[L]) * W))
h = int(round(m[ZOOM] * (m[B] - m[T]) / (M[B] - M[T]) * H))
img = img.resize((w, h), Image.BICUBIC)
M = maps['all']
W, H = mainImage.size
l = int((m[L] - M[L]) / (M[R] - M[L]) * W)
t = int((m[T] - M[T]) / (M[B] - M[T]) * H)
r = int((m[R] - M[L]) / (M[R] - M[L]) * W)
b = int((m[B] - M[T]) / (M[B] - M[T]) * H)
img = mainImage.crop((l, t, r, b))
w = int(round(m[ZOOM] * (m[R] - m[L]) / (M[R] - M[L]) * W))
h = int(round(m[ZOOM] * (m[B] - m[T]) / (M[B] - M[T]) * H))
img = img.resize((w, h), Image.BICUBIC)
draw = ImageDraw.Draw(img)
draw.setfont(myFont)
if name == "all":
for maparea in maps.keys():
if maparea == "all":
continue
localm = maps[maparea]
l,t = mungecoord(localm[L], localm[T], "all", img)
r,b = mungecoord(localm[R], localm[B], "all", img)
text = maparea + " map"
textlen = draw.textsize(text)[0] + 3
draw.rectangle([l, t, l+textlen, t+TEXTSIZE+2], fill='#ffffff')
draw.text((l+2, t+1), text, fill="#000000")
#imgmaps.append( [l, t, l+textlen, t+SIZE+2, "submap" + maparea, maparea + " subarea map"] )
draw.line([l, t, r, t], fill='#777777', width=LINEWIDTH)
draw.line([l, b, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, b], fill='#777777', width=LINEWIDTH)
draw.line([r, t, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l+textlen, t], fill='#777777', width=LINEWIDTH)
draw.line([l, t+TEXTSIZE+2, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l+textlen, t, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
if maparea == "all":
continue
localm = maps[maparea]
l,t = mungecoord(localm[L], localm[T], "all", img)
r,b = mungecoord(localm[R], localm[B], "all", img)
text = maparea + " map"
textlen = draw.textsize(text)[0] + 3
draw.rectangle([l, t, l+textlen, t+TEXTSIZE+2], fill='#ffffff')
draw.text((l+2, t+1), text, fill="#000000")
#imgmaps.append( [l, t, l+textlen, t+SIZE+2, "submap" + maparea, maparea + " subarea map"] )
draw.line([l, t, r, t], fill='#777777', width=LINEWIDTH)
draw.line([l, b, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, b], fill='#777777', width=LINEWIDTH)
draw.line([r, t, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l+textlen, t], fill='#777777', width=LINEWIDTH)
draw.line([l, t+TEXTSIZE+2, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l+textlen, t, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
#imgmaps[maparea] = []
# Draw scale bar
m100 = int(100 / (m[R] - m[L]) * img.size[0])
@@ -465,7 +468,7 @@ def prospecting_image(request, name):
plot("laser.0_7", "BNase", "Reference", "Br&auml;uning Nase laser point", name, draw, img)
plot("226-96", "BZkn", "Reference", "Br&auml;uning Zinken trig point", name, draw, img)
plot("vd1","VD1","Reference", "VD1 survey point", name, draw, img)
plot("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point", name, draw, img)
plot("laser.kt114_96","HSK","Reference", "Hinterer Schwarzmooskogel trig point", name, draw, img)
plot("2000","Nipple","Reference", "Nipple (Wei&szlig;e Warze)", name, draw, img)
plot("3000","VSK","Reference", "Vorderer Schwarzmooskogel summit", name, draw, img)
plot("topcamp", "TC", "Reference", "Top Camp", name, draw, img)
@@ -475,15 +478,14 @@ def prospecting_image(request, name):
plot("laser.0_5", "LSR5", "Reference", "Laser Point 0/5", name, draw, img)
plot("225-96", "BAlm", "Reference", "Br&auml;uning Alm trig point", name, draw, img)
for entrance in Entrance.objects.all():
station = entrance.best_station()
if station:
#try:
areaName = entrance.caveandentrance_set.all()[0].cave.getArea().short_name
plot(station, "%s-%s" % (areaName, str(entrance)
[5:]), entrance.needs_surface_work(), str(entrance), name, draw, img)
#except:
# pass
station = entrance.best_station()
if station:
#try:
areaName = entrance.caveandentrance_set.all()[0].cave.getArea().short_name
plot(station, "%s-%s" % (areaName, str(entrance)[5:]), entrance.needs_surface_work(), str(entrance), name, draw, img)
#except:
# pass
for (N, E, D, num) in [(35975.37, 83018.21, 100,"177"), # Calculated from bearings
(35350.00, 81630.00, 50, "71"), # From Auer map
(36025.00, 82475.00, 50, "146"), # From mystery map
@@ -507,8 +509,8 @@ def prospecting_image(request, name):
del draw
img.save(response, "PNG")
return response
STATIONS = {}
STATIONS = {}
poslineregex = re.compile("^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
def LoadPos():
call([settings.CAVERN, "--output=%s/all.3d" % settings.SURVEX_DATA, "%s/all.svx" % settings.SURVEX_DATA])
@@ -516,7 +518,7 @@ def LoadPos():
posfile = open("%sall.pos" % settings.SURVEX_DATA)
posfile.readline()#Drop header
for line in posfile.readlines():
r = poslineregex.match(line)
r = poslineregex.match(line)
if r:
x, y, z, name = r.groups()
STATIONS[name] = (x, y, z)
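# A self-contained check of the .pos line parsing above; the line is a made-up example in
# the format cavern writes ("(x, y, z) station.name").
import re
pos_re = re.compile("^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
r = pos_re.match("(36670.00, 83317.00, 1770.50) caves.204.entrance")
if r:
    x, y, z, name = r.groups()
    print("%s at (%s, %s, %s)" % (name, x, y, z))   # -> caves.204.entrance at (36670.00, 83317.00, 1770.50)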


@@ -1,4 +1,4 @@
from django.shortcuts import render_to_response, render
from django.shortcuts import render_to_response
from troggle.core.models import Expedition, Person, PersonExpedition, PersonTrip, LogbookEntry, SurvexBlock
import troggle.core.models as models
import troggle.settings as settings
@@ -9,6 +9,7 @@ from troggle.core.forms import getTripForm#, get_name, PersonForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, loader
from utils import render_with_context
import os.path
import troggle.parsers.logbooks as logbookparsers
from django.template.defaultfilters import slugify
@@ -37,7 +38,7 @@ def getNotablePersons():
for person in Person.objects.all():
if person.bisnotable():
notablepersons.append(person)
return notablepersons
return notablepersons
def personindex(request):
@@ -48,13 +49,13 @@ def personindex(request):
nc = (len(persons) + ncols - 1) / ncols
for i in range(ncols):
personss.append(persons[i * nc: (i + 1) * nc])
notablepersons = []
for person in Person.objects.all():
if person.bisnotable():
notablepersons.append(person)
return render(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons})
return render_with_context(request,'personindex.html', {'persons': persons, 'personss':personss, 'notablepersons':notablepersons, })
def expedition(request, expeditionname):
@@ -67,20 +68,17 @@ def expedition(request, expeditionname):
for personexpedition in this_expedition.personexpedition_set.all():
prow = [ ]
for date in dates:
pcell = { "persontrips": PersonTrip.objects.filter(personexpedition=personexpedition,
pcell = { "persontrips": PersonTrip.objects.filter(personexpedition=personexpedition,
logbook_entry__date=date) }
pcell["survexblocks"] = set(SurvexBlock.objects.filter(survexpersonrole__personexpedition=personexpedition,
date=date))
pcell["survexblocks"] = set(SurvexBlock.objects.filter(survexpersonrole__personexpedition=personexpedition,
date = date))
prow.append(pcell)
personexpeditiondays.append({"personexpedition":personexpedition, "personrow":prow})
message = ""
if "reload" in request.GET:
LoadLogbookForExpedition(this_expedition)
return render(request,'expedition.html', {'this_expedition': this_expedition,
'expeditions':expeditions,
'personexpeditiondays':personexpeditiondays,
'settings':settings,
'dateditems': dateditems })
message = LoadLogbookForExpedition(this_expedition)
return render_with_context(request,'expedition.html', {'expedition': this_expedition, 'expeditions':expeditions, 'personexpeditiondays':personexpeditiondays, 'message':message, 'settings':settings, 'dateditems': dateditems })
def get_absolute_url(self):
return ('expedition', (expedition.year))
@@ -97,39 +95,39 @@ class ExpeditionListView(ListView):
def person(request, first_name='', last_name='', ):
this_person = Person.objects.get(first_name = first_name, last_name = last_name)
# This is for removing the reference to the user's profile, in case they set it to the wrong person
if request.method == 'GET':
if request.GET.get('clear_profile')=='True':
this_person.user=None
this_person.save()
return HttpResponseRedirect(reverse('profiles_select_profile'))
return render(request,'person.html', {'person': this_person, })
return render_with_context(request,'person.html', {'person': this_person, })
def GetPersonChronology(personexpedition):
res = { }
for persontrip in personexpedition.persontrip_set.all():
a = res.setdefault(persontrip.logbook_entry.date, { })
a = res.setdefault(persontrip.date, { })
a.setdefault("persontrips", [ ]).append(persontrip)
for personrole in personexpedition.survexpersonrole_set.all():
a = res.setdefault(personrole.survexblock.date, { })
a.setdefault("personroles", [ ]).append(personrole.survexblock)
# build up the tables
rdates = res.keys()
rdates.sort()
res2 = [ ]
for rdate in rdates:
persontrips = res[rdate].get("persontrips", [])
personroles = res[rdate].get("personroles", [])
for n in range(max(len(persontrips), len(personroles))):
res2.append(((n == 0 and rdate or "--"), (n < len(persontrips) and persontrips[n]), (n < len(personroles) and personroles[n])))
return res2
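# A standalone sketch of how the rows are padded when a date has more trips than survex
# roles (stand-in strings replace the PersonTrip and SurvexBlock objects).
persontrips = ["trip A", "trip B"]
personroles = ["survex block X"]
rdate = "2004-07-20"
rows = []
for n in range(max(len(persontrips), len(personroles))):
    rows.append(((n == 0 and rdate or "--"),
                 (n < len(persontrips) and persontrips[n]),
                 (n < len(personroles) and personroles[n])))
print(rows)   # -> [('2004-07-20', 'trip A', 'survex block X'), ('--', 'trip B', False)]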
@@ -138,17 +136,17 @@ def personexpedition(request, first_name='', last_name='', year=''):
this_expedition = Expedition.objects.get(year=year)
personexpedition = person.personexpedition_set.get(expedition=this_expedition)
personchronology = GetPersonChronology(personexpedition)
return render(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})
return render_with_context(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})
def logbookentry(request, date, slug):
this_logbookentry = LogbookEntry.objects.filter(date=date, slug=slug)
if len(this_logbookentry)>1:
return render(request, 'object_list.html',{'object_list':this_logbookentry})
return render_with_context(request, 'object_list.html',{'object_list':this_logbookentry})
else:
this_logbookentry=this_logbookentry[0]
return render(request, 'logbookentry.html', {'logbookentry': this_logbookentry})
return render_with_context(request, 'logbookentry.html', {'logbookentry': this_logbookentry})
def logbookSearch(request, extra):
@@ -159,14 +157,14 @@ def logbookSearch(request, extra):
entry_query = search.get_query(query_string, ['text','title',])
found_entries = LogbookEntry.objects.filter(entry_query)
return render(request,'logbooksearch.html',
return render_with_context(request,'logbooksearch.html',
{ 'query_string': query_string, 'found_entries': found_entries, })
#context_instance=RequestContext(request))
def personForm(request,pk):
person=Person.objects.get(pk=pk)
form=PersonForm(instance=person)
return render(request,'personform.html', {'form':form,})
return render_with_context(request,'personform.html', {'form':form,})
def experimental(request):
@@ -180,10 +178,10 @@ def experimental(request):
survexleglength += survexblock.totalleglength
legsbyexpo.append((expedition, {"nsurvexlegs":len(survexlegs), "survexleglength":survexleglength}))
legsbyexpo.reverse()
survexlegs = models.SurvexLeg.objects.all()
totalsurvexlength = sum([survexleg.tape for survexleg in survexlegs])
return render(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
return render_with_context(request, 'experimental.html', { "nsurvexlegs":len(survexlegs), "totalsurvexlength":totalsurvexlength, "legsbyexpo":legsbyexpo })
@login_required_if_public
def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
@@ -198,11 +196,11 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
personTripFormSet = PersonTripFormSet(request.POST)
if tripForm.is_valid() and personTripFormSet.is_valid(): # All validation rules pass
dateStr = tripForm.cleaned_data["date"].strftime("%Y-%m-%d")
directory = os.path.join(settings.EXPOWEB,
"years",
expedition.year,
directory = os.path.join(settings.EXPOWEB,
"years",
expedition.year,
"autologbook")
filename = os.path.join(directory,
filename = os.path.join(directory,
dateStr + "." + slugify(tripForm.cleaned_data["title"])[:50] + ".html")
if not os.path.isdir(directory):
os.mkdir(directory)
@@ -210,7 +208,7 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
delLogbookEntry(previouslbe)
f = open(filename, "w")
template = loader.get_template('dataformat/logbookentry.html')
context = Context({'trip': tripForm.cleaned_data,
context = Context({'trip': tripForm.cleaned_data,
'persons': personTripFormSet.cleaned_data,
'date': dateStr,
'expeditionyear': expeditionyear})
@@ -234,15 +232,15 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
"location": previouslbe.place,
"caveOrLocation": "location",
"html": previouslbe.text})
personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
"TU": py.time_underground,
personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
"TU": py.time_underground,
"author": py.is_logbook_entry_author}
for py in previouslbe.persontrip_set.all()])
else:
else:
tripForm = TripForm() # An unbound form
personTripFormSet = PersonTripFormSet()
return render(request, 'newlogbookentry.html', {
return render_with_context(request, 'newlogbookentry.html', {
'tripForm': tripForm,
'personTripFormSet': personTripFormSet,
@@ -264,8 +262,9 @@ def delLogbookEntry(lbe):
def get_people(request, expeditionslug):
exp = Expedition.objects.get(year = expeditionslug)
return render(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
return render_with_context(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
def get_logbook_entries(request, expeditionslug):
exp = Expedition.objects.get(year = expeditionslug)
return render(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})
return render_with_context(request,'options.html', {"items": [(le.slug, "%s - %s" % (le.date, le.title)) for le in exp.logbookentry_set.all()]})

View File

@@ -4,11 +4,11 @@ from django.conf import settings
from django import forms
from django.template import loader, Context
from django.db.models import Q
from django.shortcuts import render
import databaseReset
import re
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from utils import render_with_context
from troggle.core.models import *
from troggle.helper import login_required_if_public
@@ -21,21 +21,21 @@ def stats(request):
statsDict['caveCount'] = int(Cave.objects.count())
statsDict['personCount'] = int(Person.objects.count())
statsDict['logbookEntryCount'] = int(LogbookEntry.objects.count())
return render(request,'statistics.html', statsDict)
return render_with_context(request,'statistics.html', statsDict)
def frontpage(request):
if request.user.is_authenticated():
return render(request,'tasks.html')
return render_with_context(request,'tasks.html')
expeditions = Expedition.objects.order_by("-year")
logbookentry = LogbookEntry
cave = Cave
photo = DPhoto
from django.contrib.admin.templatetags import log
return render(request,'frontpage.html', locals())
return render_with_context(request,'frontpage.html', locals())
def todo(request):
message = "no test message" #reverse('personn', kwargs={"name":"hkjhjh"})
message = "no test message" #reverse('personn', kwargs={"name":"hkjhjh"})
if "reloadexpos" in request.GET:
message = LoadPersonsExpos()
message = "Reloaded personexpos"
@@ -45,47 +45,47 @@ def todo(request):
expeditions = Expedition.objects.order_by("-year")
totallogbookentries = LogbookEntry.objects.count()
return render(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message})
return render_with_context(request,'index.html', {'expeditions':expeditions, 'all':'all', 'totallogbookentries':totallogbookentries, "message":message})
def controlPanel(request):
jobs_completed=[]
if request.method=='POST':
if request.user.is_superuser:
#importlist is mostly here so that things happen in the correct order.
#http post data seems to come in an unpredictable order, so we do it this way.
importlist=['reload_db', 'import_people', 'import_cavetab', 'import_logbooks', 'import_surveys', 'import_QMs']
databaseReset.make_dirs()
for item in importlist:
if item in request.POST:
print("running"+ " databaseReset."+item+"()")
exec("databaseReset."+item+"()")
print "running"+ " databaseReset."+item+"()"
exec "databaseReset."+item+"()"
jobs_completed.append(item)
else:
if request.user.is_authenticated(): #The user is logged in, but is not a superuser.
return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'})
return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'error':'You must be a superuser to use that feature.'})
else:
return HttpResponseRedirect(reverse('auth_login'))
return render(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
return render_with_context(request,'controlPanel.html', {'caves':Cave.objects.all(),'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
def downloadCavetab(request):
from export import tocavetab
response = HttpResponse(content_type='text/csv')
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=CAVETAB2.CSV'
tocavetab.writeCaveTab(response)
return response
def downloadSurveys(request):
from export import tosurveys
response = HttpResponse(content_type='text/csv')
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=Surveys.csv'
tosurveys.writeCaveTab(response)
return response
def downloadLogbook(request,year=None,extension=None,queryset=None):
if year:
current_expedition=Expedition.objects.get(year=year)
logbook_entries=LogbookEntry.objects.filter(expedition=current_expedition)
@@ -94,28 +94,29 @@ def downloadLogbook(request,year=None,extension=None,queryset=None):
logbook_entries=queryset
filename='logbook'
else:
response = HttpResponse(content_type='text/plain')
return response(r"Error: Logbook downloader doesn't know what year you want")
if 'year' in request.GET:
year=request.GET['year']
if 'extension' in request.GET:
extension=request.GET['extension']
if extension =='txt':
response = HttpResponse(content_type='text/plain')
response = HttpResponse(mimetype='text/plain')
style='2008'
elif extension == 'html':
response = HttpResponse(content_type='text/html')
response = HttpResponse(mimetype='text/html')
style='2005'
template='logbook'+style+'style.'+extension
response['Content-Disposition'] = 'attachment; filename='+filename+'.'+extension
response['Content-Disposition'] = 'attachment; filename='+filename+'.'+extension
t=loader.get_template(template)
c=Context({'logbook_entries':logbook_entries})
response.write(t.render(c))
return response
def downloadQMs(request):
# Note to self: use get_cave method for the below
@@ -123,29 +124,29 @@ def downloadQMs(request):
try:
cave=Cave.objects.get(kataster_number=request.GET['cave_id'])
except Cave.DoesNotExist:
cave=Cave.objects.get(name=request.GET['cave_id'])
cave=Cave.objects.get(name=cave_id)
from export import toqms
response = HttpResponse(content_type='text/csv')
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=qm.csv'
toqms.writeQmTable(response,cave)
return response
def ajax_test(request):
post_text = request.POST['post_data']
return HttpResponse("{'response_text': '"+post_text+" recieved.'}",
content_type="application/json")
return HttpResponse("{'response_text': '"+post_text+" recieved.'}",
mimetype="application/json")
def eyecandy(request):
return
return
def ajax_QM_number(request):
if request.method=='POST':
cave=Cave.objects.get(id=request.POST['cave'])
print(cave)
print cave
exp=Expedition.objects.get(pk=request.POST['year'])
print(exp)
print exp
res=cave.new_QM_number(exp.year)
return HttpResponse(res)
@@ -158,15 +159,15 @@ def logbook_entry_suggestions(request):
unwiki_QM_pattern=r"(?P<whole>(?P<explorer_code>[ABC]?)(?P<cave>\d*)-?(?P<year>\d\d\d?\d?)-(?P<number>\d\d)(?P<grade>[ABCDXV]?))"
unwiki_QM_pattern=re.compile(unwiki_QM_pattern)
#wikilink_QM_pattern=settings.QM_PATTERN
slug=request.POST['slug']
date=request.POST['date']
lbo=LogbookEntry.objects.get(slug=slug, date=date)
#unwiki_QMs=re.findall(unwiki_QM_pattern,lbo.text)
unwiki_QMs=[m.groupdict() for m in unwiki_QM_pattern.finditer(lbo.text)]
print(unwiki_QMs)
print unwiki_QMs
for qm in unwiki_QMs:
#try:
if len(qm['year'])==2:
@@ -179,29 +180,29 @@ def logbook_entry_suggestions(request):
try:
lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in")
except:
print("failed to get placeholder for year "+str(qm['year']))
print "failed to get placeholder for year "+str(qm['year'])
temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade'])
temp_QM.grade=qm['grade']
qm['wikilink']=temp_QM.wiki_link()
#except:
#print 'failed'
print(unwiki_QMs)
print unwiki_QMs
#wikilink_QMs=re.findall(wikilink_QM_pattern,lbo.text)
attached_QMs=lbo.QMs_found.all()
unmentioned_attached_QMs=''#not implemented, fill this in by subtracting wiklink_QMs from attached_QMs
#Find unattached_QMs. We only look at the QMs with a proper wiki link.
#for qm in wikilink_QMs:
#Try to look up the QM.
print('got 208')
#Try to look up the QM.
print 'got 208'
any_suggestions=True
print('got 210')
return render(request,'suggestions.html',
print 'got 210'
return render_with_context(request,'suggestions.html',
{
'unwiki_QMs':unwiki_QMs,
'any_suggestions':any_suggestions
@@ -217,11 +218,11 @@ def newFile(request, pslug = None):
# personTripFormSet = PersonTripFormSet(request.POST)
# if tripForm.is_valid() and personTripFormSet.is_valid(): # All validation rules pass
# dateStr = tripForm.cleaned_data["date"].strftime("%Y-%m-%d")
# directory = os.path.join(settings.EXPOWEB,
# "years",
# expedition.year,
# directory = os.path.join(settings.EXPOWEB,
# "years",
# expedition.year,
# "autologbook")
# filename = os.path.join(directory,
# filename = os.path.join(directory,
# dateStr + "." + slugify(tripForm.cleaned_data["title"])[:50] + ".html")
# if not os.path.isdir(directory):
# os.mkdir(directory)
@@ -229,7 +230,7 @@ def newFile(request, pslug = None):
# delLogbookEntry(previouslbe)
# f = open(filename, "w")
# template = loader.get_template('dataformat/logbookentry.html')
# context = Context({'trip': tripForm.cleaned_data,
# context = Context({'trip': tripForm.cleaned_data,
# 'persons': personTripFormSet.cleaned_data,
# 'date': dateStr,
# 'expeditionyear': expeditionyear})
@@ -254,14 +255,14 @@ def newFile(request, pslug = None):
# "location": previouslbe.place,
# "caveOrLocation": "location",
# "html": previouslbe.text})
# personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
# "TU": py.time_underground,
# personTripFormSet = PersonTripFormSet(initial=[{"name": get_name(py.personexpedition),
# "TU": py.time_underground,
# "author": py.is_logbook_entry_author}
# for py in previouslbe.persontrip_set.all()])
# else:
# else:
# fileform = UploadFileForm() # An unbound form
return render(request, 'editfile.html', {
return render_with_context(request, 'editfile.html', {
'fileForm': fileform,
})

View File

@@ -1,8 +1,6 @@
from django import forms
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.views.decorators import csrf
from django.views.decorators.csrf import csrf_protect
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404
import re
import os
@@ -17,7 +15,7 @@ import troggle.settings as settings
import parsers.survex
survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau (kataster group 1623)
; Cave:
; Cave:
*begin [surveyname]
@@ -40,9 +38,9 @@ survextemplatefile = """; Locn: Totes Gebirge, Austria - Loser/Augst-Eck Plateau
*data passage station left right up down ignoreall
1 [L] [R] [U] [D] comment
*end [surveyname]"""
*end [surveyname]"""
def ReplaceTabs(stext):
res = [ ]
nsl = 0
@@ -65,7 +63,7 @@ class SvxForm(forms.Form):
datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols":150, "rows":18}))
def GetDiscCode(self):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname):
@@ -75,7 +73,7 @@ class SvxForm(forms.Form):
svxtext = ReplaceTabs(svxtext).strip()
fin.close()
return svxtext
def DiffCode(self, rcode):
code = self.GetDiscCode()
difftext = difflib.unified_diff(code.splitlines(), rcode.splitlines())
@@ -86,14 +84,14 @@ class SvxForm(forms.Form):
fname = settings.SURVEX_DATA + self.data['filename'] + ".svx"
if not os.path.isfile(fname):
# only save if appears valid
if re.search(r"\[|\]", rcode):
if re.search(r"\[|\]", rcode):
return "Error: clean up all []s from the text"
mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
if not mbeginend:
return "Error: no begin/end block here"
if mbeginend.group(1) != mbeginend.group(2):
return "Error: mismatching beginend"
fout = open(fname, "w")
res = fout.write(rcode.encode("latin1"))
fout.close()
@@ -111,28 +109,28 @@ class SvxForm(forms.Form):
log = re.sub("(?s).*?(Survey contains)", "\\1", log)
return log
@csrf_protect
def svx(request, survex_file):
# get the basic data from the file given in the URL
dirname = os.path.split(survex_file)[0]
dirname += "/"
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
outputtype = "normal"
form = SvxForm({'filename':survex_file, 'dirname':dirname, 'datetime':nowtime, 'outputtype':outputtype})
form = SvxForm({'filename':survex_file, 'dirname':dirname, 'datetime':nowtime, 'outputtype':outputtype})
# if the form has been returned
difflist = [ ]
logmessage = ""
message = ""
if request.method == 'POST': # If the form has been submitted...
rform = SvxForm(request.POST) #
rform = SvxForm(request.POST) #
if rform.is_valid(): # All validation rules pass (how do we check it against the filename and users?)
rcode = rform.cleaned_data['code']
outputtype = rform.cleaned_data['outputtype']
difflist = form.DiffCode(rcode)
#print "ssss", rform.data
if "revert" in rform.data:
pass
if "process" in rform.data:
@@ -153,20 +151,20 @@ def svx(request, survex_file):
form.data['code'] = rcode
if "diff" in rform.data:
form.data['code'] = rcode
#process(survex_file)
if 'code' not in form.data:
if 'code' not in form.data:
form.data['code'] = form.GetDiscCode()
if not difflist:
difflist.append("none")
if message:
difflist.insert(0, message)
#print [ form.data['code'] ]
svxincludes = re.findall(r'\*include\s+(\S+)(?i)', form.data['code'] or "")
vmap = {'settings': settings,
'has_3d': os.path.isfile(settings.SURVEX_DATA + survex_file + ".3d"),
'title': survex_file,
@@ -174,14 +172,13 @@ def svx(request, survex_file):
'difflist': difflist,
'logmessage':logmessage,
'form':form}
# vmap.update(csrf(request))
if outputtype == "ajax":
return render(request, 'svxfiledifflistonly.html', vmap)
return render(request, 'svxfile.html', vmap)
return render_to_response('svxfiledifflistonly.html', vmap)
return render_to_response('svxfile.html', vmap)
def svxraw(request, survex_file):
svx = open(os.path.join(settings.SURVEX_DATA, survex_file+".svx"), "rb")
return HttpResponse(svx, content_type="text")
return HttpResponse(svx, mimetype="text")
# The cavern running function
@@ -196,22 +193,22 @@ def threed(request, survex_file):
process(survex_file)
try:
threed = open(settings.SURVEX_DATA + survex_file + ".3d", "rb")
return HttpResponse(threed, content_type="model/3d")
return HttpResponse(threed, mimetype="model/3d")
except:
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
return HttpResponse(log, content_type="text")
return HttpResponse(log, mimetype="text")
def log(request, survex_file):
process(survex_file)
log = open(settings.SURVEX_DATA + survex_file + ".log", "rb")
return HttpResponse(log, content_type="text")
return HttpResponse(log, mimetype="text")
def err(request, survex_file):
process(survex_file)
err = open(settings.SURVEX_DATA + survex_file + ".err", "rb")
return HttpResponse(err, content_type="text")
return HttpResponse(err, mimetype="text")
def identifycavedircontents(gcavedir):
@@ -227,13 +224,13 @@ def identifycavedircontents(gcavedir):
pass
elif name == "115" and (f in ["115cufix.svx", "115fix.svx"]):
pass
elif os.path.isdir(os.path.join(gcavedir, f)):
if f[0] != ".":
subdirs.append(f)
elif f[-4:] == ".svx":
nf = f[:-4]
if nf.lower() == name.lower() or nf[:3] == "all" or (name, nf) in [("resurvey2005", "145-2005"), ("cucc", "cu115")]:
if primesvx:
if nf[:3] == "all":
@@ -253,50 +250,38 @@ def identifycavedircontents(gcavedir):
if primesvx:
subsvx.insert(0, primesvx)
return subdirs, subsvx
# direct local non-database browsing through the svx file repositories
# perhaps should use the database and have a reload button for it
def survexcaveslist(request):
kat_areas = settings.KAT_AREAS
fnumlist = []
kat_areas = ['1623']
for area in kat_areas:
print(area)
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-%s" % area)
print(cavesdir)
#cavesdircontents = { }
fnumlist += [ (-int(re.match(r"\d*", f).group(0) or "0"), f, area) for f in os.listdir(cavesdir) ]
print(fnumlist)
print(len(fnumlist))
# first sort the file list
fnumlist.sort()
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-1623")
#cavesdircontents = { }
onefilecaves = [ ]
multifilecaves = [ ]
subdircaves = [ ]
# first sort the file list
fnumlist = [ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ]
fnumlist.sort()
print(fnumlist)
# go through the list and identify the contents of each cave directory
for num, cavedir, area in fnumlist:
for num, cavedir in fnumlist:
if cavedir in ["144", "40"]:
continue
cavesdir = os.path.join(settings.SURVEX_DATA, "caves-%s" % area)
gcavedir = os.path.join(cavesdir, cavedir)
if os.path.isdir(gcavedir) and cavedir[0] != ".":
subdirs, subsvx = identifycavedircontents(gcavedir)
survdirobj = [ ]
for lsubsvx in subsvx:
survdirobj.append(("caves-" + area + "/"+cavedir+"/"+lsubsvx, lsubsvx))
survdirobj.append(("caves-1623/"+cavedir+"/"+lsubsvx, lsubsvx))
# caves with subdirectories
if subdirs:
subsurvdirs = [ ]
@@ -305,10 +290,10 @@ def survexcaveslist(request):
assert not dsubdirs
lsurvdirobj = [ ]
for lsubsvx in dsubsvx:
lsurvdirobj.append(("caves-" + area + "/"+cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx))
lsurvdirobj.append(("caves-1623/"+cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx))
subsurvdirs.append((lsurvdirobj[0], lsurvdirobj[1:]))
subdircaves.append((cavedir, (survdirobj[0], survdirobj[1:]), subsurvdirs))
# multifile caves
elif len(survdirobj) > 1:
multifilecaves.append((survdirobj[0], survdirobj[1:]))
@@ -317,22 +302,24 @@ def survexcaveslist(request):
#print("survdirobj = ")
#print(survdirobj)
onefilecaves.append(survdirobj[0])
return render_to_response('svxfilecavelist.html', {'settings': settings, "onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
return render(request, 'svxfilecavelist.html', {"onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
# parsing all the survex files of a single cave and showing that it's consistent and can find all the files and people
# doesn't use recursion. just writes it twice
def survexcavesingle(request, survex_cave):
breload = False
cave = Cave.objects.filter(kataster_number=survex_cave)
if len(cave) < 1:
cave = Cave.objects.filter(unofficial_number=survex_cave)
cave = Cave.objects.get(kataster_number=survex_cave)
if breload:
parsers.survex.ReloadSurvexCave(survex_cave)
if len(cave) > 0:
return render(request, 'svxcavesingle.html', {"cave":cave[0] })
else:
return render(request, 'svxcavesingle.html', {"cave":cave })
return render_to_response('svxcavesingle.html', {'settings': settings, "cave":cave })

View File

@@ -3,11 +3,6 @@ import time
import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
if __name__ == '__main__':
import django
django.setup()
from django.core import management
from django.db import connection
from django.contrib.auth.models import User
@@ -34,6 +29,7 @@ def reload_db():
cursor.execute("ALTER DATABASE %s CHARACTER SET=utf8" % databasename)
cursor.execute("USE %s" % databasename)
management.call_command('migrate', interactive=False)
#management.call_command('syncdb', interactive=False)
user = User.objects.create_user(expouser, expouseremail, expouserpass)
user.is_staff = True
user.is_superuser = True
@@ -47,7 +43,7 @@ def make_dirs():
def import_caves():
import parsers.caves
print("Importing Caves")
print("importing caves")
parsers.caves.readcaves()
def import_people():
@@ -61,7 +57,7 @@ def import_logbooks():
settings.LOGFILE.write('\nBegun importing logbooks at ' + time.asctime() +'\n'+'-'*60)
except:
pass
import parsers.logbooks
parsers.logbooks.LoadLogbooks()
@@ -95,16 +91,14 @@ def reset():
import_caves()
import_people()
import_surveyscans()
import_survex()
import_logbooks()
import_QMs()
import_survex()
try:
import_tunnelfiles()
except:
print("Tunnel files parser broken.")
import_surveys()
@@ -116,10 +110,10 @@ def import_auto_logbooks():
for lbe in troggle.core.models.LogbookEntry.objects.all():
lbe.delete()
for expedition in troggle.core.models.Expedition.objects.all():
directory = os.path.join(settings.EXPOWEB,
"years",
expedition.year,
"autologbook")
directory = os.path.join(settings.EXPOWEB,
"years",
expedition.year,
"autologbook")
for root, dirs, filenames in os.walk(directory):
for filename in filenames:
print(os.path.join(root, filename))
@@ -136,13 +130,13 @@ def dumplogbooks():
return pe.person.first_name
for lbe in troggle.core.models.LogbookEntry.objects.all():
dateStr = lbe.date.strftime("%Y-%m-%d")
directory = os.path.join(settings.EXPOWEB,
directory = os.path.join(settings.EXPOWEB,
"years",
lbe.expedition.year,
lbe.expedition.year,
"autologbook")
if not os.path.isdir(directory):
os.mkdir(directory)
filename = os.path.join(directory,
filename = os.path.join(directory,
dateStr + "." + slugify(lbe.title)[:50] + ".html")
if lbe.cave:
print(lbe.cave.reference())
@@ -153,7 +147,7 @@ def dumplogbooks():
persons = [{"name": get_name(pt.personexpedition), "TU": pt.time_underground, "author": pt.is_logbook_entry_author} for pt in pts]
f = open(filename, "wb")
template = loader.get_template('dataformat/logbookentry.html')
context = Context({'trip': trip,
context = Context({'trip': trip,
'persons': persons,
'date': dateStr,
'expeditionyear': lbe.expedition.year})
@@ -202,6 +196,9 @@ if __name__ == "__main__":
elif "scans" in sys.argv:
import_surveyscans()
elif "caves" in sys.argv:
reload_db()
make_dirs()
pageredirects()
import_caves()
elif "people" in sys.argv:
import_people()
@@ -222,14 +219,14 @@ if __name__ == "__main__":
import_descriptions()
parse_descriptions()
elif "survex" in sys.argv:
# management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
#management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import_survex()
elif "survexpos" in sys.argv:
# management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import parsers.survex
parsers.survex.LoadPos()
parsers.survex.LoadPos()
elif "logbooks" in sys.argv:
# management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
management.call_command('syncdb', interactive=False) # this sets the path so that import settings works in import_survex
import_logbooks()
elif "autologbooks" in sys.argv:
import_auto_logbooks()
@@ -241,8 +238,10 @@ if __name__ == "__main__":
import_surveys()
elif "help" in sys.argv:
usage()
elif "reload_db" in sys.argv:
reload_db()
else:
print("%s not recognised" % sys.argv)
usage()

85
debian/serversetup vendored
View File

@@ -1,85 +0,0 @@
Instructions for setting up new expo debian server/VM
For Debian Stretch, June 2019.
adduser expo
apt install openssh-server mosh tmux mc zile emacs-nox mc most ncdu
apt install python-django apache2 mysql-server survex make rsync
apt install libjs-openlayers make
apt install git mercurial mercurial-server?
for boe:
apt install libcgi-session-perl libcrypt-passwdmd5-perl libfile-slurp-perl libgit-wrapper-perl libhtml-template-perl libhtml-template-pro-perl libmime-lite-perl libtext-password-pronounceable-perl libtime-parsedate-perl libuuid-tiny-perl libcrypt-cracklib-perl
obsolete-packages:
bins (move to jigl?) (for photos)
python-django 1.7
backports: survex therion
not-packaged: caveview
make these dirs available at top documentroot:
cuccfiles
expofiles
loser (link to repo)
tunneldata (link to repo)
troggle (link to repo)
expoweb (link to repo)
boc/boe
config
containing:
setup apache configs for cucc and expo
#disable default website
a2dissite 000-default
a2ensite cucc
a2ensite expo
a2enmod cgid
Boe config:
Alias /boe /home/expo/boe/boc/boc.pl
<Directory /home/expo/boe/boc>
AddHandler cgi-script .pl
SetHandler cgi-script
Options +ExecCGI
Require all granted
</Directory>
And remember to set both program and data dir to be
www-data:www-data
(optionally make file group read/write by treasurer account)
create empty repo by clicking create in boe interface
then set names in 'settings'
Set up mysql (as root)
mysql -p
CREATE DATABASE troggle;
GRANT ALL PRIVILEGES ON troggle.* TO 'expo'@'localhost' IDENTIFIED BY 'somepassword';
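These credentials have to match the DATABASES block in localsettings.py (the localsettings examples elsewhere in this changeset use the same engine and database name). A minimal sketch, assuming the database, user and password created above:
DATABASES = {
    'default': {
        'ENGINE'  : 'django.db.backends.mysql',
        'NAME'    : 'troggle',        # database created above
        'USER'    : 'expo',           # user from the GRANT statement above
        'PASSWORD': 'somepassword',   # whatever you put in the GRANT statement
        'HOST'    : '',               # empty string means localhost
        'PORT'    : '',               # empty string means the default MySQL port
    }
}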
install django:
sudo apt install python-django python-django-registration python-django-imagekit python-django-tinymce fonts-freefont-ttf libapache2-mod-wsgi
python-django-imagekit comes from https://salsa.debian.org/python-team/modules/python-django-imagekit
python-django-tinymce comes from https://salsa.debian.org/python-team/modules/python-django-tinymce
(both modified for stretch/python2). packages under /home/wookey/packages/
need fonts-freefont-ttf (to have truetype freesans available for troggle via PIL)
need libapache2-mod-wsgi for apache wsgi support.
On stretch the packaged django 1.10 is no use, so get rid of that:
apt remove python3-django python-django python-django-common python-django-doc
Then replace with django 1.7 (Needs to be built for stretch)
apt install python-django python-django-common python-django-doc
apt install python-django-registration python-django-imagekit python-django-tinymce
then hold them to stop them being upgraded by unattended upgrades:
echo "python-django hold" | sudo dpkg --set-selections
echo "python-django-common hold" | sudo dpkg --set-selections
echo "python-django-doc hold" | sudo dpkg --set-selections
#troggle has to have a writable logfile otherwise the website explodes
# 500 error on the server, and apache error log has non-reentrant errors
create /var/log/troggle/troggle.log
chown www-data:adm /var/log/troggle/troggle.log
chmod 660 /var/log/troggle/troggle.log
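Note that the localsettings examples in this changeset point LOGFILE at a troggle_log.txt inside the checkout; if the server is meant to log to the file created above instead, the corresponding localsettings.py line would be (a sketch, assuming that is the intent):
# assumes the /var/log/troggle/troggle.log created above is the wanted log location
LOGFILE = '/var/log/troggle/troggle.log'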

View File

@@ -2,17 +2,15 @@ FROM python:2.7-stretch
#COPY backports.list /etc/apt/sources.list.d/
RUN apt-get -y update && apt-get install -y mercurial \
fonts-freefont-ttf locales survex python-levenshtein \
python-pygraphviz
RUN apt-get -y update && apt-get install -y mercurial fonts-freefont-ttf locales survex
#RUN apt-get -y -t -backports install survex
# Set the locale
RUN locale-gen en_GB.UTF-8
ENV LANG en_GB.UTF-8
ENV LANGUAGE en_GB:en
ENV LC_ALL en_GB.UTF-8
ENV LANG en_GB.UTF-8
ENV LANGUAGE en_GB:en
ENV LC_ALL en_GB.UTF-8
WORKDIR /opt/expo/troggle
COPY requirements.txt .

View File

@@ -40,6 +40,7 @@ mkdir -p expofiles/surveyscans
To start the containers run
```bash
$ cd ~/expo/troggle/docker
$ docker-compose up
```
You will now have a working troggle but with no data. To import the data you need to access the container run

View File

@@ -1 +1 @@
requirements.txt.dj-1.10
requirements.txt.dj-1.7.11

View File

@@ -1,13 +0,0 @@
Django==1.10.8
django-registration==2.1.2
mysql
django-imagekit
Image
django-tinymce
smartencoding
fuzzywuzzy
GitPython
unidecode
django-extensions
pygraphviz
python-Levenshtein

View File

@@ -6,7 +6,3 @@ django-imagekit
Image
django-tinymce==2.7.0
smartencoding
fuzzywuzzy
GitPython
unidecode
django-extensions

View File

@@ -33,3 +33,4 @@ def writeQmTable(outfile,cave):
cavewriter.writerow(headers)
for qm in cave.get_QMs():
cavewriter.writerow(qmRow(qm))

View File

@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-18 16:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EntranceRedirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('originalURL', models.CharField(max_length=200)),
('entrance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Entrance')),
],
),
migrations.CreateModel(
name='Redirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('originalURL', models.CharField(max_length=200, unique=True)),
('newURL', models.CharField(max_length=200)),
],
),
]

View File

@@ -1,6 +1,6 @@
import troggle.settings as settings
from troggle.helper import login_required_if_public
from django.shortcuts import render
from utils import render_with_context
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
@@ -33,11 +33,12 @@ def flatpage(request, path):
except EntranceRedirect.DoesNotExist:
pass
if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated():
print("flat path noinfo", path)
return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path)
if path.endswith("/") or path == "":
if path.endswith("/") or path == "":
try:
o = open(os.path.normpath(settings.EXPOWEB + path + "index.html"), "rb")
path = path + "index.html"
@@ -46,16 +47,16 @@ def flatpage(request, path):
o = open(os.path.normpath(settings.EXPOWEB + path + "index.htm"), "rb")
path = path + "index.htm"
except IOError:
return render(request, 'pagenotfound.html', {'path': path})
else:
return render_with_context(request, 'pagenotfound.html', {'path': path})
else:
try:
filetobeopened = os.path.normpath(settings.EXPOWEB + path)
o = open(filetobeopened, "rb")
except IOError:
return render(request, 'pagenotfound.html', {'path': path})
return render_with_context(request, 'pagenotfound.html', {'path': path})
if path.endswith(".htm") or path.endswith(".html"):
html = o.read()
m = re.search(r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)", html, re.DOTALL + re.IGNORECASE)
if m:
preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups()
@@ -74,7 +75,7 @@ def flatpage(request, path):
if re.search(r"iso-8859-1", html):
body = unicode(body, "iso-8859-1")
body.strip
return render(request, 'flatpage.html', {'editable': True, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
return render_with_context(request, 'flatpage.html', {'editable': True, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
else:
return HttpResponse(o.read(), content_type=getmimetype(path))
@@ -124,7 +125,7 @@ def editflatpage(request, path):
return HttpResponse("Page could not be split into header and body")
except IOError:
filefound = False
if request.method == 'POST': # If the form has been submitted...
flatpageForm = FlatPageForm(request.POST) # A form bound to the POST data
@@ -141,7 +142,7 @@ def editflatpage(request, path):
headerargs = ""
postheader = ""
bodyargs = ""
postbody = "</html>"
postbody = "</html>"
body = flatpageForm.cleaned_data["html"]
body = body.replace("\r", "")
result = u"%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
@@ -152,16 +153,16 @@ def editflatpage(request, path):
else:
if filefound:
m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
if m:
if m:
title, = m.groups()
else:
title = ""
flatpageForm = FlatPageForm({"html": body, "title": title})
else:
flatpageForm = FlatPageForm()
return render(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, })
return render_with_context(request, 'editflatpage.html', {'path': path, 'form': flatpageForm, })
class FlatPageForm(forms.Form):
title = forms.CharField(widget=forms.TextInput(attrs={'size':'60'}))
html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 20}))
html = forms.CharField(widget=forms.Textarea())

View File

@@ -1,8 +1,8 @@
import sys
# This is the local settings for use with the docker compose dev setup. It is imported automatically
# link localsettings to this file for use on expo computer in austria
DATABASES = {
'default': {
'default': {
'ENGINE': 'django.db.backends.mysql', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME' : 'troggle', # Or path to database file if using sqlite3.
'USER' : 'troggleuser', # Not used with sqlite3.
@@ -12,8 +12,6 @@ DATABASES = {
}
}
ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'somepasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -49,12 +47,19 @@ MEDIA_URL = URL_ROOT + DIR_ROOT + 'site_media/'
MEDIA_ROOT = REPOS_ROOT_PATH + '/troggle/media/'
MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'
STATIC_URL = "/static/"
STATIC_ROOT = "/expo/static"
STATIC_URL = URL_ROOT
STATIC_ROOT = DIR_ROOT
JSLIB_URL = URL_ROOT + 'javascript/'
TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'
TEMPLATE_DIRS = (
PYTHON_PATH + "templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
LOGFILE = PYTHON_PATH + 'troggle_log.txt'

View File

@@ -15,8 +15,6 @@ DATABASES = {
}
}
ALLOWED_HOSTS = ['*']
REPOS_ROOT_PATH = '/home/expo/'
sys.path.append(REPOS_ROOT_PATH)
sys.path.append(REPOS_ROOT_PATH + 'troggle')
@@ -55,6 +53,13 @@ JSLIB_PATH = '/usr/share/javascript/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
TEMPLATE_DIRS = (
PYTHON_PATH + "templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
LOGFILE = '/home/expo/troggle/troggle_log.txt'
FEINCMS_ADMIN_MEDIA='/site_media/feincms/'

View File

@@ -1,6 +1,6 @@
import sys
# This is an example file. Copy it to localsettings.py, set the
# password and _don't_ check that file back to the repo as it exposes
# password and _don't_ check that file back to the repo as it exposes
# your/our password to the world!
DATABASES = {
@@ -14,8 +14,6 @@ DATABASES = {
}
}
ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -54,8 +52,15 @@ MEDIA_ADMIN_DIR = '/usr/lib/python2.7/site-packages/django/contrib/admin/media/'
JSLIB_URL = URL_ROOT + 'javascript/'
TINY_MCE_MEDIA_ROOT = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_URL = STATIC_ROOT + '/tiny_mce/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
TEMPLATE_DIRS = (
PYTHON_PATH + "templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
LOGFILE = '/home/expo/troggle/troggle_log.txt'

View File

@@ -2,7 +2,7 @@ import sys
# link localsettings to this file for use on expo computer in austria
DATABASES = {
'default': {
'default': {
'ENGINE': 'django.db.backends.mysql', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME' : 'troggle', # Or path to database file if using sqlite3.
'USER' : 'expo', # Not used with sqlite3.
@@ -12,8 +12,6 @@ DATABASES = {
}
}
ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -59,4 +57,11 @@ JSLIB_URL = URL_ROOT + 'javascript/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + '/tinymce_media/'
TEMPLATE_DIRS = (
PYTHON_PATH + "templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
LOGFILE = PYTHON_PATH + 'troggle_log.txt'

View File

@@ -9,8 +9,6 @@ DATABASES = {
}
}
ALLOWED_HOSTS = ['*']
EXPOUSER = 'expo'
EXPOUSERPASS = 'realpasshere'
EXPOUSER_EMAIL = 'wookey@wookware.org'
@@ -32,7 +30,7 @@ URL_ROOT = 'http://127.0.0.1:8000'
DIR_ROOT = ''#this should end in / if a value is given
PUBLIC_SITE = False
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_ROOT = '/usr/share/tinymce/www/'
TINY_MCE_MEDIA_URL = URL_ROOT + DIR_ROOT + 'tinymce_media/'
PYTHON_PATH = 'C:\\expoweb\\troggle\\'
@@ -58,3 +56,14 @@ EMAIL_USE_TLS = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
TEMPLATE_DIRS = (
"C:/Expo/expoweb/troggle/templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)

View File

@@ -29,12 +29,12 @@
}
function redirectSurvey(){
window.location = "{{ URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23" + document.getElementById("surveyChooser").value;
window.location = "{{ settings.URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23" + document.getElementById("surveyChooser").value;
document.getElementById("progressTableContent").style.display='hidden'
}
function redirectYear(){
window.location = "{{ URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23"
window.location = "{{ settings.URL_ROOT }}/survey/" + document.getElementById("expeditionChooser").value + "%23"
}

View File

@@ -46,4 +46,4 @@ def _resolves(url):
return True
except http.Http404:
return False

View File

@@ -17,20 +17,20 @@ def parseCaveQMs(cave,inputFile):
try:
steinBr=Cave.objects.get(official_name="Steinbr&uuml;ckenh&ouml;hle")
except Cave.DoesNotExist:
print("Steinbruckenhoehle is not in the database. Please run parsers.cavetab first.")
print "Steinbruckenhoehle is not in the database. Please run parsers.cavetab first."
return
elif cave=='hauch':
try:
hauchHl=Cave.objects.get(official_name="Hauchh&ouml;hle")
except Cave.DoesNotExist:
print("Hauchhoele is not in the database. Please run parsers.cavetab first.")
print "Hauchhoele is not in the database. Please run parsers.cavetab first."
return
elif cave =='kh':
try:
kh=Cave.objects.get(official_name="Kaninchenh&ouml;hle")
except Cave.DoesNotExist:
print("KH is not in the database. Please run parsers.cavetab first.")
parse_KH_QMs(kh, inputFile=inputFile)
print "KH is not in the database. Please run parsers.cavetab first."
parse_KH_QMs(kh, inputFile=inputFile)
return
qmPath = settings.EXPOWEB+inputFile
@@ -46,9 +46,9 @@ def parseCaveQMs(cave,inputFile):
if cave=='stein':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 204", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":steinBr})
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
print(cave + " placeholder logbook entry for " + str(year) + " added to database")
print cave+" placeholder logbook entry for " + str(year) + " added to database"
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder
@@ -59,7 +59,7 @@ def parseCaveQMs(cave,inputFile):
newQM.grade=line[1]
newQM.area=line[2]
newQM.location_description=line[3]
newQM.completion_description=line[4]
newQM.nearest_station_description=line[5]
if newQM.completion_description: # Troggle checks if QMs are completed by checking if they have a ticked_off_by trip. In the table, completion is indicated by the presence of a completion description.
@@ -71,18 +71,19 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
print("overwriting " + str(preexistingQM) +"\r")
print "overwriting " + str(preexistingQM) +"\r",
else: # otherwise, print that it was ignored
print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
print "preserving "+ str(preexistingQM) + ", which was edited in admin \r",
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()
print("QM "+str(newQM) + ' added to database\r')
newQM.save()
print "QM "+str(newQM) + ' added to database\r',
except KeyError: #check on this one
continue
except IndexError:
print("Index error in " + str(line))
print "Index error in " + str(line)
continue
def parse_KH_QMs(kh, inputFile):
@@ -103,15 +104,14 @@ def parse_KH_QMs(kh, inputFile):
}
nonLookupArgs={
'grade':res['grade'],
'nearest_station_name':res['nearest_station'],
'nearest_station':res['nearest_station'],
'location_description':res['description']
}
save_carefully(QM,lookupArgs,nonLookupArgs)
parseCaveQMs(cave='stein',inputFile=r"1623/204/qm.csv")
parseCaveQMs(cave='hauch',inputFile=r"1623/234/qm.csv")
parseCaveQMs(cave='kh', inputFile="1623/161/qmtodo.htm")
#parseCaveQMs(cave='balkonhoehle',inputFile=r"1623/264/qm.csv")

View File

@@ -6,18 +6,16 @@ import re
def readcaves():
# Clear the cave data issues as we are reloading
models.DataIssue.objects.filter(parser='caves').delete()
area_1623 = models.Area.objects.update_or_create(short_name = "1623", parent = None)
area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
print(" - Reading Entrances")
newArea = models.Area(short_name = "1623", parent = None)
newArea.save()
newArea = models.Area(short_name = "1626", parent = None)
newArea.save()
print("Reading Entrances")
#print "list of <Slug> <Filename>"
for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readentrance(filename)
print (" - Reading Caves")
print ("Reading Caves")
for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readcave(filename)
@@ -53,7 +51,7 @@ def readentrance(filename):
bearings = getXML(entrancecontents, "bearings", maxItems = 1, context = context)
url = getXML(entrancecontents, "url", maxItems = 1, context = context)
if len(non_public) == 1 and len(slugs) >= 1 and len(name) >= 1 and len(entrance_description) == 1 and len(explorers) == 1 and len(map_description) == 1 and len(location_description) == 1 and len(approach) == 1 and len(underground_description) == 1 and len(marking) == 1 and len(marking_comment) == 1 and len(findability) == 1 and len(findability_description) == 1 and len(alt) == 1 and len(northing) == 1 and len(easting) == 1 and len(tag_station) == 1 and len(exact_station) == 1 and len(other_station) == 1 and len(other_description) == 1 and len(bearings) == 1 and len(url) == 1:
e, state = models.Entrance.objects.update_or_create(name = name[0],
e = models.Entrance(name = name[0],
non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
entrance_description = entrance_description[0],
explorers = explorers[0],
@@ -77,12 +75,14 @@ def readentrance(filename):
url = url[0],
filename = filename,
cached_primary_slug = slugs[0])
e.save()
primary = True
for slug in slugs:
#print slug, filename
cs = models.EntranceSlug.objects.update_or_create(entrance = e,
cs = models.EntranceSlug(entrance = e,
slug = slug,
primary = primary)
cs.save()
primary = False
def readcave(filename):
@@ -117,7 +117,7 @@ def readcave(filename):
url = getXML(cavecontents, "url", maxItems = 1, context = context)
entrances = getXML(cavecontents, "entrance", context = context)
if len(non_public) == 1 and len(slugs) >= 1 and len(official_name) == 1 and len(areas) >= 1 and len(kataster_code) == 1 and len(kataster_number) == 1 and len(unofficial_number) == 1 and len(explorers) == 1 and len(underground_description) == 1 and len(equipment) == 1 and len(references) == 1 and len(survey) == 1 and len(kataster_status) == 1 and len(underground_centre_line) == 1 and len(notes) == 1 and len(length) == 1 and len(depth) == 1 and len(extent) == 1 and len(survex_file) == 1 and len(description_file ) == 1 and len(url) == 1 and len(entrances) >= 1:
c, state = models.Cave.objects.update_or_create(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
c = models.Cave(non_public = {"True": True, "False": False, "true": True, "false": False,}[non_public[0]],
official_name = official_name[0],
kataster_code = kataster_code[0],
kataster_number = kataster_number[0],
@@ -137,6 +137,7 @@ def readcave(filename):
description_file = description_file[0],
url = url[0],
filename = filename)
c.save()
for area_slug in areas:
area = models.Area.objects.filter(short_name = area_slug)
if area:
@@ -148,40 +149,33 @@ def readcave(filename):
primary = True
for slug in slugs:
try:
cs = models.CaveSlug.objects.update_or_create(cave = c,
cs = models.CaveSlug(cave = c,
slug = slug,
primary = primary)
cs.save()
except:
message = "Can't find text (slug): %s, skipping %s" % (slug, context)
models.DataIssue.objects.create(parser='caves', message=message)
print(message)
print("Can't find text (slug): %s, skipping %s" % (slug, context))
primary = False
for entrance in entrances:
slug = getXML(entrance, "entranceslug", maxItems = 1, context = context)[0]
letter = getXML(entrance, "letter", maxItems = 1, context = context)[0]
try:
entrance = models.Entrance.objects.get(entranceslug__slug = slug)
ce = models.CaveAndEntrance.objects.update_or_create(cave = c, entrance_letter = letter, entrance = entrance)
ce = models.CaveAndEntrance(cave = c, entrance_letter = letter, entrance = entrance)
ce.save()
except:
message = "Entrance text (slug) %s missing %s" % (slug, context)
models.DataIssue.objects.create(parser='caves', message=message)
print(message)
print ("Entrance text (slug) %s missing %s" % (slug, context))
def getXML(text, itemname, minItems = 1, maxItems = None, printwarnings = True, context = ""):
items = re.findall("<%(itemname)s>(.*?)</%(itemname)s>" % {"itemname": itemname}, text, re.S)
if len(items) < minItems and printwarnings:
message = "%(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
print("%(count)i %(itemname)s found, at least %(min)i expected" % {"count": len(items),
"itemname": itemname,
"min": minItems} + context
models.DataIssue.objects.create(parser='caves', message=message)
print(message)
"min": minItems} + context)
if maxItems is not None and len(items) > maxItems and printwarnings:
message = "%(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
print("%(count)i %(itemname)s found, no more than %(max)i expected" % {"count": len(items),
"itemname": itemname,
"max": maxItems} + context
models.DataIssue.objects.create(parser='caves', message=message)
print(message)
"max": maxItems} + context)
return items

View File

@@ -7,18 +7,15 @@ from parsers.people import GetPersonExpeditionNameLookup
from parsers.cavetab import GetCaveLookup
from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
import csv
import re
import datetime
import os
from fuzzywuzzy import fuzz
from utils import save_carefully
#
#
# When we edit logbook entries, allow a "?" after any piece of data to say we've frigged it and
# it can be checked up later from the hard-copy if necessary; or it's not possible to determine (name, trip place, etc)
#
@@ -26,33 +23,19 @@ from utils import save_carefully
#
# the logbook loading section
#
def GetTripPersons(trippeople, expedition, logtime_underground):
def GetTripPersons(trippeople, expedition, logtime_underground):
res = [ ]
author = None
round_bracket_regex = re.compile(r"[\(\[].*?[\)\]]")
for tripperson in re.split(r",|\+|&amp;|&(?!\w+;)| and ", trippeople):
for tripperson in re.split(",|\+|&amp;|&(?!\w+;)| and ", trippeople):
tripperson = tripperson.strip()
tripperson = tripperson.strip('.')
mul = re.match(r"<u>(.*?)</u>$(?i)", tripperson)
mul = re.match("<u>(.*?)</u>$(?i)", tripperson)
if mul:
tripperson = mul.group(1).strip()
if tripperson and tripperson[0] != '*':
#assert tripperson in personyearmap, "'%s' << %s\n\n %s" % (tripperson, trippeople, personyearmap)
tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear:
print(" - No name match for: '%s'" % tripperson)
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
print(' - Lets try something fuzzy')
fuzzy_matches = {}
for person in GetPersonExpeditionNameLookup(expedition):
fuzz_num = fuzz.ratio(tripperson.lower(), person)
if fuzz_num > 50:
#print(" - %s -> %s = %d" % (tripperson.lower(), person, fuzz_num))
fuzzy_matches[person] = fuzz_num
for i in sorted(fuzzy_matches.items(), key = lambda kv:(kv[1]), reverse=True):
print(' - %s -> %s' % (i[0], i[1]))
print "NoMatchFor: '%s'" % tripperson
res.append((personyear, logtime_underground))
if mul:
author = personyear
@@ -62,7 +45,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
author = res[-1][0]
return res, author
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
def GetTripCave(place): #need to be fuzzier about matching here. Already a very slow function...
# print "Getting cave for " , place
try:
katastNumRes=[]
@@ -82,36 +65,34 @@ def GetTripCave(place): #need to be fuzzier about matching here. Already a very
return tripCaveRes
elif len(tripCaveRes)>1:
print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes))
print "Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)
correctIndex=input("type list index of correct cave")
return tripCaveRes[correctIndex]
else:
print("No cave found for place " , place)
print "No cave found for place " , place
return
noncaveplaces = [ "Journey", "Loser Plateau" ]
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground, entry_type="wiki"):
def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_underground):
""" saves a logbook entry and related persontrips """
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author:
print(" - Skipping logentry: " + title + " - no author for entry")
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
print "skipping logentry", title
return
#tripCave = GetTripCave(place)
# tripCave = GetTripCave(place)
#
lplace = place.lower()
if lplace not in noncaveplaces:
cave=GetCaveLookup().get(lplace)
#Check for an existing copy of the current entry, and save
expeditionday = expedition.get_expedition_day(date)
lookupAttribs={'date':date, 'title':title}
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50], 'entry_type':entry_type}
lookupAttribs={'date':date, 'title':title}
nonLookupAttribs={'place':place, 'text':text, 'expedition':expedition, 'cave':cave, 'slug':slugify(title)[:50]}
lbo, created=save_carefully(models.LogbookEntry, lookupAttribs, nonLookupAttribs)
for tripperson, time_underground in trippersons:
lookupAttribs={'personexpedition':tripperson, 'logbook_entry':lbo}
nonLookupAttribs={'time_underground':time_underground, 'is_logbook_entry_author':(tripperson == author)}
@@ -121,8 +102,8 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
def ParseDate(tripdate, year):
""" Interprets dates in the expo logbooks and returns a correct datetime.date object """
mdatestandard = re.match(r"(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
mdategoof = re.match(r"(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
mdatestandard = re.match("(\d\d\d\d)-(\d\d)-(\d\d)", tripdate)
mdategoof = re.match("(\d\d?)/0?(\d)/(20|19)?(\d\d)", tripdate)
if mdatestandard:
assert mdatestandard.group(1) == year, (tripdate, year)
year, month, day = int(mdatestandard.group(1)), int(mdatestandard.group(2)), int(mdatestandard.group(3))
@@ -132,11 +113,11 @@ def ParseDate(tripdate, year):
day, month, year = int(mdategoof.group(1)), int(mdategoof.group(2)), int(mdategoof.group(4)) + yadd
else:
assert False, tripdate
return make_aware(datetime.datetime(year, month, day), get_current_timezone())
return datetime.date(year, month, day)
# 2006, 2008 - 2010
# 2007, 2008, 2006
def Parselogwikitxt(year, expedition, txt):
trippara = re.findall(r"===(.*?)===([\s\S]*?)(?====)", txt)
trippara = re.findall("===(.*?)===([\s\S]*?)(?====)", txt)
for triphead, triptext in trippara:
tripheadp = triphead.split("|")
#print "ttt", tripheadp
@@ -145,7 +126,7 @@ def Parselogwikitxt(year, expedition, txt):
tripsplace = tripplace.split(" - ")
tripcave = tripsplace[0].strip()
tul = re.findall(r"T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
tul = re.findall("T/?U:?\s*(\d+(?:\.\d*)?|unknown)\s*(hrs|hours)?", triptext)
if tul:
#assert len(tul) <= 1, (triphead, triptext)
#assert tul[0][1] in ["hrs", "hours"], (triphead, triptext)
@@ -159,16 +140,12 @@ def Parselogwikitxt(year, expedition, txt):
#print "\n", tripcave, "--- ppp", trippeople, len(triptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = tripplace, text = triptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
# 2002, 2004, 2005, 2007, 2011 - 2018
# 2002, 2004, 2005
def Parseloghtmltxt(year, expedition, txt):
#print(" - Starting log html parser")
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
logbook_entry_count = 0
tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
#print(" - HR detected - maybe a trip?")
logbook_entry_count += 1
s = re.match(r'''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
s = re.match('''(?x)(?:\s*<div\sclass="tripdate"\sid=".*?">.*?</div>\s*<p>)? # second date
\s*(?:<a\s+id="(.*?)"\s*/>\s*</a>)?
\s*<div\s+class="tripdate"\s*(?:id="(.*?)")?>(.*?)</div>(?:<p>)?
\s*<div\s+class="trippeople">\s*(.*?)</div>
@@ -178,46 +155,46 @@ def Parseloghtmltxt(year, expedition, txt):
\s*$
''', trippara)
if not s:
if not re.search(r"Rigging Guide", trippara):
print("can't parse: ", trippara) # this is 2007 which needs editing
if not re.search("Rigging Guide", trippara):
print "can't parse: ", trippara # this is 2007 which needs editing
#assert s, trippara
continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
ldate = ParseDate(tripdate.strip(), year)
#assert tripid[:-1] == "t" + tripdate, (tripid, tripdate)
trippeople = re.sub("Ol(?!l)", "Olly", trippeople)
trippeople = re.sub("Wook(?!e)", "Wookey", trippeople)
triptitles = triptitle.split(" - ")
if len(triptitles) >= 2:
tripcave = triptitles[0]
else:
tripcave = "UNKNOWN"
#print("\n", tripcave, "--- ppp", trippeople, len(triptext))
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
if logbook_entry_count == 0:
print(" - No trip entrys found in logbook, check the syntax matches htmltxt format")
#print "\n", tripcave, "--- ppp", trippeople, len(triptext)
ltriptext = re.sub("</p>", "", triptext)
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub("<p>", "\n\n", ltriptext).strip()
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
# main parser for 1991 - 2001. simpler because the data has been hacked so much to fit it
# main parser for pre-2001. simpler because the data has been hacked so much to fit it
def Parseloghtml01(year, expedition, txt):
tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
tripparas = re.findall("<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
assert s, trippara[:300]
tripheader, triptext = s.group(1), s.group(2)
mtripid = re.search(r'<a id="(.*?)"', tripheader)
mtripid = re.search('<a id="(.*?)"', tripheader)
tripid = mtripid and mtripid.group(1) or ""
tripheader = re.sub(r"</?(?:[ab]|span)[^>]*>", "", tripheader)
tripheader = re.sub("</?(?:[ab]|span)[^>]*>", "", tripheader)
#print " ", [tripheader]
#continue
tripdate, triptitle, trippeople = tripheader.split("|")
ldate = ParseDate(tripdate.strip(), year)
mtu = re.search(r'<p[^>]*>(T/?U.*)', triptext)
mtu = re.search('<p[^>]*>(T/?U.*)', triptext)
if mtu:
tu = mtu.group(1)
triptext = triptext[:mtu.start(0)] + triptext[mtu.end():]
@@ -228,40 +205,39 @@ def Parseloghtml01(year, expedition, txt):
tripcave = triptitles[0].strip()
ltriptext = triptext
mtail = re.search(r'(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
mtail = re.search('(?:<a href="[^"]*">[^<]*</a>|\s|/|-|&amp;|</?p>|\((?:same day|\d+)\))*$', ltriptext)
if mtail:
#print mtail.group(0)
ltriptext = ltriptext[:mtail.start(0)]
ltriptext = re.sub(r"</p>", "", ltriptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>|<br>", "\n\n", ltriptext).strip()
ltriptext = re.sub("</p>", "", ltriptext)
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub("<p>|<br>", "\n\n", ltriptext).strip()
#ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!]", "NONASCII", ltriptext)
ltriptext = re.sub(r"</?u>", "_", ltriptext)
ltriptext = re.sub(r"</?i>", "''", ltriptext)
ltriptext = re.sub(r"</?b>", "'''", ltriptext)
ltriptext = re.sub("</?u>", "_", ltriptext)
ltriptext = re.sub("</?i>", "''", ltriptext)
ltriptext = re.sub("</?b>", "'''", ltriptext)
#print ldate, trippeople.strip()
# could include the tripid (url link for cross-referencing)
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext,
trippeople=trippeople, expedition=expedition, logtime_underground=0,
entry_type="html")
EnterLogIntoDbase(date=ldate, place=tripcave, title=triptitle, text=ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
# parser for 2003
def Parseloghtml03(year, expedition, txt):
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
tripparas = re.findall("<hr\s*/>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara)
assert s, trippara
tripheader, triptext = s.group(1), s.group(2)
tripheader = re.sub(r"&nbsp;", " ", tripheader)
tripheader = re.sub(r"\s+", " ", tripheader).strip()
tripheader = re.sub("&nbsp;", " ", tripheader)
tripheader = re.sub("\s+", " ", tripheader).strip()
sheader = tripheader.split(" -- ")
tu = ""
if re.match("T/U|Time underwater", sheader[-1]):
tu = sheader.pop()
if len(sheader) != 3:
print("header not three pieces", sheader)
print "header not three pieces", sheader
tripdate, triptitle, trippeople = sheader
ldate = ParseDate(tripdate.strip(), year)
triptitles = triptitle.split(" , ")
@@ -270,14 +246,37 @@ def Parseloghtml03(year, expedition, txt):
else:
tripcave = "UNKNOWN"
#print tripcave, "--- ppp", triptitle, trippeople, len(triptext)
ltriptext = re.sub(r"</p>", "", triptext)
ltriptext = re.sub(r"\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub(r"<p>", "\n\n", ltriptext).strip()
ltriptext = re.sub(r"[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle,
text = ltriptext, trippeople=trippeople, expedition=expedition,
logtime_underground=0, entry_type="html")
ltriptext = re.sub("</p>", "", triptext)
ltriptext = re.sub("\s*?\n\s*", " ", ltriptext)
ltriptext = re.sub("<p>", "\n\n", ltriptext).strip()
ltriptext = re.sub("[^\s0-9a-zA-Z\-.,:;'!&()\[\]<>?=+*%]", "_NONASCII_", ltriptext)
EnterLogIntoDbase(date = ldate, place = tripcave, title = triptitle, text = ltriptext, trippeople=trippeople, expedition=expedition, logtime_underground=0)
yearlinks = [
# ("2013", "2013/logbook.html", Parseloghtmltxt),
("2012", "2012/logbook.html", Parseloghtmltxt),
("2011", "2011/logbook.html", Parseloghtmltxt),
("2010", "2010/logbook.html", Parselogwikitxt),
("2009", "2009/2009logbook.txt", Parselogwikitxt),
("2008", "2008/2008logbook.txt", Parselogwikitxt),
("2007", "2007/logbook.html", Parseloghtmltxt),
("2006", "2006/logbook/logbook_06.txt", Parselogwikitxt),
("2005", "2005/logbook.html", Parseloghtmltxt),
("2004", "2004/logbook.html", Parseloghtmltxt),
("2003", "2003/logbook.html", Parseloghtml03),
("2002", "2002/logbook.html", Parseloghtmltxt),
("2001", "2001/log.htm", Parseloghtml01),
("2000", "2000/log.htm", Parseloghtml01),
("1999", "1999/log.htm", Parseloghtml01),
("1998", "1998/log.htm", Parseloghtml01),
("1997", "1997/log.htm", Parseloghtml01),
("1996", "1996/log.htm", Parseloghtml01),
("1995", "1995/log.htm", Parseloghtml01),
("1994", "1994/log.htm", Parseloghtml01),
("1993", "1993/log.htm", Parseloghtml01),
("1992", "1992/log.htm", Parseloghtml01),
("1991", "1991/log.htm", Parseloghtml01),
]
def SetDatesFromLogbookEntries(expedition):
"""
@@ -296,67 +295,54 @@ def SetDatesFromLogbookEntries(expedition):
persontrip.persontrip_next = None
lprevpersontrip = persontrip
persontrip.save()
def LoadLogbookForExpedition(expedition):
""" Parses all logbook entries for one expedition """
expowebbase = os.path.join(settings.EXPOWEB, "years")
yearlinks = settings.LOGBOOK_PARSER_SETTINGS
logbook_parseable = False
if expedition.year in yearlinks:
year_settings = yearlinks[expedition.year]
file_in = open(os.path.join(expowebbase, year_settings[0]))
txt = file_in.read().decode("latin1")
file_in.close()
parsefunc = year_settings[1]
logbook_parseable = True
print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
else:
try:
file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
txt = file_in.read().decode("latin1")
file_in.close()
logbook_parseable = True
print("No set parser found using default")
parsefunc = settings.DEFAULT_LOGBOOK_PARSER
except (IOError):
logbook_parseable = False
print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)
if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
expowebbase = os.path.join(settings.EXPOWEB, "years")
year = str(expedition.year)
for lyear, lloc, parsefunc in yearlinks:
if lyear == year:
break
fin = open(os.path.join(expowebbase, lloc))
print "opennning", lloc
txt = fin.read().decode("latin1")
fin.close()
parsefunc(year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
def LoadLogbooks():
""" This is the master function for parsing all logbooks into the Troggle database. """
""" This is the master function for parsing all logbooks into the Troggle database. Requires yearlinks, which is a list of tuples for each expedition with expedition year, logbook path, and parsing function. """
#Deletion has been moved to a separate function to enable non-destructive importing
#models.LogbookEntry.objects.all().delete()
expowebbase = os.path.join(settings.EXPOWEB, "years")
#yearlinks = [ ("2001", "2001/log.htm", Parseloghtml01), ] #overwrite
#yearlinks = [ ("1996", "1996/log.htm", Parseloghtml01),] # overwrite
# Clear the logbook data issues as we are reloading
models.DataIssue.objects.filter(parser='logbooks').delete()
# Fetch all expos
expos = models.Expedition.objects.all()
for expo in expos:
print("\nLoading Logbook for: " + expo.year)
for year, lloc, parsefunc in yearlinks:
# This will not work until the corresponding year exists in the database.
# In 2012 this needed noscript/folk.csv to be updated first.
expedition = models.Expedition.objects.filter(year = year)[0]
fin = open(os.path.join(expowebbase, lloc))
txt = fin.read().decode("latin1")
fin.close()
parsefunc(year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
# Load logbook for expo
LoadLogbookForExpedition(expo)
dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
expeditionYearRegex = re.compile(r'<span\s+class="expeditionyear">(.*?)</span>', re.S)
titleRegex = re.compile(r'<H1>(.*?)</H1>', re.S)
reportRegex = re.compile(r'<div\s+class="report">(.*)</div>\s*</body>', re.S)
personRegex = re.compile(r'<div\s+class="person">(.*?)</div>', re.S)
nameAuthorRegex = re.compile(r'<span\s+class="name(,author|)">(.*?)</span>', re.S)
TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile(r'<span\s+class="location">(.*?)</span>', re.S)
caveRegex = re.compile(r'<span\s+class="cave">(.*?)</span>', re.S)
dateRegex = re.compile('<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
expeditionYearRegex = re.compile('<span\s+class="expeditionyear">(.*?)</span>', re.S)
titleRegex = re.compile('<H1>(.*?)</H1>', re.S)
reportRegex = re.compile('<div\s+class="report">(.*)</div>\s*</body>', re.S)
personRegex = re.compile('<div\s+class="person">(.*?)</div>', re.S)
nameAuthorRegex = re.compile('<span\s+class="name(,author|)">(.*?)</span>', re.S)
TURegex = re.compile('<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
locationRegex = re.compile('<span\s+class="location">(.*?)</span>', re.S)
caveRegex = re.compile('<span\s+class="cave">(.*?)</span>', re.S)
def parseAutoLogBookEntry(filename):
errors = []
@@ -377,17 +363,17 @@ def parseAutoLogBookEntry(filename):
expedition = models.Expedition.objects.get(year = expeditionYearMatch.groups()[0])
personExpeditionNameLookup = GetPersonExpeditionNameLookup(expedition)
except models.Expedition.DoesNotExist:
errors.append("Expedition not in database")
errors.append("Expedition not in database")
else:
errors.append("Expediton Year could not be parsed")
errors.append("Expediton Year could not be parsed")
titleMatch = titleRegex.search(contents)
if titleMatch:
title, = titleMatch.groups()
if len(title) > settings.MAX_LOGBOOK_ENTRY_TITLE_LENGTH:
errors.append("Title too long")
errors.append("Title too long")
else:
errors.append("Title could not be found")
errors.append("Title could not be found")
caveMatch = caveRegex.search(contents)
if caveMatch:
@@ -396,24 +382,24 @@ def parseAutoLogBookEntry(filename):
cave = models.getCaveByReference(caveRef)
except AssertionError:
cave = None
errors.append("Cave not found in database")
errors.append("Cave not found in database")
else:
cave = None
locationMatch = locationRegex.search(contents)
if locationMatch:
location, = locationMatch.groups()
location, = locationMatch.groups()
else:
location = None
if cave is None and location is None:
errors.append("Location nor cave could not be found")
errors.append("Location nor cave could not be found")
reportMatch = reportRegex.search(contents)
if reportMatch:
report, = reportMatch.groups()
else:
errors.append("Contents could not be found")
errors.append("Contents could not be found")
if errors:
return errors # Easiest to bail out at this point as we need to make sure that we know which expedition to look for people from.
people = []
@@ -428,7 +414,7 @@ def parseAutoLogBookEntry(filename):
author = bool(author)
else:
errors.append("Persons name could not be found")
TUMatch = TURegex.search(contents)
if TUMatch:
TU, = TUMatch.groups()
@@ -438,15 +424,15 @@ def parseAutoLogBookEntry(filename):
people.append((name, author, TU))
if errors:
return errors # Bail out before committing to the database
logbookEntry = models.LogbookEntry(date = date,
logbookEntry = models.LogbookEntry(date = date,
expedition = expedition,
title = title, cave = cave, place = location,
title = title, cave = cave, place = location,
text = report, slug = slugify(title)[:50],
filename = filename)
logbookEntry.save()
for name, author, TU in people:
models.PersonTrip(personexpedition = personExpo,
time_underground = TU,
logbook_entry = logbookEntry,
models.PersonTrip(personexpedition = personExpo,
time_underground = TU,
logbook_entry = logbookEntry,
is_logbook_entry_author = author).save()
print(logbookEntry)
print logbookEntry
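The regexes above imply a simple span/div markup for the per-trip HTML files that parseAutoLogBookEntry() reads. A minimal sketch of an entry those patterns would match; the field values here (year, date, names, cave reference) are all invented:

import re

sample = '''<html><body>
<span class="expeditionyear">2012</span>
<span class="date">2012-07-15</span>
<H1>Rigging the entrance series</H1>
<span class="cave">1623-204</span>
<div class="person"><span class="name,author">Fred Bloggs</span> <span class="TU">5.5</span></div>
<div class="report"><p>Went underground and rigged two pitches.</p></div>
</body>'''

dateRegex = re.compile(r'<span\s+class="date">(\d\d\d\d)-(\d\d)-(\d\d)</span>', re.S)
TURegex = re.compile(r'<span\s+class="TU">([0-9]*\.?[0-9]+)</span>', re.S)
print(dateRegex.search(sample).groups())   # ('2012', '07', '15')
print(TURegex.search(sample).groups())     # ('5.5',)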

View File

@@ -4,30 +4,28 @@ from django.conf import settings
import troggle.core.models as models
import csv, re, datetime, os, shutil
from utils import save_carefully
from HTMLParser import HTMLParser
from unidecode import unidecode
def saveMugShot(mugShotPath, mugShotFilename, person):
if mugShotFilename.startswith(r'i/'): #if filename in cell has the directory attached (I think they all do), remove it
mugShotFilename=mugShotFilename[2:]
else:
mugShotFilename=mugShotFilename # just in case one doesn't
dummyObj=models.DPhoto(file=mugShotFilename)
#Put a copy of the file in the right place. mugShotObj.file.path is determined by the django filesystemstorage specified in models.py
if not os.path.exists(dummyObj.file.path):
shutil.copy(mugShotPath, dummyObj.file.path)
mugShotObj, created = save_carefully(
models.DPhoto,
lookupAttribs={'is_mugshot':True, 'file':mugShotFilename},
nonLookupAttribs={'caption':"Mugshot for "+person.first_name+" "+person.last_name}
)
if created:
mugShotObj.contains_person.add(person)
mugShotObj.save()
mugShotObj.save()
def parseMugShotAndBlurb(personline, header, person):
"""create mugshot Photo instance"""
@@ -45,53 +43,40 @@ def parseMugShotAndBlurb(personline, header, person):
person.save()
def LoadPersonsExpos():
persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
persontab = open(os.path.join(settings.EXPOWEB, "noinfo", "folk.csv"))
personreader = csv.reader(persontab)
headers = personreader.next()
header = dict(zip(headers, range(len(headers))))
# make expeditions
print("Loading expeditions")
print "Loading expeditions"
years = headers[5:]
for year in years:
lookupAttribs = {'year':year}
nonLookupAttribs = {'name':"CUCC expo %s" % year}
save_carefully(models.Expedition, lookupAttribs, nonLookupAttribs)
# make persons
print("Loading personexpeditions")
print "Loading personexpeditions"
#expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
#expomissing = set(expoers2008)
for personline in personreader:
name = personline[header["Name"]]
name = re.sub(r"<.*?>", "", name)
firstname = ""
nickname = ""
rawlastname = personline[header["Lastname"]].strip()
matchlastname = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", rawlastname)
lastname = matchlastname.group(1).strip()
splitnick = re.match(r"^([\w&;\s]+)(?:\(([^)]*)\))?", name)
fullname = splitnick.group(1)
nickname = splitnick.group(2) or ""
fullname = fullname.strip()
names = fullname.split(' ')
firstname = names[0]
if len(names) == 1:
lastname = ""
lookupAttribs={'first_name':firstname, 'last_name':(lastname or "")}
nonLookupAttribs={'is_vfho':bool(personline[header["VfHO member"]]), 'fullname':fullname}
name = re.sub("<.*?>", "", name)
mname = re.match("(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?", name)
nickname = mname.group(3) or ""
lookupAttribs={'first_name':mname.group(1), 'last_name':(mname.group(2) or "")}
nonLookupAttribs={'is_vfho':personline[header["VfHO member"]],}
person, created = save_carefully(models.Person, lookupAttribs, nonLookupAttribs)
parseMugShotAndBlurb(personline=personline, header=header, person=person)
# make person expedition from table
for year, attended in zip(headers, personline)[5:]:
expedition = models.Expedition.objects.get(year=year)
@@ -100,6 +85,36 @@ def LoadPersonsExpos():
nonLookupAttribs = {'nickname':nickname, 'is_guest':(personline[header["Guest"]] == "1")}
save_carefully(models.PersonExpedition, lookupAttribs, nonLookupAttribs)
# this fills in those people for whom 2008 was their first expo
#print "Loading personexpeditions 2008"
#for name in expomissing:
# firstname, lastname = name.split()
# is_guest = name in ["Eeva Makiranta", "Keith Curtis"]
# print "2008:", name
# persons = list(models.Person.objects.filter(first_name=firstname, last_name=lastname))
# if not persons:
# person = models.Person(first_name=firstname, last_name = lastname, is_vfho = False, mug_shot = "")
# #person.Sethref()
# person.save()
# else:
# person = persons[0]
# expedition = models.Expedition.objects.get(year="2008")
# personexpedition = models.PersonExpedition(person=person, expedition=expedition, nickname="", is_guest=is_guest)
# personexpedition.save()
#Notability is now a method of person. Makes no sense to store it in the database; it would need to be recalculated every time something changes. - AC 16 Feb 09
# could rank according to surveying as well
#print "Setting person notability"
#for person in models.Person.objects.all():
#person.notability = 0.0
#for personexpedition in person.personexpedition_set.all():
#if not personexpedition.is_guest:
#person.notability += 1.0 / (2012 - int(personexpedition.expedition.year))
#person.bisnotable = person.notability > 0.3 # I don't know how to filter by this
#person.save()
# used in other referencing parser functions
# expedition name lookup cached for speed (it's a very big list)
Gpersonexpeditionnamelookup = { }
@@ -108,47 +123,34 @@ def GetPersonExpeditionNameLookup(expedition):
res = Gpersonexpeditionnamelookup.get(expedition.name)
if res:
return res
res = { }
duplicates = set()
print("Calculating GetPersonExpeditionNameLookup for " + expedition.year)
print "Calculating GetPersonExpeditionNameLookup for", expedition.year
personexpeditions = models.PersonExpedition.objects.filter(expedition=expedition)
htmlparser = HTMLParser()
for personexpedition in personexpeditions:
possnames = [ ]
f = unidecode(htmlparser.unescape(personexpedition.person.first_name.lower()))
l = unidecode(htmlparser.unescape(personexpedition.person.last_name.lower()))
full = unidecode(htmlparser.unescape(personexpedition.person.fullname.lower()))
f = personexpedition.person.first_name.lower()
l = personexpedition.person.last_name.lower()
if l:
possnames.append(f + " " + l)
possnames.append(f + " " + l[0])
possnames.append(f + l[0])
possnames.append(f[0] + " " + l)
possnames.append(f)
if full not in possnames:
possnames.append(full)
if personexpedition.nickname not in possnames:
if personexpedition.nickname:
possnames.append(personexpedition.nickname.lower())
if l:
# This allows for nickname to be used for short name eg Phil
# adding Phil Sargent to the list
if str(personexpedition.nickname.lower() + " " + l) not in possnames:
possnames.append(personexpedition.nickname.lower() + " " + l)
if str(personexpedition.nickname.lower() + " " + l[0]) not in possnames:
possnames.append(personexpedition.nickname.lower() + " " + l[0])
if str(personexpedition.nickname.lower() + l[0]) not in possnames:
possnames.append(personexpedition.nickname.lower() + l[0])
for possname in possnames:
if possname in res:
duplicates.add(possname)
else:
res[possname] = personexpedition
for possname in duplicates:
del res[possname]
Gpersonexpeditionnamelookup[expedition.name] = res
return res
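As a rough sketch of what GetPersonExpeditionNameLookup() builds, these are the basic variants generated for a hypothetical caver (names invented); fullname- and nickname-derived forms are only appended when not already present, and any variant shared by two people on the same expedition is dropped from the lookup:

f, l = "phil", "sargent"
possnames = [f + " " + l, f + " " + l[0], f + l[0], f[0] + " " + l, f]
print(possnames)
# ['phil sargent', 'phil s', 'phils', 'p sargent', 'phil']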

View File

@@ -1,7 +1,5 @@
'''
This module is the part of troggle that parses descriptions of cave parts (subcaves) from the legacy html
files and saves them in the troggle database as instances of the model Subcave.
Unfortunately, this parser can not be very flexible because the legacy format is poorly structured.
This module is the part of troggle that parses descriptions of cave parts (subcaves) from the legacy html files and saves them in the troggle database as instances of the model Subcave. Unfortunately, this parser can not be very flexible because the legacy format is poorly structured.
'''
import sys, os
@@ -31,12 +29,12 @@ def importSubcaves(cave):
link[0])
subcaveFile=open(subcaveFilePath,'r')
description=subcaveFile.read().decode('iso-8859-1').encode('utf-8')
lookupAttribs={'title':link[1], 'cave':cave}
nonLookupAttribs={'description':description}
newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)
logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
except IOError:
logging.info("Subcave import couldn't open "+subcaveFilePath)

View File

@@ -5,26 +5,20 @@ import troggle.settings as settings
from subprocess import call, Popen, PIPE
from troggle.parsers.people import GetPersonExpeditionNameLookup
from django.utils.timezone import get_current_timezone
from django.utils.timezone import make_aware
import re
import os
from datetime import datetime
line_leg_regex = re.compile(r"[\d\-+.]+$")
def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
# The try/excepts here need replacing as they are relatively expensive
def LoadSurvexLineLeg(survexblock, stardata, sline, comment):
ls = sline.lower().split()
ssfrom = survexblock.MakeSurvexStation(ls[stardata["from"]])
ssto = survexblock.MakeSurvexStation(ls[stardata["to"]])
survexleg = models.SurvexLeg(block=survexblock, stationfrom=ssfrom, stationto=ssto)
if stardata["type"] == "normal":
try:
survexleg.tape = float(ls[stardata["tape"]])
except ValueError:
except ValueError:
print("Tape misread in", survexblock.survexfile.path)
print("Stardata:", stardata)
print("Line:", ls)
@@ -59,17 +53,14 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
survexleg.compass = 1000
survexleg.clino = -90.0
else:
assert line_leg_regex.match(lcompass), ls
assert line_leg_regex.match(lclino) and lclino != "-", ls
assert re.match(r"[\d\-+.]+$", lcompass), ls
assert re.match(r"[\d\-+.]+$", lclino) and lclino != "-", ls
survexleg.compass = float(lcompass)
survexleg.clino = float(lclino)
if cave:
survexleg.cave = cave
# only save proper legs
survexleg.save()
itape = stardata.get("tape")
if itape:
try:
@@ -89,212 +80,96 @@ def LoadSurvexEquate(survexblock, sline):
def LoadSurvexLinePassage(survexblock, stardata, sline, comment):
pass
stardatadefault = {"type":"normal", "t":"leg", "from":0, "to":1, "tape":2, "compass":3, "clino":4}
stardataparamconvert = {"length":"tape", "bearing":"compass", "gradient":"clino"}
regex_comment = re.compile(r"([^;]*?)\s*(?:;\s*(.*))?\n?$")
regex_ref = re.compile(r'.*?ref.*?(\d+)\s*#\s*(\d+)')
regex_star = re.compile(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$')
regex_team = re.compile(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)")
regex_team_member = re.compile(r" and | / |, | & | \+ |^both$|^none$(?i)")
regex_qm = re.compile(r'^\s*QM(\d)\s+?([a-dA-DxX])\s+([\w\-]+)\.(\d+)\s+(([\w\-]+)\.(\d+)|\-)\s+(.+)$')
def RecursiveLoad(survexblock, survexfile, fin, textlines):
iblankbegins = 0
text = [ ]
stardata = stardatadefault
teammembers = [ ]
# uncomment to print out all files during parsing
print(" - Reading file: " + survexblock.survexfile.path)
stamp = datetime.now()
lineno = 0
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", survexblock.survexfile.path)
if path_match:
pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
# print('Match')
# print(pos_cave)
cave = models.getCaveByReference(pos_cave)
if cave:
survexfile.cave = cave
svxlines = ''
svxlines = fin.read().splitlines()
# print('Cave - preloop ' + str(survexfile.cave))
# print(survexblock)
for svxline in svxlines:
# print(survexblock)
# print(svxline)
# if not svxline:
# print(' - Not survex')
# return
# textlines.append(svxline)
lineno += 1
# print(' - Line: %d' % lineno)
# uncomment to print out all files during parsing
print("Reading file:", survexblock.survexfile.path)
while True:
svxline = fin.readline().decode("latin1")
if not svxline:
return
textlines.append(svxline)
# break the line at the comment
sline, comment = regex_comment.match(svxline.strip()).groups()
sline, comment = re.match(r"([^;]*?)\s*(?:;\s*(.*))?\n?$", svxline.strip()).groups()
# detect ref line pointing to the scans directory
mref = comment and regex_ref.match(comment)
mref = comment and re.match(r'.*?ref.*?(\d+)\s*#\s*(\d+)', comment)
if mref:
refscan = "%s#%s" % (mref.group(1), mref.group(2))
survexscansfolders = models.SurvexScansFolder.objects.filter(walletname=refscan)
if survexscansfolders:
survexblock.survexscansfolder = survexscansfolders[0]
#survexblock.refscandir = "%s/%s%%23%s" % (mref.group(1), mref.group(1), mref.group(2))
survexblock.save()
survexblock.save()
continue
# This whole section should be moved if we can have *QM become a proper survex command
# Spec of QM in SVX files; currently commented out, needs to be added to survex
# needs to match regex_qm
# ;Serial number grade(A/B/C/D/X) nearest-station resolution-station description
# ;QM1 a hobnob_hallway_2.42 hobnob-hallway_3.42 junction of keyhole passage
# ;QM1 a hobnob_hallway_2.42 - junction of keyhole passage
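# Illustrative match for the second example above (the leading ';' has already been
# stripped off into `comment` by regex_comment); station names are from that example:
# regex_qm.match("QM1 a hobnob_hallway_2.42 - junction of keyhole passage").groups()
# -> ('1', 'a', 'hobnob_hallway_2', '42', '-', None, None, 'junction of keyhole passage')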
qmline = comment and regex_qm.match(comment)
if qmline:
print(qmline.groups())
#(u'1', u'B', u'miraclemaze', u'1.17', u'-', None, u'\tcontinuation of rift')
qm_no = qmline.group(1)
qm_grade = qmline.group(2)
qm_from_section = qmline.group(3)
qm_from_station = qmline.group(4)
qm_resolve_section = qmline.group(6)
qm_resolve_station = qmline.group(7)
qm_notes = qmline.group(8)
print('Cave - %s' % survexfile.cave)
print('QM no %d' % int(qm_no))
print('QM grade %s' % qm_grade)
print('QM section %s' % qm_from_section)
print('QM station %s' % qm_from_station)
print('QM res section %s' % qm_resolve_section)
print('QM res station %s' % qm_resolve_station)
print('QM notes %s' % qm_notes)
# If the QM isn't resolved (i.e. it has no resolving station) then load it
if not qm_resolve_section or qm_resolve_section in ('-', 'None'):
from_section = models.SurvexBlock.objects.filter(name=qm_from_section)
# If we can find a section (survex note chunk, named)
if len(from_section) > 0:
print(from_section[0])
from_station = models.SurvexStation.objects.filter(block=from_section[0], name=qm_from_station)
# If we can find a from station then we have the nearest station and can import it
if len(from_station) > 0:
print(from_station[0])
qm = models.QM.objects.create(number=qm_no,
nearest_station=from_station[0],
grade=qm_grade.upper(),
location_description=qm_notes)
else:
print('QM found but resolved')
#print('Cave -sline ' + str(cave))
if not sline:
continue
# detect the star command
mstar = regex_star.match(sline)
mstar = re.match(r'\s*\*[\s,]*(\w+)\s*(.*?)\s*(?:;.*)?$', sline)
if not mstar:
if "from" in stardata:
# print('Cave ' + str(survexfile.cave))
# print(survexblock)
LoadSurvexLineLeg(survexblock, stardata, sline, comment, survexfile.cave)
# print(' - From: ')
#print(stardata)
pass
LoadSurvexLineLeg(survexblock, stardata, sline, comment)
elif stardata["type"] == "passage":
LoadSurvexLinePassage(survexblock, stardata, sline, comment)
# print(' - Passage: ')
#Missing "station" in stardata.
continue
# detect the star command
cmd, line = mstar.groups()
cmd = cmd.lower()
if re.match("include$(?i)", cmd):
includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
print(' - Include file found including - ' + includepath)
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
if path_match:
pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
# print(pos_cave)
cave = models.getCaveByReference(pos_cave)
if cave:
survexfile.cave = cave
else:
print('No match for %s' % includepath)
includesurvexfile = models.SurvexFile(path=includepath)
includesurvexfile = models.SurvexFile(path=includepath, cave=survexfile.cave)
includesurvexfile.save()
includesurvexfile.SetDirectory()
if includesurvexfile.exists():
survexblock.save()
fininclude = includesurvexfile.OpenFile()
RecursiveLoad(survexblock, includesurvexfile, fininclude, textlines)
elif re.match("begin$(?i)", cmd):
if line:
newsvxpath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", newsvxpath)
if path_match:
pos_cave = '%s-%s' % (path_match.group(1), path_match.group(2))
print(pos_cave)
cave = models.getCaveByReference(pos_cave)
if cave:
survexfile.cave = cave
else:
print('No match for %s' % newsvxpath)
if line:
name = line.lower()
print(' - Begin found for: ' + name)
# print('Block cave: ' + str(survexfile.cave))
survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexblock.cave, survexfile=survexfile, totalleglength=0.0)
survexblockdown.save()
survexblock.save()
survexblock = survexblockdown
# print(survexblockdown)
textlinesdown = [ ]
RecursiveLoad(survexblockdown, survexfile, fin, textlinesdown)
else:
iblankbegins += 1
elif re.match("end$(?i)", cmd):
if iblankbegins:
iblankbegins -= 1
else:
survexblock.text = "".join(textlines)
survexblock.save()
# print(' - End found: ')
endstamp = datetime.now()
timetaken = endstamp - stamp
# print(' - Time to process: ' + str(timetaken))
return
elif re.match("date$(?i)", cmd):
if len(line) == 10:
#print(' - Date found: ' + line)
survexblock.date = make_aware(datetime.strptime(re.sub(r"\.", "-", line), '%Y-%m-%d'), get_current_timezone())
survexblock.date = re.sub(r"\.", "-", line)
expeditions = models.Expedition.objects.filter(year=line[:4])
if expeditions:
assert len(expeditions) == 1
survexblock.expedition = expeditions[0]
survexblock.expeditionday = survexblock.expedition.get_expedition_day(survexblock.date)
survexblock.save()
elif re.match("team$(?i)", cmd):
pass
# print(' - Team found: ')
mteammember = regex_team.match(line)
mteammember = re.match(r"(Insts|Notes|Tape|Dog|Useless|Pics|Helper|Disto|Consultant)\s+(.*)$(?i)", line)
if mteammember:
for tm in regex_team_member.split(mteammember.group(2)):
for tm in re.split(r" and | / |, | & | \+ |^both$|^none$(?i)", mteammember.group(2)):
if tm:
personexpedition = survexblock.expedition and GetPersonExpeditionNameLookup(survexblock.expedition).get(tm.lower())
if (personexpedition, tm) not in teammembers:
@@ -304,23 +179,18 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if personexpedition:
personrole.person=personexpedition.person
personrole.save()
elif cmd == "title":
#print(' - Title found: ')
survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexfile.cave)
survextitle = models.SurvexTitle(survexblock=survexblock, title=line.strip('"'), cave=survexblock.cave)
survextitle.save()
pass
elif cmd == "require":
# should we check survex version available for processing?
pass
elif cmd == "data":
#print(' - Data found: ')
ls = line.lower().split()
stardata = { "type":ls[0] }
#print(' - Star data: ', stardata)
#print(ls)
for i in range(0, len(ls)):
stardata[stardataparamconvert.get(ls[i], ls[i])] = i - 1
if ls[0] in ["normal", "cartesian", "nosurvey"]:
@@ -329,23 +199,40 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
stardata = stardatadefault
else:
assert ls[0] == "passage", line
elif cmd == "equate":
#print(' - Equate found: ')
LoadSurvexEquate(survexblock, line)
elif cmd == "fix":
#print(' - Fix found: ')
survexblock.MakeSurvexStation(line.split()[0])
else:
#print(' - Stuff')
if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
"calibrate", "set", "infer", "alias", "ref", "cs", "declination", "case"]:
print("Unrecognised command in line:", cmd, line, survexblock, survexblock.survexfile.path)
endstamp = datetime.now()
timetaken = endstamp - stamp
# print(' - Time to process: ' + str(timetaken))
def ReloadSurvexCave(survex_cave, area):
print(survex_cave, area)
cave = models.Cave.objects.get(kataster_number=survex_cave, area__short_name=area)
print(cave)
#cave = models.Cave.objects.get(kataster_number=survex_cave)
cave.survexblock_set.all().delete()
cave.survexfile_set.all().delete()
cave.survexdirectory_set.all().delete()
survexfile = models.SurvexFile(path="caves-" + cave.kat_area() + "/" + survex_cave + "/" + survex_cave, cave=cave)
survexfile.save()
survexfile.SetDirectory()
survexblockroot = models.SurvexBlock(name="root", survexpath="caves-" + cave.kat_area(), begin_char=0, cave=cave, survexfile=survexfile, totalleglength=0.0)
survexblockroot.save()
fin = survexfile.OpenFile()
textlines = [ ]
RecursiveLoad(survexblockroot, survexfile, fin, textlines)
survexblockroot.text = "".join(textlines)
survexblockroot.save()
def LoadAllSurvexBlocks():
@@ -362,7 +249,7 @@ def LoadAllSurvexBlocks():
print(" - Data flushed")
survexfile = models.SurvexFile(path=settings.SURVEX_TOPNAME, cave=None)
survexfile = models.SurvexFile(path="all", cave=None)
survexfile.save()
survexfile.SetDirectory()
@@ -371,13 +258,22 @@ def LoadAllSurvexBlocks():
survexblockroot.save()
fin = survexfile.OpenFile()
textlines = [ ]
# The real work starts here
RecursiveLoad(survexblockroot, survexfile, fin, textlines)
fin.close()
survexblockroot.text = "".join(textlines)
survexblockroot.save()
#Load each cave,
#FIXME this should be dealt with by the load-all above
print(" - Reloading all caves")
caves = models.Cave.objects.all()
for cave in caves:
if cave.kataster_number and os.path.isdir(os.path.join(settings.SURVEX_DATA, "caves-" + cave.kat_area(), cave.kataster_number)):
if cave.kataster_number not in ['40']:
print("loading", cave, cave.kat_area())
ReloadSurvexCave(cave.kataster_number, cave.kat_area())
poslineregex = re.compile(r"^\(\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*),\s*([+-]?\d*\.\d*)\s*\)\s*([^\s]+)$")
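# Sketch of one line of the cavern-generated .pos file that this pattern expects
# (the coordinates and station name here are invented):
#   (36670.37, 83317.61, 1903.97 ) 1623.204.trunk.31
# poslineregex.match(...) on that line yields
#   ('36670.37', '83317.61', '1903.97', '1623.204.trunk.31')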
@@ -385,12 +281,12 @@ def LoadPos():
print('Loading Pos....')
call([settings.CAVERN, "--output=%s%s.3d" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME), "%s%s.svx" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME)])
call([settings.THREEDTOPOS, '%s%s.3d' % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME)], cwd = settings.SURVEX_DATA)
posfile = open("%s%s.pos" % (settings.SURVEX_DATA, settings.SURVEX_TOPNAME))
call([settings.CAVERN, "--output=%s/all.3d" % settings.SURVEX_DATA, "%s/all.svx" % settings.SURVEX_DATA])
call([settings.THREEDTOPOS, '%sall.3d' % settings.SURVEX_DATA], cwd = settings.SURVEX_DATA)
posfile = open("%sall.pos" % settings.SURVEX_DATA)
posfile.readline() #Drop header
for line in posfile.readlines():
r = poslineregex.match(line)
r = poslineregex.match(line)
if r:
x, y, z, name = r.groups()
try:

View File

@@ -1,7 +1,12 @@
import sys, os, types, logging, stat
#sys.path.append('C:\\Expo\\expoweb')
#from troggle import *
#os.environ['DJANGO_SETTINGS_MODULE']='troggle.settings'
import settings
from troggle.core.models import *
from PIL import Image
#import settings
#import core.models as models
import csv
import re
import datetime
@@ -24,7 +29,7 @@ def readSurveysFromCSV():
try: # could probably combine these two
surveytab = open(os.path.join(settings.SURVEY_SCANS, "Surveys.csv"))
except IOError:
import cStringIO, urllib
import cStringIO, urllib
surveytab = cStringIO.StringIO(urllib.urlopen(settings.SURVEY_SCANS + "/Surveys.csv").read())
dialect=csv.Sniffer().sniff(surveytab.read())
surveytab.seek(0,0)
@@ -37,21 +42,24 @@ def readSurveysFromCSV():
print("There are no expeditions in the database. Please run the logbook parser.")
sys.exit()
logging.info("Deleting all scanned images")
ScannedImage.objects.all().delete()
logging.info("Deleting all survey objects")
Survey.objects.all().delete()
logging.info("Beginning to import surveys from "+str(os.path.join(settings.SURVEYS, "Surveys.csv"))+"\n"+"-"*60+"\n")
for survey in surveyreader:
# I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
# print(walletNumberLetter.groups())
#I hate this, but some surveys have a letter eg 2000#34a. The next line deals with that.
walletNumberLetter = re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)',survey[header['Survey Number']])
# print(walletNumberLetter.groups())
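# Sketch: for a made-up wallet number such as "34a" the named groups give
# re.match(r'(?P<number>\d*)(?P<letter>[a-zA-Z]*)', '34a').groups() -> ('34', 'a')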
year=survey[header['Year']]
surveyobj = Survey(
expedition = Expedition.objects.filter(year=year)[0],
wallet_number = walletNumberLetter.group('number'),
@@ -65,6 +73,7 @@ def readSurveysFromCSV():
pass
surveyobj.save()
logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
# dead
@@ -90,7 +99,7 @@ def parseSurveyScans(expedition, logfile=None):
#scanList = listdir(expedition.year, surveyFolder)
scanList=os.listdir(os.path.join(yearPath,surveyFolder))
except AttributeError:
print("Folder: " + surveyFolder + " ignored\r")
print(surveyFolder + " ignored\r",)
continue
for scan in scanList:
@@ -98,7 +107,7 @@ def parseSurveyScans(expedition, logfile=None):
scanChopped=re.match(r'(?i).*(notes|elev|plan|elevation|extend)(\d*)\.(png|jpg|jpeg)',scan).groups()
scanType,scanNumber,scanFormat=scanChopped
except AttributeError:
print("File: " + scan + " ignored\r")
print(scan + " ignored\r",)
continue
if scanType == 'elev' or scanType == 'extend':
scanType = 'elevation'
@@ -132,14 +141,14 @@ def parseSurveyScans(expedition, logfile=None):
yearPath=os.path.join(settings.SURVEY_SCANS, "surveyscans", expedition.year)
print("No folder found for " + expedition.year + " at:- " + yearPath)
# dead
def parseSurveys(logfile=None):
try:
readSurveysFromCSV()
except (IOError, OSError):
print("Survey CSV not found..")
pass
for expedition in Expedition.objects.filter(year__gte=2000): #expos since 2000, because paths and filenames were nonstandard before then
parseSurveyScans(expedition)
@@ -165,25 +174,28 @@ def GetListDir(sdir):
ff = os.path.join(sdir, f)
res.append((f, ff, os.path.isdir(ff)))
return res
def LoadListScansFile(survexscansfolder):
gld = [ ]
# flatten out any directories in these book files
for (fyf, ffyf, fisdiryf) in GetListDir(survexscansfolder.fpath):
if fisdiryf:
gld.extend(GetListDir(ffyf))
else:
gld.append((fyf, ffyf, fisdiryf))
for (fyf, ffyf, fisdiryf) in gld:
#assert not fisdiryf, ffyf
if re.search(r"\.(?:png|jpg|jpeg)(?i)$", fyf):
survexscansingle = SurvexScanSingle(ffile=ffyf, name=fyf, survexscansfolder=survexscansfolder)
survexscansingle.save()
# this iterates through the scans directories (either here or on the remote server)
# and builds up the models we can access later
def LoadListScans():
@@ -194,17 +206,17 @@ def LoadListScans():
SurvexScansFolder.objects.all().delete()
# first do the smkhs (large kh survey scans) directory
survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
survexscansfoldersmkhs = SurvexScansFolder(fpath=os.path.join(settings.SURVEY_SCANS, "smkhs"), walletname="smkhs")
if os.path.isdir(survexscansfoldersmkhs.fpath):
survexscansfoldersmkhs.save()
LoadListScansFile(survexscansfoldersmkhs)
# iterate into the surveyscans directory
for f, ff, fisdir in GetListDir(os.path.join(settings.SURVEY_SCANS, "surveyscans")):
if not fisdir:
continue
# do the year folders
if re.match(r"\d\d\d\d$", f):
for fy, ffy, fisdiry in GetListDir(ff):
@@ -213,13 +225,13 @@ def LoadListScans():
survexscansfolder = SurvexScansFolder(fpath=ffy, walletname=fy)
survexscansfolder.save()
LoadListScansFile(survexscansfolder)
# do the
# do the
elif f != "thumbs":
survexscansfolder = SurvexScansFolder(fpath=ff, walletname=f)
survexscansfolder.save()
LoadListScansFile(survexscansfolder)
def FindTunnelScan(tunnelfile, path):
scansfolder, scansfile = None, None
@@ -235,12 +247,12 @@ def FindTunnelScan(tunnelfile, path):
print(scansfilel, len(scansfilel))
assert len(scansfilel) == 1
scansfile = scansfilel[0]
if scansfolder:
tunnelfile.survexscansfolders.add(scansfolder)
if scansfile:
tunnelfile.survexscans.add(scansfile)
elif path and not re.search(r"\.(?:png|jpg|jpeg)$(?i)", path):
name = os.path.split(path)[1]
print("ttt", tunnelfile.tunnelpath, path, name)
@@ -260,22 +272,21 @@ def SetTunnelfileInfo(tunnelfile):
fin = open(ff)
ttext = fin.read()
fin.close()
mtype = re.search("<(fontcolours|sketch)", ttext)
#assert mtype, ff
if mtype:
tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
assert mtype, ff
tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
tunnelfile.npaths = len(re.findall("<skpath", ttext))
tunnelfile.save()
# <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
# <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
for path, style in re.findall('<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
FindTunnelScan(tunnelfile, path)
# should also scan and look for survex blocks that might have been included
# and also survex titles as well.
# and also survex titles as well.
tunnelfile.save()
@@ -295,6 +306,6 @@ def LoadTunnelFiles():
elif f[-4:] == ".xml":
tunnelfile = TunnelFile(tunnelpath=lf, tunnelname=os.path.split(f[:-4])[1])
tunnelfile.save()
for tunnelfile in TunnelFile.objects.all():
SetTunnelfileInfo(tunnelfile)

View File

@@ -27,7 +27,7 @@ from django.conf.urls import *
from profiles import views
urlpatterns = [
urlpatterns = patterns('',
url(r'^select/$',
views.select_profile,
name='profiles_select_profile'),
@@ -43,4 +43,4 @@ urlpatterns = [
url(r'^$',
views.profile_list,
name='profiles_profile_list'),
]
)

View File

@@ -14,7 +14,8 @@ try:
except ImportError: # django >= 1.7
SiteProfileNotAvailable = type('SiteProfileNotAvailable', (Exception,), {})
from django.apps import apps
from django.db.models import get_model
def get_profile_model():
"""
@@ -27,7 +28,7 @@ def get_profile_model():
if (not hasattr(settings, 'AUTH_PROFILE_MODULE')) or \
(not settings.AUTH_PROFILE_MODULE):
raise SiteProfileNotAvailable
profile_mod = apps.get_model(*settings.AUTH_PROFILE_MODULE.split('.'))
profile_mod = get_model(*settings.AUTH_PROFILE_MODULE.split('.'))
if profile_mod is None:
raise SiteProfileNotAvailable
return profile_mod
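For illustration, with a hypothetical AUTH_PROFILE_MODULE value of "core.Person" (troggle's actual setting may differ), the lookup above resolves the profile model like this:

from django.apps import apps
profile_mod = apps.get_model(*"core.Person".split('.'))   # i.e. apps.get_model('core', 'Person')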

View File

@@ -8,8 +8,9 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Django settings for troggle project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = [u'expo.survex.com']
ALLOWED_HOSTS = []
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
@@ -44,87 +45,34 @@ NOTABLECAVESHREFS = [ "161", "204", "258", "76", "107", "264" ]
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/troggle/media-admin/'
PHOTOS_ROOT = os.path.join(EXPOWEB, 'photos')
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data")
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "noinfo", "entrance_data")
MEDIA_URL = urlparse.urljoin(URL_ROOT , '/site_media/')
SURVEYS_URL = urlparse.urljoin(URL_ROOT , '/survey_scans/')
PHOTOS_URL = urlparse.urljoin(URL_ROOT , '/photos/')
SVX_URL = urlparse.urljoin(URL_ROOT , '/survex/')
# top-level survex file basename (without .svx)
SURVEX_TOPNAME = "1623"
KAT_AREAS = ['1623', '1624', '1626', '1627']
DEFAULT_LOGBOOK_PARSER = "Parseloghtmltxt"
DEFAULT_LOGBOOK_FILE = "logbook.html"
LOGBOOK_PARSER_SETTINGS = {
"2018": ("2018/logbook.html", "Parseloghtmltxt"),
"2017": ("2017/logbook.html", "Parseloghtmltxt"),
"2016": ("2016/logbook.html", "Parseloghtmltxt"),
"2015": ("2015/logbook.html", "Parseloghtmltxt"),
"2014": ("2014/logbook.html", "Parseloghtmltxt"),
"2013": ("2013/logbook.html", "Parseloghtmltxt"),
"2012": ("2012/logbook.html", "Parseloghtmltxt"),
"2011": ("2011/logbook.html", "Parseloghtmltxt"),
"2010": ("2010/logbook.html", "Parselogwikitxt"),
"2009": ("2009/2009logbook.txt", "Parselogwikitxt"),
"2008": ("2008/2008logbook.txt", "Parselogwikitxt"),
"2007": ("2007/logbook.html", "Parseloghtmltxt"),
"2006": ("2006/logbook/logbook_06.txt", "Parselogwikitxt"),
"2005": ("2005/logbook.html", "Parseloghtmltxt"),
"2004": ("2004/logbook.html", "Parseloghtmltxt"),
"2003": ("2003/logbook.html", "Parseloghtml03"),
"2002": ("2002/logbook.html", "Parseloghtmltxt"),
"2001": ("2001/log.htm", "Parseloghtml01"),
"2000": ("2000/log.htm", "Parseloghtml01"),
"1999": ("1999/log.htm", "Parseloghtml01"),
"1998": ("1998/log.htm", "Parseloghtml01"),
"1997": ("1997/log.htm", "Parseloghtml01"),
"1996": ("1996/log.htm", "Parseloghtml01"),
"1995": ("1995/log.htm", "Parseloghtml01"),
"1994": ("1994/log.htm", "Parseloghtml01"),
"1993": ("1993/log.htm", "Parseloghtml01"),
"1992": ("1992/log.htm", "Parseloghtml01"),
"1991": ("1991/log.htm", "Parseloghtml01"),
}
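# Each entry above maps an expedition year to (logbook file relative to EXPOWEB/years,
# parser function name). LoadLogbookForExpedition() in parsers/logbooks.py resolves the
# parser by name; a minimal sketch of that dispatch, using the 2018 entry as an example:
#   year_settings = LOGBOOK_PARSER_SETTINGS["2018"]  # ("2018/logbook.html", "Parseloghtmltxt")
#   parser = globals()[year_settings[1]]             # looked up inside parsers/logbooks.py
#   parser(expedition.year, expedition, txt)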
APPEND_SLASH = False
SMART_APPEND_SLASH = True
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a#vaeozn0)uz_9t_%v5n#tj)m+%ace6b_0(^fj!355qki*v)j2'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PYTHON_PATH, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
#'core.context.troggle_context'
]
},
},
]
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.load_template_source',
)
if django.VERSION[0] == 1 and django.VERSION[1] < 4:
authmodule = 'django.core.context_processors.auth'
else:
authmodule = 'django.contrib.auth.context_processors.auth'
TEMPLATE_CONTEXT_PROCESSORS = ( authmodule, "core.context.troggle_context", )
LOGIN_REDIRECT_URL = '/'
INSTALLED_APPS = (
@@ -137,13 +85,14 @@ INSTALLED_APPS = (
'django.contrib.messages',
'django.contrib.staticfiles',
#'troggle.photologue',
#'troggle.reversion',
#'django_evolution',
'tinymce',
'registration',
'troggle.profiles',
'troggle.core',
'troggle.flatpages',
'imagekit',
'django_extensions',
)
MIDDLEWARE_CLASSES = (

View File

@@ -2,14 +2,14 @@
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/>
<link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/main3.css" title="eyeCandy"/>
<link rel="alternate stylesheet" type="text/css" href="{{ MEDIA_URL }}css/mainplain.css" title="plain"/>
<link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/dropdownNavStyle.css" />
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/main3.css" title="eyeCandy"/>
<link rel="alternate stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/mainplain.css" title="plain"/>
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/dropdownNavStyle.css" />
<title>{% block title %}Troggle{% endblock %}</title>
<!-- <script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script> -->
<script src="{{ MEDIA_URL }}js/jquery.quicksearch.js" type="text/javascript"></script>
<script src="{{ MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script src="{{ MEDIA_URL }}js/jquery.dropdownPlain.js" type="text/javascript"></script>
<script src="{{ settings.JSLIB_URL }}jquery/jquery.min.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/jquery.quicksearch.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/jquery.dropdownPlain.js" type="text/javascript"></script>
{% block head %}{% endblock %}
</head>
@@ -64,8 +64,8 @@
<div id="related">
{% block related %}
<script language="javascript">
$('#related').remove()
/*This is a hack to stop a line appearing because of the empty div border*/
$('#related').remove()
/*This is a hack to stop a line appearing because of the empty div border*/
</script>
{% endblock %}
</div>

View File

@@ -17,7 +17,7 @@ div.cv-panel {
}
div.cv-compass, div.cv-ahi {
position: absolute;
position: absolute;
bottom: 95px;
right: 5px;
margin: 0;
@@ -31,7 +31,7 @@ div.cv-compass, div.cv-ahi {
background-color: black;
color: white;
}
div.cv-ahi {
right: 95px;
}
@@ -152,7 +152,7 @@ div.linear-scale-caption {
position: absolute;
top: 64px;
left: 0px;
height: auto;
height: auto;
margin-top:0;
bottom: 44px;
background-color: #222222;
@@ -220,7 +220,7 @@ div.linear-scale-caption {
}
#frame .tab {
position: absolute;
right: 0px;
right: 0px;
width: 40px;
height: 40px;
box-sizing: border-box;
@@ -421,7 +421,7 @@ div#scene {
CV.UI.init( 'scene', {
home: '/javascript/CaveView/',
surveyDirectory: '/cave/3d/',
terrainDirectory: '/loser/surface/terrain/'
terrainDirectory: '/loser/surface/terrain/'
} );
// load a single survey to display
@@ -516,17 +516,14 @@ div#scene {
{% if ent.entrance.exact_station %}
<dt>Exact Station</dt><dd>{{ ent.entrance.exact_station|safe }} {{ ent.entrance.exact_location.y|safe }}, {{ ent.entrance.exact_location.x|safe }}, {{ ent.entrance.exact_location.z|safe }}m</dd>
{% endif %}
{% if ent.entrance.find_location %}
<dt>Coordinates</dt><dd>{{ ent.entrance.find_location|safe }}</dd>
{% endif %}
{% if ent.entrance.other_station %}
{% if ent.entrance.other_station %}
<dt>Other Station</dt><dd>{{ ent.entrance.other_station|safe }}
{% if ent.entrance.other_description %}
- {{ ent.entrance.other_description|safe }}
{% endif %} {{ ent.entrance.other_location.y|safe }}, {{ ent.entrance.other_location.x|safe }}, {{ ent.entrance.other_location.z|safe }}m
</dd>
{% endif %}
</dl>
</dl>
</li>
{% endfor %}
</ul>

View File

@@ -11,7 +11,7 @@
<h3>Notable caves</h3>
<ul>
{% for cave in notablecaves %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{% endif %} {{cave.official_name|safe}}</a> </li>
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li>
{% endfor %}
</ul>
@@ -20,7 +20,7 @@
<table class="searchable">
{% for cave in caves1623 %}
<tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}</td></tr>
<tr><td> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </td></tr>
{% endfor %}
</table>
@@ -30,8 +30,7 @@
<ul class="searchable">
{% for cave in caves1626 %}
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }} {{cave.official_name|safe}}</a> {% if cave.unofficial_number %}({{cave.unofficial_number }}){% endif %}{% else %}{{cave.unofficial_number }} {{cave.official_name|safe}}</a> {% endif %}
</li>
<li> <a href="{{ cave.url }}">{% if cave.kataster_number %}{{ cave.kataster_number }}{% else %}{{cave.unofficial_number }}{%endif %} {{cave.official_name|safe}}</a> </li>
{% endfor %}
</ul>

View File

@@ -23,45 +23,25 @@
<form name="reset" method="post" action="">
<h3>Wipe:</h3>
<table>
<tr>
<td>Wipe entire database and recreate tables: </td>
<td><input type="checkbox" name="reload_db" /></td>
<td>
<input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility.">
</td>
</tr>
</table>
<h3>Wipe:</h3>
<table>
<tr><td>Wipe entire database and recreate tables: </td><td><input type="checkbox" name="reload_db" /></td><td> <input type="submit" id="Import" value="I really want to delete all information in troggle, and accept all responsibility."></td></tr>
</table>
</form>
<h3>Import (non-destructive):</h3>
<form name="import" method="post" action="">
<table>
<tr>
<td>people from folk.csv using parsers\people.py</td>
<td><input type="checkbox" name="import_people"/></td>
</tr>
<tr>
<td>caves from cavetab2.csv using parsers\cavetab.py</td>
<td> <input type="checkbox" class="parser" name="import_cavetab"/></td>
</tr>
<tr>
<td>logbook entries using parsers\logbooks.py</td>
<td><input type="checkbox" name="import_logbooks"/></td>
</tr>
<tr>
<td>QMs using parsers\QMs.py</td>
<td><input type="checkbox" name="import_QMs" /></td>
</tr>
<tr>
<td>survey scans using parsers\surveys.py</td>
<td><input type="checkbox" name="import_surveys" /></td>
</tr>
<tr>
<td>survex data using parsers\survex.py</td>
<td><input type="checkbox" name="import_survex" /></td>
</tr>
</table>
<table>
<tr><td>people from folk.csv using parsers\people.py</td><td><input type="checkbox" name="import_people"/></td></tr>
<tr><td>caves from cavetab2.csv using parsers\cavetab.py</td><td> <input type="checkbox" class="parser" name="import_cavetab"/></td></tr>
<tr><td>logbook entries using parsers\logbooks.py</td><td><input type="checkbox" name="import_logbooks"/></td></tr>
<tr><td>QMs using parsers\QMs.py</td><td><input type="checkbox" name="import_QMs" /></td></tr>
<tr><td>survey scans using parsers\surveys.py</td><td><input type="checkbox" name="import_surveys" /></td></tr>
<tr><td>survex data using parsers\survex.py</td><td><input type="checkbox" name="import_survex" /></td></tr>
</table>
<p>
<input type="submit" id="Import" value="Import">

View File

@@ -9,7 +9,6 @@
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
{% endblock %}
{% block content %}
<h1>Edit Cave</h1>
<form action="" method="post">{% csrf_token %}
<table>{{ form }}{{caveAndEntranceFormSet}}</table>
{{ versionControlForm }}

View File

@@ -3,11 +3,6 @@
{% block extrahead %}
{% load csrffaker %}
<script src="{{ settings.TINY_MCE_MEDIA_URL }}tiny_mce.js" type="text/javascript"></script>
<script type="text/javascript">
tinyMCE.init({
mode : "textareas"
});
</script>
{% endblock %}
{% block body %}
<h1>Edit {{ path }}</h1>

View File

@@ -2,15 +2,19 @@
{% load wiki_markup %}
{% load link %}
{% block title %}Expedition {{this_expedition.name}}{% endblock %}
{% block editLink %}<a href={{this_expedition.get_admin_url}}>Edit expedition {{expedition|wiki_to_html_short}}</a>{% endblock %}
{% block title %}Expedition {{expedition.name}}{% endblock %}
{% block editLink %}<a href={{expedition.get_admin_url}}>Edit expedition {{expedition|wiki_to_html_short}}</a>{% endblock %}
{% block related %}
{% endblock %}
{% block content %}
<h2>{{this_expedition.name}}</h2>
{% if message %}
<p>debug message: {{message}}</p>
{% endif %}
<h2>{{expedition.name}}</h2>
<p><b>Other years:</b>
{% for otherexpedition in expeditions %}
@@ -29,7 +33,7 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
<table class="expeditionpersonlist">
<tr>
<th>Caver</th>
{% for expeditionday in this_expedition.expeditionday_set.all %}
{% for expeditionday in expedition.expeditionday_set.all %}
<th>
{{expeditionday.date.day}}
</th>
@@ -37,13 +41,13 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
</tr>
{% for personexpeditionday in personexpeditiondays %}
<tr>
<td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person|safe}}</a></td>
<td><a href="{{ personexpeditionday.personexpedition.get_absolute_url }}">{{personexpeditionday.personexpedition.person}}</a></td>
{% for persondayactivities in personexpeditionday.personrow %}
{% if persondayactivities.persontrips or persondayactivities.survexblocks %}
<td class="persondayactivity">
{% for persontrip in persondayactivities.persontrips %}
<a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-1">T</a>
<a href="{{persontrip.logbook_entry.get_absolute_url}}" class="dayindexlog-{{persontrip.logbook_entry.DayIndex}}">T</a>
{% endfor %}
<br/>
{% for survexblock in persondayactivities.survexblocks %}
@@ -63,13 +67,13 @@ an "S" for a survey trip. The colours are the same for people on the same trip.
<form action="" method="GET"><input type="submit" name="reload" value="Reload"></form>
<h3>Logbooks and survey trips per day</h3>
<a href="{% url "newLogBookEntry" expeditionyear=this_expedition.year %}">New logbook entry</a>
<a href="{% url "newLogBookEntry" expeditionyear=expedition.year %}">New logbook entry</a>
<table class="expeditionlogbooks">
<tr><th>Date</th><th>Logged trips</th><th>Surveys</th></tr>
{% regroup dateditems|dictsort:"date" by date as dates %}
{% for date in dates %}
<tr>
<td>{{date.grouper|date:"D d M Y"}}</td>
<td>{{date.grouper}}</td>
<td>{% for item in date.list %}
{% if item.isLogbookEntry %}<a href="{{ item.get_absolute_url }}">{{item.title|safe}}</a><br/>{% endif %}
{% endfor %}</td>


@@ -16,7 +16,7 @@
{% if entry.is_deletion %}
{{ entry.object_repr }}
{% else %}
<a href="admin/{{ entry.get_admin_url }}/">{{ entry.object_repr }}</a>
<a href="admin/{{ entry.get_admin_url }}">{{ entry.object_repr }}</a>
{% endif %}
<br/>
{% if entry.content_type %}
@@ -49,6 +49,17 @@ Here you will find information about the {{expedition.objects.count}} expedition
If you are an expedition member, please sign up using the link to the top right and begin editing.
</p>
<h3>News</h3>
<p class="indent">
Everyone is gearing up for the 2009 expedition; please see the link below for the main expedition website.
</p>
<h3>Troggle development</h3>
<p class="indent">
Troggle is still under development. Check out the <a href="http://troggle.googlecode.com">development page</a> on Google Code, where you can file bug reports, make suggestions, and help develop the code. There is also an old todo list <a href="{%url "todo"%}">here</a>.
</p>
</div>
{% endblock content %}
{% block margins %}


@@ -2,12 +2,12 @@
{% load wiki_markup %}
{% block title %}Logbook {{logbookentry.id}}{% endblock %}
{% block editLink %}<a href={{logbookentry.get_admin_url}}/>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %}
{% block editLink %}<a href={{logbookentry.get_admin_url}}>Edit logbook entry {{logbookentry|wiki_to_html_short}}</a>{% endblock %}
{% block content %}
{% block related %}{% endblock %}
{% block nav %}{% endblock %}
<h2>{{logbookentry.title|safe}}</h2>
<h2>{{logbookentry.title}}</h2>
<div id="related">
<p><a href="{{ logbookentry.expedition.get_absolute_url }}">{{logbookentry.expedition.name}}</a></p>
@@ -20,10 +20,10 @@
<p>
{% if logbookentry.get_previous_by_date %}
<a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date|date:"D d M Y"}}</a>
<a href="{{ logbookentry.get_previous_by_date.get_absolute_url }}">{{logbookentry.get_previous_by_date.date}}</a>
{% endif %}
{% if logbookentry.get_next_by_date %}
<a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date|date:"D d M Y"}}</a>
<a href="{{ logbookentry.get_next_by_date.get_absolute_url }}">{{logbookentry.get_next_by_date.date}}</a>
{% endif %}
</p>
@@ -47,12 +47,12 @@
<td>
{% if persontrip.persontrip_prev %}
<a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date|date:"D d M Y"}}</a>
<a href="{{ persontrip.persontrip_prev.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_prev.logbook_entry.date}}</a>
{% endif %}
</td>
<td>
{% if persontrip.persontrip_next %}
<a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date|date:"D d M Y"}}</a>
<a href="{{ persontrip.persontrip_next.logbook_entry.get_absolute_url }}">{{persontrip.persontrip_next.logbook_entry.date}}</a>
{% endif %}
</td>
@@ -65,14 +65,9 @@
</div>
<div id="col1">
<div class="logbookentry">
<b>{{logbookentry.date|date:"D d M Y"}}</b>
{% if logbookentry.entry_type == "html" %}
<p>{{logbookentry.text|safe}}</p>
{% else %}
{{logbookentry.text|wiki_to_html}}
{% endif %}
</div>
<div class="logbookentry">
<b>{{logbookentry.date}}</b>
{{logbookentry.text|wiki_to_html}}</div>
</div>
</div>


@@ -18,8 +18,8 @@
{% if pic.is_mugshot %}
<div class="figure">
<p> <img src="{{ pic.thumbnail_image.url }}" class="thumbnail" />
<p> {{ pic.caption }} </p>
<p> <a href="{{ pic.get_admin_url }}">edit {{pic}}</a>
<p> {{ pic.caption }}</p>
<p> <a href="{{ pic.get_admin_url }}">edit {{pic}}</a> </>
</p>
</p>
</div>
@@ -32,7 +32,7 @@
<ul>
{% for personexpedition in person.personexpedition_set.all %}
<li> <a href="{{ personexpedition.get_absolute_url }}">{{personexpedition.expedition.year}}</a>
<span style="padding-left:{{ personexpedition.persontrip_set.all|length }}0px; background-color:red"></span>
<span style="padding-left:{{personexpedition.persontrip_set.all|length}}0px; background-color:red"></span>
{{personexpedition.persontrip_set.all|length}} trips
</li>
{% endfor %}


@@ -7,7 +7,7 @@
{% block content %}
<h1>
<a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person|safe}}</a> :
<a href="{{personexpedition.person.get_absolute_url}}">{{personexpedition.person}}</a> :
<a href="{{personexpedition.expedition.get_absolute_url}}">{{personexpedition.expedition}}</a>
</h1>


@@ -8,12 +8,12 @@
<h2>Notable expoers</h2>
<table class="searchable">
<tr><th>Person</th><th>First</th><th>Last</th><th>Notability</th></tr>
{% for person in notablepersons|dictsortreversed:"notability" %}
{% for person in notablepersons %}
<tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{ person.first.expedition.year }}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{ person.last.expedition.year }}</a></td>
<td>{{person.notability|floatformat:2}}</td>
<td>{{person.notability}}</td>
</tr>
{% endfor %}
</table>
@@ -31,8 +31,8 @@
<tr>
<td><a href="{{ person.get_absolute_url }}">{{person|wiki_to_html_short}}</a></td>
<td><a href="{{ person.first.get_absolute_url }}">{{person.first.expedition.year}}</a></td>
<td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td>
<td></td>
<td><a href="{{ person.last.get_absolute_url }}">{{person.last.expedition.year}}</a></td>
<td>{{ person.surveyedleglength }}</td>
</tr>
{% endfor %}
</table>


@@ -4,7 +4,9 @@
{% block title %} QM: {{qm|wiki_to_html_short}} {% endblock %}
{% block editLink %}| <a href="{{qm.get_admin_url}}/">Edit QM {{qm|wiki_to_html_short}}</a>{% endblock %}
{% block editLink %}| <a href={{qm.get_admin_url}}>Edit QM {{qm|wiki_to_html_short}}</a>{% endblock %}
{% block contentheader %}
<table id="cavepage">


@@ -5,7 +5,7 @@
{% block title %}CUCC Virtual Survey Binder: {{ current_expedition }}{{ current_survey }}{%endblock%}
{% block head %}
<link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}css/nav.css" />
<link rel="stylesheet" type="text/css" href="{{ settings.MEDIA_URL }}css/nav.css" />
<script language="javascript">
blankColor = "rgb(153, 153, 153)"
@@ -164,7 +164,7 @@
</p>
</div>
{% endfor %}
<div class="figure"> <a href="{{ URL_ROOT }}/admin/expo/scannedimage/add/"> <img src="{{ URL_ROOT }}{{ ADMIN_MEDIA_PREFIX }}img/admin/icon_addlink.gif" /> Add a new scanned notes page. </a> </div>
<div class="figure"> <a href="{{ settings.URL_ROOT }}/admin/expo/scannedimage/add/"> <img src="{{ settings.URL_ROOT }}{{ settings.ADMIN_MEDIA_PREFIX }}img/admin/icon_addlink.gif" /> Add a new scanned notes page. </a> </div>
</div>
<br class="clearfloat" />
<div id="survexFileContent" class="behind"> survex file editor, keeping file in original structure <br />


@@ -41,7 +41,7 @@
<td>{{survexblock.name}}</td>
<td>
{% if survexblock.expedition %}
<a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date|date:"D d M Y"}}</a>
<a href="{{survexblock.expedition.get_absolute_url}}">{{survexblock.date}}</a>
{% else %}
{{survexblock.date}}
{% endif %}


@@ -4,7 +4,7 @@
{% block title %}{{ title }}{% endblock %}
{% block head %}
<script src="{{ MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script src="{{ settings.MEDIA_URL }}js/base.js" type="text/javascript"></script>
<script type="text/javascript" src="{{settings.JSLIB_URL}}jquery-form/jquery.form.min.js"></script>
<script type="text/javascript" src="{{settings.JSLIB_URL}}codemirror/codemirror.min.js"></script>
@@ -46,7 +46,7 @@ $(document).ready(function()
</p>
{% endif %}
<form id="codewikiform" action="" method="POST">{% csrf_token %}
<form id="codewikiform" action="" method="POST">
<div class="codeframebit">{{form.code}}</div>
<div style="display:none">{{form.filename}} {{form.dirname}} {{form.datetime}} {{form.outputtype}}</div>
<input type="submit" name="diff" value="Diffy" />


@@ -34,6 +34,6 @@ add wikilinks
{% endblock content %}
{% block margins %}
<img class="leftMargin eyeCandy fadeIn" src="{{ MEDIA_URL }}eieshole.jpg">
<img class="rightMargin eyeCandy fadeIn" src="{{ MEDIA_URL }}goesser.jpg">
<img class="leftMargin eyeCandy fadeIn" src="{{ settings.MEDIA_URL }}eieshole.jpg">
<img class="rightMargin eyeCandy fadeIn" src="{{ settings.MEDIA_URL }}goesser.jpg">
{% endblock margins %}

urls.py

@@ -1,19 +1,17 @@
from django.conf.urls import *
from django.conf import settings
from django.conf.urls.static import static
from django.views.static import serve
from core.views import * # flat import
from core.views_other import *
from core.views_caves import *
from core.views_survex import *
from core.models import *
from flatpages.views import *
from django.views.generic.edit import UpdateView
from django.contrib import admin
from django.views.generic.list import ListView
from django.contrib import admin
#admin.autodiscover()
admin.autodiscover()
# type url probably means it's used.
@@ -22,27 +20,27 @@ from django.contrib import admin
# <reference to python function in 'core' folder>,
# <name optional argument for URL reversing (doesn't do much)>)
actualurlpatterns = [
actualurlpatterns = patterns('',
url(r'^testingurl/?$' , views_caves.millenialcaves, name="testing"),
url(r'^millenialcaves/?$', views_caves.millenialcaves, name="millenialcaves"),
url(r'^troggle$', views_other.frontpage, name="frontpage"),
url(r'^todo/$', views_other.todo, name="todo"),
url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
url(r'^caves/?$', views_caves.caveindex, name="caveindex"),
url(r'^people/?$', views_logbooks.personindex, name="personindex"),
url(r'^newqmnumber/?$', views_other.ajax_QM_number, ),
url(r'^lbo_suggestions/?$', logbook_entry_suggestions),
url(r'^lbo_suggestions/?$', logbook_entry_suggestions),
#(r'^person/(?P<person_id>\d*)/?$', views_logbooks.person),
url(r'^person/(?P<first_name>[A-Z]*[a-z\-\'&;]*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-&;]*)/?', views_logbooks.person, name="person"),
url(r'^person/(?P<first_name>[A-Z]*[a-z\-\']*)[^a-zA-Z]*(?P<last_name>[a-z\-\']*[^a-zA-Z]*[A-Z]*[a-z\-]*)/?', views_logbooks.person, name="person"),
#url(r'^person/(\w+_\w+)$', views_logbooks.person, name="person"),
url(r'^expedition/(\d+)$', views_logbooks.expedition, name="expedition"),
url(r'^expeditions/?$', views_logbooks.ExpeditionListView.as_view(), name="expeditions"),
url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z&;]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-zA-Z&;]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"),
url(r'^personexpedition/(?P<first_name>[A-Z]*[a-z]*)[^a-zA-Z]*(?P<last_name>[A-Z]*[a-z]*)/(?P<year>\d+)/?$', views_logbooks.personexpedition, name="personexpedition"),
url(r'^logbookentry/(?P<date>.*)/(?P<slug>.*)/?$', views_logbooks.logbookentry,name="logbookentry"),
url(r'^newlogbookentry/(?P<expeditionyear>.*)$', views_logbooks.newLogbookEntry, name="newLogBookEntry"),
url(r'^editlogbookentry/(?P<expeditionyear>[^/]*)/(?P<pdate>[^/]*)/(?P<pslug>[^/]*)/$', views_logbooks.newLogbookEntry, name="editLogBookEntry"),
@@ -54,8 +52,8 @@ actualurlpatterns = [
url(r'^getPeople/(?P<expeditionslug>.*)', views_logbooks.get_people, name = "get_people"),
url(r'^getLogBookEntries/(?P<expeditionslug>.*)', views_logbooks.get_logbook_entries, name = "get_logbook_entries"),
url(r'^cave/new/$', views_caves.edit_cave, name="newcave"),
url(r'^cave/new/$', edit_cave, name="newcave"),
url(r'^cave/(?P<cave_id>[^/]+)/?$', views_caves.cave, name="cave"),
url(r'^caveslug/([^/]+)/?$', views_caves.caveSlug, name="caveSlug"),
url(r'^cave/entrance/([^/]+)/?$', views_caves.caveEntrance),
@@ -73,91 +71,100 @@ actualurlpatterns = [
# url(r'^jgtuploadfile$', view_surveys.jgtuploadfile, name="jgtuploadfile"),
url(r'^cave/(?P<cave_id>[^/]+)/?(?P<ent_letter>[^/])$', ent),
url(r'^cave/(?P<slug>[^/]+)/edit/$', views_caves.edit_cave, name="edit_cave"),
url(r'^cave/(?P<slug>[^/]+)/edit/$', edit_cave, name="edit_cave"),
#(r'^cavesearch', caveSearch),
# url(r'^cave/(?P<cave_id>[^/]+)/(?P<year>\d\d\d\d)-(?P<qm_id>\d*)(?P<grade>[ABCDX]?)?$', views_caves.qm, name="qm"),
url(r'^cave/qm/(?P<qm_id>[^/]+)?$', views_caves.qm, name="qm"),
url(r'^prospecting_guide/$', views_caves.prospecting),
url(r'^cave/(?P<cave_id>[^/]+)/(?P<year>\d\d\d\d)-(?P<qm_id>\d*)(?P<grade>[ABCDX]?)?$', views_caves.qm, name="qm"),
url(r'^prospecting_guide/$', views_caves.prospecting),
url(r'^logbooksearch/(.*)/?$', views_logbooks.logbookSearch),
url(r'^statistics/?$', views_other.stats, name="stats"),
url(r'^survey/?$', surveyindex, name="survey"),
url(r'^survey/(?P<year>\d\d\d\d)\#(?P<wallet_number>\d*)$', survey, name="survey"),
url(r'^controlpanel/?$', views_other.controlPanel, name="controlpanel"),
url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
url(r'^CAVETAB2\.CSV/?$', views_other.downloadCavetab, name="downloadcavetab"),
url(r'^Surveys\.csv/?$', views_other.downloadSurveys, name="downloadsurveys"),
url(r'^logbook(?P<year>\d\d\d\d)\.(?P<extension>.*)/?$',views_other.downloadLogbook),
url(r'^logbook/?$',views_other.downloadLogbook, name="downloadlogbook"),
url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),
url(r'^downloadqms$', views_other.downloadQMs),
url(r'^cave/(?P<cave_id>[^/]+)/qm\.csv/?$', views_other.downloadQMs, name="downloadqms"),
(r'^downloadqms$', views_other.downloadQMs),
url(r'^eyecandy$', views_other.eyecandy),
url(r'^admin/doc/?', include('django.contrib.admindocs.urls')),
(r'^admin/doc/?', include('django.contrib.admindocs.urls')),
#url(r'^admin/(.*)', admin.site.get_urls, name="admin"),
url(r'^admin/', include(admin.site.urls)),
(r'^admin/', include(admin.site.urls)),
# don't know why this needs troggle/ in here. nice to get it out
# url(r'^troggle/media-admin/(?P<path>.*)$', static, {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}),
url(r'^troggle/media-admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ADMIN_DIR, 'show_indexes':True}),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^profiles/', include('profiles.urls')),
(r'^accounts/', include('registration.backends.default.urls')),
(r'^profiles/', include('profiles.urls')),
# (r'^personform/(.*)$', personForm),
url(r'^site_media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
(r'^site_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
(r'^tinymce_media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.TINY_MCE_MEDIA_ROOT, 'show_indexes': True}),
url(r'^survexblock/(.+)$', views_caves.survexblock, name="survexblock"),
url(r'^survexfile/(?P<survex_file>.*?)\.svx$', views_survex.svx, name="svx"),
url(r'^survexfile/(?P<survex_file>.*?)\.3d$', views_survex.threed, name="threed"),
url(r'^survexfile/(?P<survex_file>.*?)\.log$', views_survex.svxraw),
url(r'^survexfile/(?P<survex_file>.*?)\.err$', views_survex.err),
url(r'^survexfile/caves/$', views_survex.survexcaveslist, name="survexcaveslist"),
url(r'^survexfile/caves/(?P<survex_cave>.*)$', views_survex.survexcavesingle, name="survexcavessingle"),
url(r'^survexfileraw/(?P<survex_file>.*?)\.svx$', views_survex.svxraw, name="svxraw"),
url(r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir),
url(r'^survey_files/download/(?P<path>.*)$', view_surveys.download),
(r'^survey_files/listdir/(?P<path>.*)$', view_surveys.listdir),
(r'^survey_files/download/(?P<path>.*)$', view_surveys.download),
#(r'^survey_files/upload/(?P<path>.*)$', view_surveys.upload),
#(r'^survey_scans/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.SURVEY_SCANS, 'show_indexes':True}),
url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
view_surveys.surveyscansingle, name="surveyscansingle"),
url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)$', view_surveys.tunnelfile, name="tunnelfile"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)/upload$',view_surveys.tunnelfileupload, name="tunnelfileupload"),
#url(r'^tunneldatainfo/(?P<path>.+?\.xml)$', view_surveys.tunnelfileinfo, name="tunnelfileinfo"),
# url(r'^photos/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
url(r'^survey_scans/$', view_surveys.surveyscansfolders, name="surveyscansfolders"),
url(r'^survey_scans/(?P<path>[^/]+)/$', view_surveys.surveyscansfolder, name="surveyscansfolder"),
url(r'^survey_scans/(?P<path>[^/]+)/(?P<file>[^/]+(?:png|jpg|jpeg))$',
view_surveys.surveyscansingle, name="surveyscansingle"),
url(r'^tunneldata/$', view_surveys.tunneldata, name="tunneldata"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)$', view_surveys.tunnelfile, name="tunnelfile"),
url(r'^tunneldataraw/(?P<path>.+?\.xml)/upload$',view_surveys.tunnelfileupload, name="tunnelfileupload"),
#url(r'^tunneldatainfo/(?P<path>.+?\.xml)$', view_surveys.tunnelfileinfo, name="tunnelfileinfo"),
(r'^photos/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
url(r'^prospecting/(?P<name>[^.]+).png$', prospecting_image, name="prospecting_image"),
# (r'^gallery/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
# (r'^gallery/(?P<path>.*)$', 'django.views.static.serve',
# {'document_root': settings.PHOTOS_ROOT, 'show_indexes':True}),
# for those silly ideas
url(r'^experimental.*$', views_logbooks.experimental, name="experimental"),
#url(r'^trip_report/?$',views_other.tripreport,name="trip_report")
url(r'^(.*)_edit$', editflatpage, name="editflatpage"),
url(r'^(.*)$', flatpage, name="flatpage"),
]
url(r'^(.*)_edit$', 'flatpages.views.editflatpage', name="editflatpage"),
url(r'^(.*)$', 'flatpages.views.flatpage', name="flatpage"),
)
#Allow prefix to all urls
urlpatterns = [
url('^%s' % settings.DIR_ROOT, include(actualurlpatterns))
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = patterns ('',
('^%s' % settings.DIR_ROOT, include(actualurlpatterns))
)
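The hunk above is the heart of the Django 1.8 change in urls.py: the deprecated patterns() helper is replaced by plain Python lists of url() entries, string view references become imported callables, and static files are served via static(). A minimal sketch of the new shape (only a couple of representative entries are shown; a module import stands in for the star imports used at the top of the file):

    # urls.py in the Django >= 1.8 style: urlpatterns is a plain list of url()s.
    from django.conf import settings
    from django.conf.urls import include, url
    from django.conf.urls.static import static
    from django.contrib import admin

    from core import views_other   # module import instead of the star import above

    admin.autodiscover()

    actualurlpatterns = [
        url(r'^troggle$', views_other.frontpage, name="frontpage"),
        url(r'^admin/', include(admin.site.urls)),
        # ... the remaining url() entries from the hunk above ...
    ]

    # Prefix everything with DIR_ROOT and serve static files in development.
    urlpatterns = [
        url(r'^%s' % settings.DIR_ROOT, include(actualurlpatterns)),
    ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)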


@@ -1,5 +1,4 @@
from django.conf import settings
from django.shortcuts import render
import random, re, logging
from troggle.core.models import CaveDescription
@@ -23,12 +22,12 @@ def randomLogbookSentence():
#Choose again if there are no sentences (this happens if it is a placeholder entry)
while len(re.findall('[A-Z].*?\.',randSent['entry'].text))==0:
randSent['entry']=LogbookEntry.objects.order_by('?')[0]
#Choose a random sentence from that entry. Store the sentence as randSent['sentence'], and the number of that sentence in the entry as randSent['number']
sentenceList=re.findall('[A-Z].*?\.',randSent['entry'].text)
randSent['number']=random.randrange(0,len(sentenceList))
randSent['sentence']=sentenceList[randSent['number']]
return randSent
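A hedged usage sketch for randomLogbookSentence(): a view dropping the random sentence into a template context. The import path, view name and template name are assumptions; only the returned keys ('entry', 'sentence', 'number') come from the code above.

    # Illustrative caller; names other than randomLogbookSentence() and the
    # dict keys it returns are assumptions.
    from django.shortcuts import render
    from troggle.core.utils import randomLogbookSentence   # assumed module path

    def eyecandy(request):
        randSent = randomLogbookSentence()
        return render(request, "eyecandy.html", {
            "sentence": randSent["sentence"],   # the chosen sentence text
            "entry": randSent["entry"],         # the LogbookEntry it came from
            "number": randSent["number"],       # its index within the entry
        })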
@@ -37,22 +36,22 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
-if instance does not exist in DB: add instance to DB, return (new instance, True)
-if instance exists in DB and was modified using Troggle: do nothing, return (existing instance, False)
-if instance exists in DB and was not modified using Troggle: overwrite instance, return (instance, False)
The checking is accomplished using Django's get_or_create and the new_since_parsing boolean field
defined in core.models.TroggleModel.
"""
instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
if not created and not instance.new_since_parsing:
for k, v in list(nonLookupAttribs.items()): #overwrite the existing attributes from the logbook text (except date and title)
for k, v in nonLookupAttribs.items(): #overwrite the existing attributes from the logbook text (except date and title)
setattr(instance, k, v)
instance.save()
if created:
logging.info(str(instance) + ' was just added to the database for the first time. \n')
if not created and instance.new_since_parsing:
logging.info(str(instance) + " has been modified using Troggle, so the current script left it as is. \n")
@@ -60,6 +59,21 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
logging.info(str(instance) + " existed in the database unchanged since last parse. It was overwritten by the current script. \n")
return (instance, created)
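A hedged usage sketch of save_carefully() as a parser might call it, following the contract described in the docstring above; the model and field names here are illustrative only.

    # Illustrative caller: identify the object by lookupAttribs, overwrite the
    # rest unless it was edited through Troggle (new_since_parsing == True).
    from troggle.core.models import LogbookEntry     # assumed import path
    from troggle.core.utils import save_carefully    # assumed import path

    def store_entry_from_logbook(date, slug, title, text):
        lookupAttribs = {"date": date, "slug": slug}          # identity fields
        nonLookupAttribs = {"title": title, "text": text}     # overwritable fields
        entry, created = save_carefully(LogbookEntry, lookupAttribs, nonLookupAttribs)
        return entry, created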
def render_with_context(req, *args, **kwargs):
"""this is the snippet from http://www.djangosnippets.org/snippets/3/
Django uses Context, not RequestContext when you call render_to_response.
We always want to use RequestContext, so that django adds the context from
settings.TEMPLATE_CONTEXT_PROCESSORS. This way we automatically get
necessary settings variables passed to each template. So we use a custom
method, render_response instead of render_to_response. Hopefully future
Django releases will make this unnecessary."""
from django.shortcuts import render_to_response
from django.template import RequestContext
kwargs['context_instance'] = RequestContext(req)
return render_to_response(*args, **kwargs)
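As the docstring hopes, later Django releases make this wrapper redundant: from Django 1.8 the django.shortcuts.render() shortcut always renders with the configured context processors, so a plain render() call gives the same behaviour. A minimal sketch (view and template names are illustrative):

    # Django >= 1.8 equivalent of render_with_context(): render() already applies
    # the context processors from settings, so no RequestContext plumbing is needed.
    from django.shortcuts import render

    def statistics(request):
        context = {"message": "hello from the context"}
        return render(request, "statistics.html", context)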
re_body = re.compile(r"\<body[^>]*\>(.*)\</body\>", re.DOTALL)
re_title = re.compile(r"\<title[^>]*\>(.*)\</title\>", re.DOTALL)
@@ -80,7 +94,7 @@ def get_single_match(regex, text):
def href_to_wikilinks(matchobj):
"""
Given an html link, checks for possible valid wikilinks.
Returns the first valid wikilink. Valid means the target
object actually exists.
"""
@@ -91,7 +105,7 @@ def href_to_wikilinks(matchobj):
return matchobj.group()
#except:
#print 'fail'
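Most of the body of href_to_wikilinks() is elided from this hunk, so here is only a hedged sketch of the behaviour the docstring describes; the model lookup and field name are assumptions, and it relies on the CaveDescription import shown at the top of this file.

    # Sketch only: rewrite an <a href="...">text</a> match as a wikilink when a
    # matching CaveDescription exists, otherwise leave the HTML untouched.
    def href_to_wikilinks_sketch(matchobj):
        target = matchobj.group("target").strip('"')
        text = matchobj.group("text")
        if CaveDescription.objects.filter(short_name=target).exists():  # assumed field
            return "[[cavedescription:%s|%s]]" % (target, text)
        return matchobj.group()   # no valid wikilink target; keep the original link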
re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
(re.compile(r"\<i\>(.*?)\</i\>", re.DOTALL), r"''\1''"),
@@ -107,12 +121,12 @@ re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
(re.compile(r"\<a\s+href=['\"]#([^'\"]*)['\"]\s*\>(.*?)\</a\>", re.DOTALL), r"[[cavedescription:\1|\2]]"), #assumes that all links with target ids are cave descriptions. Not great.
(re.compile(r"\[\<a\s+href=['\"][^'\"]*['\"]\s+id=['\"][^'\"]*['\"]\s*\>([^\s]*).*?\</a\>\]", re.DOTALL), r"[[qm:\1]]"),
(re.compile(r'<a\shref="?(?P<target>.*)"?>(?P<text>.*)</a>'),href_to_wikilinks),
]
def html_to_wiki(text, codec = "utf-8"):
if type(text) == str:
text = str(text, codec)
text = unicode(text, codec)
text = re.sub("</p>", r"", text)
text = re.sub("<p>$", r"", text)
text = re.sub("<p>", r"\n\n", text)