ran 'black' to reformat all the core files

Philip Sargent 2023-01-30 19:04:36 +00:00
parent d06dd3d166
commit 7808005498
28 changed files with 3844 additions and 3075 deletions
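black rewrites Python source into one canonical layout: string quotes are normalised to double quotes, over-long calls are split with one argument per line, and parenthesised import lists are re-wrapped, which is the pattern running through the whole diff below. The commit itself does not record the exact command or options used, so the snippet that follows is only a minimal sketch of the equivalent programmatic call; black.format_str and Mode are black's public API, but the default 88-character line length shown here is an assumption and the project may have passed a different limit on the command line.

import black

# Minimal sketch only -- the exact invocation used for this commit is not recorded.
# On the command line the equivalent is typically just:  black <paths>
src = "backend = 'django.core.mail.backends.locmem.EmailBackend'\n"

# format_str() applies the same rules as the CLI; Mode() defaults to an
# 88-character line length, whereas this project may have used a longer limit.
print(black.format_str(src, mode=black.Mode()))
# -> backend = "django.core.mail.backends.locmem.EmailBackend"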

View File

@@ -27,36 +27,44 @@ from django.test import Client, SimpleTestCase, TestCase
class SimpleTest(SimpleTestCase):
def test_test_setting(self):
from django.conf import settings
self.assertEqual(settings.EMAIL_BACKEND, 'django.core.mail.backends.locmem.EmailBackend')
from django.conf import settings
self.assertEqual(settings.EMAIL_BACKEND, "django.core.mail.backends.locmem.EmailBackend")
import troggle.settings as settings
def test_import_TroggleModel(self):
from troggle.core.models.troggle import TroggleModel
def test_import_Cave(self):
from troggle.core.models.caves import Cave
def test_import_parsers_surveys(self):
#from PIL import Image
# from PIL import Image
from functools import reduce
from troggle.core.utils import save_carefully
def test_import_parsers_survex(self):
import troggle.core.models.caves as models_caves
import troggle.core.models.survex as models_survex
import troggle.core.models.troggle as models
import troggle.settings as settings
from troggle.core.views import (caves, drawings, other, scans,
statistics, survex, uploads)
from troggle.core.views import caves, drawings, other, scans, statistics, survex, uploads
from troggle.core.views.caves import cavepage, ent
from troggle.core.views.other import frontpage
from troggle.parsers.people import GetPersonExpeditionNameLookup
def test_import_views_uploads(self):
from troggle.core.views.uploads import dwgupload, scanupload
def test_import_parsers_QMs(self):
from troggle.core.models.logbooks import QM
def test_import_parsers_people(self):
from html import unescape
from unidecode import unidecode
def test_import_parsers_logbooks(self):
from django.template.defaultfilters import slugify
from django.utils.timezone import get_current_timezone, make_aware
@@ -64,6 +72,7 @@ class SimpleTest(SimpleTestCase):
from parsers.people import GetPersonExpeditionNameLookup
from troggle.core.models.logbooks import CaveSlug, QM, LogbookEntry, PersonLogEntry
from troggle.core.models.troggle import DataIssue, Expedition
def test_import_core_views_caves(self):
from django.conf import settings
from django.contrib.auth.decorators import login_required
@@ -71,13 +80,11 @@ class SimpleTest(SimpleTestCase):
from django.shortcuts import get_object_or_404, render
import troggle.core.views.expo
from troggle.core.forms import (CaveAndEntranceFormSet, CaveForm,
EntranceForm, EntranceLetterForm)
from troggle.core.models.caves import (Area, Cave, CaveAndEntrance,
Entrance,
EntranceSlug, SurvexStation)
from troggle.core.forms import CaveAndEntranceFormSet, CaveForm, EntranceForm, EntranceLetterForm
from troggle.core.models.caves import Area, Cave, CaveAndEntrance, Entrance, EntranceSlug, SurvexStation
from troggle.core.models.troggle import Expedition
from troggle.core.views.auth import login_required_if_public
def test_import_parsers_mix(self):
import troggle.parsers.caves
import troggle.parsers.drawings
@@ -87,7 +94,8 @@ class SimpleTest(SimpleTestCase):
import troggle.parsers.scans
import troggle.parsers.survex
import troggle.settings
from troggle.parsers.logbooks import GetCaveLookup
from troggle.parsers.logbooks import GetCaveLookup
def test_import_imports(self):
from django.contrib.auth.models import User
from django.core import management
@@ -107,25 +115,33 @@ class SimpleTest(SimpleTestCase):
from troggle.core.views import caves, other, statistics, survex
from troggle.core.views.auth import expologin, expologout
from troggle.core.views.caves import cavepage, ent
from troggle.core.views.expo import (editexpopage, expofiles_redirect,
expofilessingle, expopage, map,
mapfile, mediapage)
from troggle.core.views.logbooks import (Expeditions_jsonListView,
Expeditions_tsvListView,
expedition,
get_logbook_entries,
get_people, logbookentry,
notablepersons, person,
personexpedition)
from troggle.core.views.expo import (
editexpopage,
expofiles_redirect,
expofilessingle,
expopage,
map,
mapfile,
mediapage,
)
from troggle.core.views.logbooks import (
Expeditions_jsonListView,
Expeditions_tsvListView,
expedition,
get_logbook_entries,
get_people,
logbookentry,
notablepersons,
person,
personexpedition,
)
from troggle.core.views.other import controlpanel
from troggle.core.views.prospect import prospecting, prospecting_image
from troggle.core.views.statistics import (dataissues, pathsreport,
stats)
from troggle.core.views.survex import (survexcavesingle,
survexcaveslist, svx)
from troggle.core.views.statistics import dataissues, pathsreport, stats
from troggle.core.views.survex import survexcavesingle, survexcaveslist, svx
class SubprocessTest(TestCase):
@classmethod
def setUpTestData(cls):
pass
@@ -137,91 +153,103 @@ class SubprocessTest(TestCase):
pass
def test_utf8(self):
'''Expects that utf8 is the default encoding when opening files
'''
"""Expects that utf8 is the default encoding when opening files"""
import locale
import sys
self.assertTrue( sys.getdefaultencoding() == "utf-8", f'{sys.getdefaultencoding()} - UTF8 error in getdefaultencoding')
self.assertTrue( sys.getfilesystemencoding() == "utf-8", f'{sys.getfilesystemencoding()} - UTF8 error in getfilesystemencoding')
self.assertTrue( locale.getdefaultlocale()[1] == "UTF-8", f'{locale.getdefaultlocale()} - UTF8 error in locale.getdefaultlocale')
self.assertTrue( locale.getpreferredencoding() == "UTF-8", f'{locale.getpreferredencoding()} - UTF8 error in locale.getpreferredencoding')
self.assertTrue(
sys.getdefaultencoding() == "utf-8", f"{sys.getdefaultencoding()} - UTF8 error in getdefaultencoding"
)
self.assertTrue(
sys.getfilesystemencoding() == "utf-8",
f"{sys.getfilesystemencoding()} - UTF8 error in getfilesystemencoding",
)
self.assertTrue(
locale.getdefaultlocale()[1] == "UTF-8",
f"{locale.getdefaultlocale()} - UTF8 error in locale.getdefaultlocale",
)
self.assertTrue(
locale.getpreferredencoding() == "UTF-8",
f"{locale.getpreferredencoding()} - UTF8 error in locale.getpreferredencoding",
)
def test_installs(self):
''' Expects external software installed: cavern, survexport, git
"""Expects external software installed: cavern, survexport, git
(but not whether it actually works)
'''
"""
import troggle.settings as settings
for i in [settings.CAVERN, settings.SURVEXPORT, settings.GIT]:
# Define command as string and then split() into list format
cmd = f'which {i}'.split()
cmd = f"which {i}".split()
try:
sp = subprocess.check_call(cmd, shell=False)
except subprocess.CalledProcessError:
self.assertTrue( False, f'no {i} installed')
self.assertTrue(False, f"no {i} installed")
def test_repos_git_status(self):
''' Expects clean git repos with no added files and no merge failures
'''
"""Expects clean git repos with no added files and no merge failures"""
from pathlib import Path
import troggle.settings as settings
TROGGLE_PATH = Path(settings.REPOS_ROOT_PATH) / "troggle"
for cwd in [settings.SURVEX_DATA, settings.EXPOWEB, settings.DRAWINGS_DATA, TROGGLE_PATH]:
for cwd in [settings.SURVEX_DATA, settings.EXPOWEB, settings.DRAWINGS_DATA, TROGGLE_PATH]:
sp = subprocess.run([settings.GIT, "status"], cwd=cwd, capture_output=True, text=True)
out = str(sp.stdout)
if len(out) > 160:
out = out[:75] + "\n <Long output curtailed>\n" + out[-75:]
print(f'git output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}')
print(f"git output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}")
if sp.returncode != 0:
print(f'git output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}')
self.assertTrue( sp.returncode == 0, f'{cwd} - git is unhappy')
print(f"git output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}")
self.assertTrue(sp.returncode == 0, f"{cwd} - git is unhappy")
content = sp.stdout
ph = r'Your branch is up[ -]to[ -]date'
phmatch = re.search(ph, content)
ph = r"Your branch is up[ -]to[ -]date"
phmatch = re.search(ph, content)
msg = f'{cwd} - Failed to find expected git output: "{ph}"'
self.assertIsNotNone(phmatch, msg)
ph1 = r'no changes added to commit'
phmatch1 = re.search(ph1, content)
ph2 = r'nothing to commit'
phmatch2 = re.search(ph2, content)
ph1 = r"no changes added to commit"
phmatch1 = re.search(ph1, content)
ph2 = r"nothing to commit"
phmatch2 = re.search(ph2, content)
phmatch = phmatch1 or phmatch2
msg = f'{cwd} - Failed to find expected git output: "{ph1}" or "{ph2}"'
self.assertIsNotNone(phmatch, msg)
def test_loser_survex_status(self):
''' Expects no failures of survex files
'''
"""Expects no failures of survex files"""
from pathlib import Path
import troggle.settings as settings
cwd = settings.SURVEX_DATA
for survey in ["1623.svx", "1626.svx"]:
for survey in ["1623.svx", "1626.svx"]:
sp = subprocess.run([settings.CAVERN, survey], cwd=cwd, capture_output=True, text=True)
out = str(sp.stdout)
if len(out) > 160:
out = out[:75] + "\n <Long output curtailed>\n" + out[-75:]
# print(f'survex output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}')
if sp.returncode != 0:
print(f'survex output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}')
self.assertTrue( sp.returncode == 0, f'{cwd} - survex is unhappy')
print(
f"survex output: {cwd}:\n # {sp.stderr=}\n # sp.stdout={out} \n # return code: {str(sp.returncode)}"
)
self.assertTrue(sp.returncode == 0, f"{cwd} - survex is unhappy")
content = sp.stdout
ph = r'Total length of survey legs'
phmatch = re.search(ph, content)
ph = r"Total length of survey legs"
phmatch = re.search(ph, content)
msg = f'{cwd} - Failed to find expected survex output: "{ph}"'
self.assertIsNotNone(phmatch, msg)
ph1 = r'Time used'
phmatch1 = re.search(ph1, content)
ph2 = r'vertical length of survey le'
phmatch2 = re.search(ph2, content)
ph1 = r"Time used"
phmatch1 = re.search(ph1, content)
ph2 = r"vertical length of survey le"
phmatch2 = re.search(ph2, content)
phmatch = phmatch1 or phmatch2
msg = f'{cwd} - Failed to find expected survex output: "{ph1}" or "{ph2}"'
self.assertIsNotNone(phmatch, msg)

View File

@@ -1,4 +1,4 @@
'''
"""
We are using unittest for troggle.
Note that the database has not been parsed from the source files when these tests are run,
@@ -17,13 +17,13 @@ which rely on database resolution will fail unless a fixture has been set up for
them.
https://docs.djangoproject.com/en/3.0/topics/testing/tools/
'''
"""
todo = '''ADD TESTS when we are redirecting /expofiles/ to a remote file-delivering site
todo = """ADD TESTS when we are redirecting /expofiles/ to a remote file-delivering site
- Add test for running cavern to produce a .3d file
'''
"""
import re
import unittest
@@ -32,436 +32,450 @@ from http import HTTPStatus
from django.test import Client, SimpleTestCase, TestCase
#class SimplePageTest(unittest.TestCase):
# class SimplePageTest(unittest.TestCase):
class PageTests(TestCase):
'''These tests may appear to be redundant, but in fact they exercise different bits of code. The urls.py
"""These tests may appear to be redundant, but in fact they exercise different bits of code. The urls.py
dispatcher is sending these URLs view via different 'view' handlers, and they all need verifying.
'''
"""
@classmethod
def setUpTestData(cls):
# Set up data for the whole TestCase
#cls.foo = Foo.objects.create(bar="Test")
# Some test using self.foo in tests below..
# cls.foo = Foo.objects.create(bar="Test")
# Some test using self.foo in tests below..
# read in some SQL ?
pass
def setUp(self):
# Every test needs a client.
self.client = Client()
def test_expoweb_root(self):
response = self.client.get('')
response = self.client.get("")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r'CUCC in Austria'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"CUCC in Austria"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_root_slash(self):
response = self.client.get('/')
response = self.client.get("/")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r'CUCC in Austria'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"CUCC in Austria"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_paths(self):
response = self.client.get('/pathsreport')
response = self.client.get("/pathsreport")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'This report is generated from'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"This report is generated from"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_dir(self):
response = self.client.get('/handbook')
response = self.client.get("/handbook")
content = response.content.decode()
self.assertEqual(response.status_code, 302) # directory, so redirects to /index.htm
self.assertEqual(response.status_code, 302) # directory, so redirects to /index.htm
def test_expoweb_dirslash(self):
response = self.client.get('/handbook/')
response = self.client.get("/handbook/")
content = response.content.decode()
self.assertEqual(response.status_code, 302) # directory, so redirects to /index.htm
self.assertEqual(response.status_code, 302) # directory, so redirects to /index.htm
def test_expoweb_dir_no_index(self):
response = self.client.get('/handbook/troggle')
response = self.client.get("/handbook/troggle")
content = response.content.decode()
self.assertEqual(response.status_code, 404)
ph = r'Page not found handbook/troggle/index.html'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"Page not found handbook/troggle/index.html"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_dir_with_index_htm(self):
response = self.client.get('/years/1999/index.htm')
response = self.client.get("/years/1999/index.htm")
content = response.content.decode()
self.assertEqual(response.status_code, 200) # directory, so redirects to /index.htm
ph = r'Passage descriptions for 1999'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200) # directory, so redirects to /index.htm
ph = r"Passage descriptions for 1999"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_dir_with_index_html(self):
response = self.client.get('/years/2015/index.html')
response = self.client.get("/years/2015/index.html")
content = response.content.decode()
self.assertEqual(response.status_code, 200) # directory, so redirects to /index.htm
ph = r'Things left at top camp 2014'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200) # directory, so redirects to /index.htm
ph = r"Things left at top camp 2014"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_dir_with_index2(self):
response = self.client.get('/handbook/index.htm')
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r'Introduction to expo'
phmatch = re.search(ph, content)
#print("\n ! - test_expoweb_dir_with_index2\n{}\n{}".format(response.reason_phrase, content))
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
def test_expoweb_htm(self):
response = self.client.get('/handbook/index.htm')
response = self.client.get("/handbook/index.htm")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r'Introduction to expo'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"Introduction to expo"
phmatch = re.search(ph, content)
# print("\n ! - test_expoweb_dir_with_index2\n{}\n{}".format(response.reason_phrase, content))
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_htm(self):
response = self.client.get("/handbook/index.htm")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r"Introduction to expo"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_notfound(self):
response = self.client.get('/handbook/_test_zyxxypqrqx.html')
response = self.client.get("/handbook/_test_zyxxypqrqx.html")
content = response.content.decode()
self.assertEqual(response.status_code, 404)
ph = r'<h1>Page not found'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"<h1>Page not found"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_no_dir(self):
# slash where there should not be one
response = self.client.get('/handbook/_test_zyxxypqrqx/')
response = self.client.get("/handbook/_test_zyxxypqrqx/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"<h1>Directory not found"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_troggle_default(self):
# default page after logon
response = self.client.get('/troggle')
response = self.client.get("/troggle")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'expeditions the club has undertaken'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"expeditions the club has undertaken"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_troggle_default_slash(self):
response = self.client.get('/troggle/')
response = self.client.get("/troggle/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"<h1>Directory not found"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_expoweb_via_areaid(self):
# the dispatcher takes a detour via the cave rendering procedure for this
response = self.client.get('/guidebook/t/via201.jpg')
response = self.client.get("/guidebook/t/via201.jpg")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 6057)
self.assertEqual(len(response.content), 6057)
def test_cave_kataster_not_found(self):
# database not loaded, so no caves found; so looks for a generic expopage and fails
response = self.client.get('/1623/115.htm')
response = self.client.get("/1623/115.htm")
self.assertEqual(response.status_code, 404)
content = response.content.decode()
ph = r"Page not found 1623/115.htm"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_caves_page(self):
response = self.client.get('/caves')
response = self.client.get("/caves")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"Cave Number Index - kept updated"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_caves_page_kataster_not_found(self):
response = self.client.get('/caves')
response = self.client.get("/caves")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"115"
phmatch = re.search(ph, content)
self.assertIsNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_ss(self):
response = self.client.get('/survey_scans/')
self.assertEqual(response.status_code, 200)
ph = r'All Survey scans folders '
response = self.client.get("/survey_scans/")
self.assertEqual(response.status_code, 200)
ph = r"All Survey scans folders "
content = response.content.decode()
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_admin(self):
# see the login page
response = self.client.get('/admin/login/')
response = self.client.get("/admin/login/")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
ph = r'<h1 id="site-name">Troggle database administration</h1>'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_admindocs_exped(self):
# Get redirected to login page
response = self.client.get('/admin/doc/models/core.expedition/')
response = self.client.get("/admin/doc/models/core.expedition/")
content = response.content.decode()
self.assertEqual(response.status_code, 302)
def test_page_expofiles_root_dir(self):
# Root expofiles - odd interaction with url parsing so needs testing
response = self.client.get('/expofiles')
response = self.client.get("/expofiles")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [
r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/',
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofiles_root_slash_dir(self):
# Root expofiles - odd interaction with url parsing so needs testing
response = self.client.get('/expofiles/')
response = self.client.get("/expofiles/")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [
r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/',
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofiles_badness(self):
# should display expofiles directory contents not its parent
response = self.client.get('/expofiles/99badness99')
response = self.client.get("/expofiles/99badness99")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [
r'a href="/expofiles/geotiffsurveys">/geotiffsurveys/',
r'<a href="/expofiles/photos">/photos/',
r'<a href="/expofiles/surveyscans">/surveyscans/',
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofiles_docs_dir(self):
# Flat file tests.
response = self.client.get('/expofiles/documents/')
response = self.client.get("/expofiles/documents/")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'a href="/expofiles/documents/bier-tent-instructions.pdf">bier-tent-instructions.pdf',
r'a href="/expofiles/documents/boc.pdf">boc.pdf',
r'a href="/expofiles/documents/bierbook">/bierbook' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [
r'a href="/expofiles/documents/bier-tent-instructions.pdf">bier-tent-instructions.pdf',
r'a href="/expofiles/documents/boc.pdf">boc.pdf',
r'a href="/expofiles/documents/bierbook">/bierbook',
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_survey_scans_dir(self):
# Flat file tests.
response = self.client.get('/expofiles/surveyscans')
response = self.client.get("/expofiles/surveyscans")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'<a href="/expofiles/surveyscans/2004">/2004/',
r'<a href="/expofiles/surveyscans/1989LUSS">/1989LUSS/',
r'<a href="/expofiles/surveyscans/2018">/2018' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [
r'<a href="/expofiles/surveyscans/2004">/2004/',
r'<a href="/expofiles/surveyscans/1989LUSS">/1989LUSS/',
r'<a href="/expofiles/surveyscans/2018">/2018',
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_folk(self):
# This page is separately generated, so it has the full data content
response = self.client.get('/folk/index.htm')
content = response.content.decode()
self.assertEqual(response.status_code, 200)
for ph in [ r'involves some active contribution',
r'Naomi Griffiths',
r'Gail Smith',
r'Phil Wigglesworth',
r'A more obscure record of longest gap between expos has' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
response = self.client.get("/folk/index.htm")
content = response.content.decode()
self.assertEqual(response.status_code, 200)
for ph in [
r"involves some active contribution",
r"Naomi Griffiths",
r"Gail Smith",
r"Phil Wigglesworth",
r"A more obscure record of longest gap between expos has",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofile_documents(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/expofiles/documents')
response = self.client.get("/expofiles/documents")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'notice_generale_cordes_courant'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"notice_generale_cordes_courant"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofile_documents_slash(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/expofiles/documents/')
response = self.client.get("/expofiles/documents/")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'notice_generale_cordes_courant'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"notice_generale_cordes_courant"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_expofile_document_loeffler_pdf(self):
# Flat file tests.
response = self.client.get('/expofiles/documents/surveying/tunnel-loefflerCP35-only.pdf')
response = self.client.get("/expofiles/documents/surveying/tunnel-loefflerCP35-only.pdf")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 2299270)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 2299270)
def test_page_expofile_document_rope_pdf(self):
# Flat file tests.
response = self.client.get('/expofiles/documents/rope-age-agm-2019.pdf')
response = self.client.get("/expofiles/documents/rope-age-agm-2019.pdf")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 76197)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 76197)
def test_page_expofile_document_png(self):
# Flat file tests.
response = self.client.get('/expofiles/documents/callout-2012.png')
response = self.client.get("/expofiles/documents/callout-2012.png")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 69921)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 69921)
def test_page_expofile_writeup(self):
# Flat file tests.
response = self.client.get('/expofiles/writeups/1982/logbook1982.pdf')
response = self.client.get("/expofiles/writeups/1982/logbook1982.pdf")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 12915413)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 12915413)
def test_page_site_media_ok(self):
# Flat file tests.
response = self.client.get('/site_media/surveyHover.gif')
response = self.client.get("/site_media/surveyHover.gif")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 39482 ) # need to check it is not just an error page
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 39482) # need to check it is not just an error page
def test_page_site_media_css(self):
# Flat file tests.
response = self.client.get('/site_media/css/trog3.css')
response = self.client.get("/site_media/css/trog3.css")
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
content = response.content.decode() # need to check it is not just an error page
ph = r'This text is used by the test system to determine that trog3.css loaded correctly'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
self.assertEqual(response.status_code, 200)
content = response.content.decode() # need to check it is not just an error page
ph = r"This text is used by the test system to determine that trog3.css loaded correctly"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_photos_ok(self):
# Flat file tests.
response = self.client.get('/photos/2018/PhilipSargent/corin.jpg') #exists
response = self.client.get("/photos/2018/PhilipSargent/corin.jpg") # exists
if response.status_code != 200:
self.assertEqual(response.status_code, 302)
self.assertEqual(response.status_code, 302)
if response.status_code != 302:
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 67487 ) # need to check it is not just an error page
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.content), 67487) # need to check it is not just an error page
def test_page_photos_not_ok(self):
# Flat file tests.
response = self.client.get('/photos/2018/PhilipSargent/_corin.jpeg') # does not exist
response = self.client.get("/photos/2018/PhilipSargent/_corin.jpeg") # does not exist
self.assertEqual(response.status_code, 404)
content = response.content.decode()
ph = r'<title>Page not found 2018/PhilipSargent/_corin.jpeg</title>'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"<title>Page not found 2018/PhilipSargent/_corin.jpeg</title>"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_photos_dir(self):
# Flat file tests.
response = self.client.get('/photos/2018/PhilipSargent/')
response = self.client.get("/photos/2018/PhilipSargent/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'Directory not displayed'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"Directory not displayed"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_survey_scans_empty(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/survey_scans/')
response = self.client.get("/survey_scans/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'contains the scanned original in-cave survey notes and sketches'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"contains the scanned original in-cave survey notes and sketches"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_dwgdataraw_empty(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/dwgdataraw/')
response = self.client.get("/dwgdataraw/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"<h1>Directory not found"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_dwgallfiles_empty(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/dwgfiles')
response = self.client.get("/dwgfiles")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'All Tunnel and Therion files',
r'<th>Wallets</th><th>Scan files in the wallets</th><th>Frames</th></tr>']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
for ph in [
r"All Tunnel and Therion files",
r"<th>Wallets</th><th>Scan files in the wallets</th><th>Frames</th></tr>",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_dwgallfiles_empty_slash(self):
# this gets an empty page as the database has not been loaded
response = self.client.get('/dwgfiles/')
response = self.client.get("/dwgfiles/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
for ph in [ r'All Tunnel and Therion files',
r'<th>Wallets</th><th>Scan files in the wallets</th><th>Frames</th></tr>']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
for ph in [
r"All Tunnel and Therion files",
r"<th>Wallets</th><th>Scan files in the wallets</th><th>Frames</th></tr>",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_page_slash_empty(self):
# trailing slash where there should not be one
response = self.client.get('/expedition/1979/')
response = self.client.get("/expedition/1979/")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r"<h1>Directory not found"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_not_found_survexfile_cave(self):
response = self.client.get('/survexfile/not_a_real_cave_number')
response = self.client.get("/survexfile/not_a_real_cave_number")
self.assertEqual(response.status_code, 200)
content = response.content.decode()
ph = r'Cave Identifier not found in database'
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
ph = r"Cave Identifier not found in database"
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# ADD TESTS when we are redirecting /expofiles/ to get the actual files using e.g.
# import requests
@@ -470,39 +484,35 @@ class PageTests(TestCase):
# these need a fixture to load the database before they will pass
# we also need tests for invalid queries to check that error pages are right
# def test_page_survey_scans_khplan2_png(self):
# # this has an error as the database has not been loaded yet in the tests
# response = self.client.get('/survey_scans/smkhs/khplan2.png')
# if response.status_code != 200:
# self.assertEqual(response.status_code, 302)
# if response.status_code != 302:
# self.assertEqual(response.status_code, 200)
# self.assertEqual(len(response.content), 823304) # fails, but is working manually!
# def test_page_dwgdataraw_107sketch_xml(self):
# # this has an error as the database has not been loaded yet in the tests
# response = self.client.get('/dwgdataraw/107/107sketch-v2.xml')
# if response.status_code != 200:
# self.assertEqual(response.status_code, 302)
# if response.status_code != 302:
# self.assertEqual(response.status_code, 200)
# content = response.content.decode()
# for ph in [ r'tunneldate="2014-08-21 11:34:00"',
# r'<sketchsubset subname="Caves of the Loser Plateau"/>',
# r'sfsketch="ollyjen107drawings',
# r'sfsketch="surveyscans/2014/2014#01',
# r'aa-js-plan.png"' ]:
# phmatch = re.search(ph, content)
# self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
# database not loaded yet:
#response = self.client.get('/survey_scans/1991surveybook/page0002.png')
#response = self.client.get('/survey_scans/1991surveybook/')
#content = response.content.decode()
#print(content)
#png93 = re.search(r'/page0093.png">page0093.png</a></td>', content)
# def test_page_survey_scans_khplan2_png(self):
# # this has an error as the database has not been loaded yet in the tests
# response = self.client.get('/survey_scans/smkhs/khplan2.png')
# if response.status_code != 200:
# self.assertEqual(response.status_code, 302)
# if response.status_code != 302:
# self.assertEqual(response.status_code, 200)
# self.assertEqual(len(response.content), 823304) # fails, but is working manually!
# def test_page_dwgdataraw_107sketch_xml(self):
# # this has an error as the database has not been loaded yet in the tests
# response = self.client.get('/dwgdataraw/107/107sketch-v2.xml')
# if response.status_code != 200:
# self.assertEqual(response.status_code, 302)
# if response.status_code != 302:
# self.assertEqual(response.status_code, 200)
# content = response.content.decode()
# for ph in [ r'tunneldate="2014-08-21 11:34:00"',
# r'<sketchsubset subname="Caves of the Loser Plateau"/>',
# r'sfsketch="ollyjen107drawings',
# r'sfsketch="surveyscans/2014/2014#01',
# r'aa-js-plan.png"' ]:
# phmatch = re.search(ph, content)
# self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
# database not loaded yet:
# response = self.client.get('/survey_scans/1991surveybook/page0002.png')
# response = self.client.get('/survey_scans/1991surveybook/')
# content = response.content.decode()
# print(content)
# png93 = re.search(r'/page0093.png">page0093.png</a></td>', content)

View File

@@ -12,171 +12,166 @@ from troggle.core.models.troggle import Expedition, Person, PersonExpedition
class FixtureTests(TestCase):
'''These just hit the database.
"""These just hit the database.
They do not exercise the GET and url functions
'''
fixtures = ['auth_users', 'expo_areas', 'expo_caves', 'expo_exped']
ph = r'and leads in 800m of tortuous going to'
"""
fixtures = ["auth_users", "expo_areas", "expo_caves", "expo_exped"]
ph = r"and leads in 800m of tortuous going to"
def setUp(self):
pass
def tearDown(self):
pass
def test_fix_person_loaded(self):
p = Person.objects.get(fullname='Michael Sargent')
p = Person.objects.get(fullname="Michael Sargent")
self.assertEqual(str(p.first_name), "Michael")
def test_fix_person_loaded(self):
pe = PersonExpedition.objects.get(pk='681')
self.assertEqual(str(pe.person.fullname), 'Michael Sargent')
self.assertEqual(str(pe.expedition.year), '2019')
pe = PersonExpedition.objects.get(pk="681")
self.assertEqual(str(pe.person.fullname), "Michael Sargent")
self.assertEqual(str(pe.expedition.year), "2019")
def test_fix_area_loaded(self):
a = Area.objects.get(short_name='1623')
a = Area.objects.get(short_name="1623")
self.assertEqual(str(a.short_name), "1623")
def test_fix_cave_loaded115(self):
c = Cave.objects.get(kataster_number='115')
c = Cave.objects.get(kataster_number="115")
self.assertEqual(str(c.description_file), "1623/115.htm")
self.assertEqual(str(c.url), "1623/115.url") # intentional
self.assertEqual(str(c.url), "1623/115.url") # intentional
self.assertEqual(str(c.filename), "1623-115.html")
# c.area is a 'ManyRelatedManager' object and not iterable
#self.assertEqual(str(c.[0].short_name), "1623")
# self.assertEqual(str(c.[0].short_name), "1623")
ph = self.ph
phmatch = re.search(ph, c.underground_description)
self.assertIsNotNone(phmatch, "In fixture-loaded cave, failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, c.underground_description)
self.assertIsNotNone(phmatch, "In fixture-loaded cave, failed to find expected text: '" + ph + "'")
def test_fix_cave_loaded284(self):
c = Cave.objects.get(kataster_number='284')
c = Cave.objects.get(kataster_number="284")
self.assertEqual(str(c.description_file), "")
self.assertEqual(str(c.url), "1623/284/284.html")
self.assertEqual(str(c.filename), "1623-284.html")
ph = r'at a depth of 72m, there are large round blocks'
phmatch = re.search(ph, c.notes)
self.assertIsNotNone(phmatch, "In fixture-loaded cave, failed to find expected text: '" + ph +"'")
ph = r"at a depth of 72m, there are large round blocks"
phmatch = re.search(ph, c.notes)
self.assertIsNotNone(phmatch, "In fixture-loaded cave, failed to find expected text: '" + ph + "'")
def test_page_personexpedition(self):
response = self.client.get('/personexpedition/MichaelSargent/2019')
content = response.content.decode()
response = self.client.get("/personexpedition/MichaelSargent/2019")
content = response.content.decode()
# with open('testresponse.html','w') as tr:
# tr.writelines(content)
self.assertEqual(response.status_code, 200)
for ph in [ r'Michael Sargent',
r'Table of all trips and surveys aligned by date' ]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
# tr.writelines(content)
self.assertEqual(response.status_code, 200)
for ph in [r"Michael Sargent", r"Table of all trips and surveys aligned by date"]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# Need to add a fixture so that this actually has a logbook entry and a trip/svx in it.
class FixturePageTests(TestCase):
'''Currently nothing that runs troggle works - all do 404. Must be something in a template rendering crash?
"""Currently nothing that runs troggle works - all do 404. Must be something in a template rendering crash?
ordinary pages are OK, and expopages and expofiles are OK, even though they come through troggle.
'''
"""
# The fixtures have a password hash which is compatible with plain-text password 'secretword'
fixtures = ['auth_users', 'expo_areas', 'expo_caves', 'expo_exped']
ph = r'and leads in 800m of tortuous going to'
fixtures = ["auth_users", "expo_areas", "expo_caves", "expo_exped"]
ph = r"and leads in 800m of tortuous going to"
@classmethod
def setUpTestData(cls):
pass
def setUp(self):
from django.contrib.auth.models import User
self.user = User.objects.get(username='expotest')
# Every test needs a client.
self.user = User.objects.get(username="expotest")
# Every test needs a client.
self.client = Client()
def tearDown(self):
pass
def test_fix_expedition(self):
response = self.client.get('/expedition/2019')
self.assertEqual(response.status_code, 200)
ph = r'Michael Sargent'
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('exped-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
response = self.client.get("/expedition/2019")
self.assertEqual(response.status_code, 200)
ph = r"Michael Sargent"
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('exped-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_fix_personexped(self):
response = self.client.get('/personexpedition/MichaelSargent/2019')
self.assertEqual(response.status_code, 200)
ph = r'Table of all trips and surveys aligned by date'
response = self.client.get("/personexpedition/MichaelSargent/2019")
self.assertEqual(response.status_code, 200)
ph = r"Table of all trips and surveys aligned by date"
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('persexped-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
# with open('persexped-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_fix_person(self):
response = self.client.get('/person/MichaelSargent')
self.assertEqual(response.status_code, 200)
ph = r'second-generation expo caver '
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('person-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
response = self.client.get("/person/MichaelSargent")
self.assertEqual(response.status_code, 200)
ph = r"second-generation expo caver "
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('person-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_fix_cave_url115(self):
ph = self.ph
response = self.client.get('/1623/115.url') # yes this is intentional, see the inserted data above & fixture
self.assertEqual(response.status_code, 200)
content = response.content.decode()
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
response = self.client.get("/1623/115.url") # yes this is intentional, see the inserted data above & fixture
self.assertEqual(response.status_code, 200)
def test_fix_cave_url284(self):
response = self.client.get('/1623/284/284.html')
self.assertEqual(response.status_code, 200)
ph = r'at a depth of 72m, there are large round blocks'
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('cave-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_fix_cave_url284(self):
response = self.client.get("/1623/284/284.html")
self.assertEqual(response.status_code, 200)
ph = r"at a depth of 72m, there are large round blocks"
content = response.content.decode()
phmatch = re.search(ph, content)
# with open('cave-op.html', 'w') as f:
# f.write(content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
def test_fix_cave_bare_url115(self):
'''Expect to get Page Not Found and status 404'''
"""Expect to get Page Not Found and status 404"""
ph = self.ph
ph = 'Probably a mistake.'
response = self.client.get('/1623/115')
self.assertEqual(response.status_code, 404)
content = response.content.decode()
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'") # 200 & Page Not Found
ph = "Probably a mistake."
response = self.client.get("/1623/115")
self.assertEqual(response.status_code, 404)
content = response.content.decode()
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'") # 200 & Page Not Found
def test_fix_cave_slug115(self):
'''Expect to get Page Not Found and status 404'''
"""Expect to get Page Not Found and status 404"""
ph = self.ph
ph = 'Probably a mistake.'
response = self.client.get('/1623-115')
self.assertEqual(response.status_code, 404)
ph = "Probably a mistake."
response = self.client.get("/1623-115")
self.assertEqual(response.status_code, 404)
content = response.content.decode()
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'") # 200 & Page Not Found
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'") # 200 & Page Not Found

View File

@@ -18,377 +18,410 @@ from troggle.core.models.survex import Wallet
from troggle.core.models.troggle import Expedition
class DataTests(TestCase ):
'''These check that the NULL and NON-UNIQUE constraints are working in the database '''
class DataTests(TestCase):
"""These check that the NULL and NON-UNIQUE constraints are working in the database"""
@classmethod
def setUpTestData(cls):
pass
def setUp(self):
from django.contrib.auth.models import User
u = User()
u.pk = 9000
u.user_id = 8000
u.username, u.password ='stinker', 'secretword'
u.email='philip.sargent+SP@gmail.com'
u.first_name, u.last_name ='Stinker', 'Pinker'
u.user_id = 8000
u.username, u.password = "stinker", "secretword"
u.email = "philip.sargent+SP@gmail.com"
u.first_name, u.last_name = "Stinker", "Pinker"
u.save()
self.user = u
def tearDown(self):
#self.member.delete() # must delete member before user
#self.user.delete() # horrible crash, why?
# self.member.delete() # must delete member before user
# self.user.delete() # horrible crash, why?
pass
class FixturePageTests(TestCase):
# The fixtures have a password hash which is compatible with plain-text password 'secretword'
fixtures = ['auth_users']
fixtures = ["auth_users"]
def setUp(self):
from django.contrib.auth.models import User
self.user = User.objects.get(username='expotest')
self.user = User.objects.get(username="expotest")
def tearDown(self):
pass
def test_fix_admin_login_fail(self):
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword') # fails to work if password=u.password !
self.assertTrue(logged_in, 'FAILED to login as \'' + u.username + '\'')
response = c.get('/admin/')
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword") # fails to work if password=u.password !
self.assertTrue(logged_in, "FAILED to login as '" + u.username + "'")
response = c.get("/admin/")
content = response.content.decode()
# with open('admin-op.html', 'w') as f:
# f.write(content)
t = re.search(r'Troggle administration', content)
self.assertIsNone(t, 'Logged in as \'' + u.username + '\' (not staff) but still managed to get the Admin page' )
# with open('admin-op.html', 'w') as f:
# f.write(content)
t = re.search(r"Troggle administration", content)
self.assertIsNone(t, "Logged in as '" + u.username + "' (not staff) but still managed to get the Admin page")
class PostTests(TestCase):
'''Tests scanupload form
'''
fixtures = ['auth_users']
"""Tests scanupload form"""
fixtures = ["auth_users"]
@classmethod
def setUpTestData(cls):
pass
def setUp(self):
from django.contrib.auth.models import User
self.user = User.objects.get(username='expotest')
self.user = User.objects.get(username="expotest")
self.client = Client()
testyear = '2022'
wname = f'{testyear}:00'
testyear = "2022"
wname = f"{testyear}:00"
self.testyear = testyear
w = Wallet()
w.pk = 9100
w.fpath = str(pathlib.Path(settings.SCANS_ROOT, wname))
w.walletname = wname
w.walletname = wname
w.save()
self.wallet = w
e = Expedition()
e.year = testyear
e.save()
self.expedition = e
def test_scan_upload(self):
'''Expect scan upload to wallet to work on any file
"""Expect scan upload to wallet to work on any file
Need to login first.
This upload form looks for the Cave and the Wallet, so the test fails if the database is not loaded with the cave
identified in the wallet
'''
"""
c = self.client
w = self.wallet
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
testyear = self.testyear
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
with open('core/fixtures/test_upload_file.txt','r') as testf:
response = self.client.post(f'/scanupload/{testyear}:00', data={'name': 'test_upload_file.txt', 'uploadfiles': testf })
u = User.objects.get(username="expotest")
testyear = self.testyear
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
with open("core/fixtures/test_upload_file.txt", "r") as testf:
response = self.client.post(
f"/scanupload/{testyear}:00", data={"name": "test_upload_file.txt", "uploadfiles": testf}
)
content = response.content.decode()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, HTTPStatus.OK)
with open('_test_response.html', 'w') as f:
f.write(content)
for ph in [ r'test_upload_',
rf'&larr; {testyear}#00 &rarr;',
r'description written',
r'Plan not required',
r'edit settings or upload a file']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
with open("_test_response.html", "w") as f:
f.write(content)
for ph in [
r"test_upload_",
rf"&larr; {testyear}#00 &rarr;",
r"description written",
r"Plan not required",
r"edit settings or upload a file",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# # Does not use the filename Django actually uses, assumes it is unchanged. Potential bug.
# remove_file = pathlib.Path(settings.SCANS_ROOT) / f'{testyear}' / f'{testyear}#00'/ 'test_upload_file.txt'
# remove_file.unlink()
# # Undo the auto create and commit of a new wallet
# cwd = settings.DRAWINGS_DATA
# cwd = settings.DRAWINGS_DATA
# sp = subprocess.run([settings.GIT, "reset", "--hard", "master^"], cwd=cwd, capture_output=True, text=True)
# print(f'git output: {cwd}:\n # {sp.stderr=}\n # {sp.stdout=} \n # return code: {str(sp.returncode)}')
# if sp.returncode != 0:
# print(f'git output: {cwd}:\n # {sp.stderr=}\n # {sp.stdout=} \n # return code: {str(sp.returncode)}')
# print(f'git output: {cwd}:\n # {sp.stderr=}\n # {sp.stdout=} \n # return code: {str(sp.returncode)}')
def test_photo_upload(self):
'''Expect photo upload to work on any file (contrary to msg on screen)
"""Expect photo upload to work on any file (contrary to msg on screen)
Upload into current default year. settings.PHOTOS_YEAR
Deletes file afterwards
Need to login first.
'''
"""
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
with open('core/fixtures/test_upload_file.txt','r') as testf:
response = self.client.post('/photoupload/', data={'name': 'test_upload_file.txt', 'renameto': '', 'uploadfiles': testf })
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
with open("core/fixtures/test_upload_file.txt", "r") as testf:
response = self.client.post(
"/photoupload/", data={"name": "test_upload_file.txt", "renameto": "", "uploadfiles": testf}
)
content = response.content.decode()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, HTTPStatus.OK)
# with open('_test_response.html', 'w') as f:
# f.write(content)
for ph in [ r'test_upload_',
r'Upload photos into /photos/'+str(settings.PHOTOS_YEAR),
r' you can create a new folder in your name',
r'Create new Photographer folder',
r'only photo image files are accepted']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
# with open('_test_response.html', 'w') as f:
# f.write(content)
for ph in [
r"test_upload_",
r"Upload photos into /photos/" + str(settings.PHOTOS_YEAR),
r" you can create a new folder in your name",
r"Create new Photographer folder",
r"only photo image files are accepted",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# Does not use the filename Django actually uses, assumes it is unchanged. Potential bug.
remove_file = pathlib.Path(settings.PHOTOS_ROOT, settings.PHOTOS_YEAR) / 'test_upload_file.txt'
remove_file = pathlib.Path(settings.PHOTOS_ROOT, settings.PHOTOS_YEAR) / "test_upload_file.txt"
remove_file.unlink()
def test_photo_upload_rename(self):
'''Expect photo upload to work on any file (contrary to msg on screen)
"""Expect photo upload to work on any file (contrary to msg on screen)
Upload into current default year. settings.PHOTOS_YEAR
Deletes file afterwards
Need to login first.
'''
"""
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
rename = 'RENAMED-FILE.JPG'
with open('core/fixtures/test_upload_file.txt','r') as testf:
response = self.client.post('/photoupload/', data={'name': 'test_upload_file.txt', 'renameto': rename, 'uploadfiles': testf })
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
rename = "RENAMED-FILE.JPG"
with open("core/fixtures/test_upload_file.txt", "r") as testf:
response = self.client.post(
"/photoupload/", data={"name": "test_upload_file.txt", "renameto": rename, "uploadfiles": testf}
)
content = response.content.decode()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, HTTPStatus.OK)
# with open('_test_response.html', 'w') as f:
# f.write(content)
# with open('_test_response.html', 'w') as f:
# f.write(content)
for ph in [rename]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# Does not use the filename Django actually uses, assumes it is unchanged. Potential bug.
remove_file = pathlib.Path(settings.PHOTOS_ROOT, settings.PHOTOS_YEAR) / rename
remove_file.unlink()
def test_photo_folder_create(self):
'''Create folder for new user
"""Create folder for new user
Create in current default year. settings.PHOTOS_YEAR
Deletes folder afterwards
Need to login first.
'''
"""
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
response = self.client.post('/photoupload/', data={'photographer': 'GussieFinkNottle'})
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
response = self.client.post("/photoupload/", data={"photographer": "GussieFinkNottle"})
content = response.content.decode()
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, HTTPStatus.OK)
# with open('_test_response.html', 'w') as f:
# f.write(content)
for ph in [r'/GussieFinkNottle/',
r'Create new Photographer folder']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph +"'")
# with open('_test_response.html', 'w') as f:
# f.write(content)
for ph in [r"/GussieFinkNottle/", r"Create new Photographer folder"]:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Failed to find expected text: '" + ph + "'")
# Does not use the filename Django actually uses, assumes it is unchanged. Potential bug.
remove_dir = pathlib.Path(settings.PHOTOS_ROOT, settings.PHOTOS_YEAR) / 'GussieFinkNottle'
remove_dir = pathlib.Path(settings.PHOTOS_ROOT, settings.PHOTOS_YEAR) / "GussieFinkNottle"
remove_dir.rmdir()
def test_dwg_upload_txt(self):
'''Expect .pdf file to be refused upload
"""Expect .pdf file to be refused upload
Need to login first.
'''
"""
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
with open('core/fixtures/test_upload_file.pdf','r') as testf:
response = self.client.post('/dwgupload/uploads', data={'name': 'test_upload_file.txt', 'uploadfiles': testf })
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
with open("core/fixtures/test_upload_file.pdf", "r") as testf:
response = self.client.post(
"/dwgupload/uploads", data={"name": "test_upload_file.txt", "uploadfiles": testf}
)
content = response.content.decode()
self.assertEqual(response.status_code, 200)
t = re.search('Files refused:', content)
self.assertIsNotNone(t, 'Logged in but failed to see "Files refused:"' )
t = re.search("Files refused:", content)
self.assertIsNotNone(t, 'Logged in but failed to see "Files refused:"')
def test_dwg_upload_drawing(self):
'''Expect no-suffix file to upload
"""Expect no-suffix file to upload
Note that this skips the git commit process. That would need a new test.
Need to login first.
'''
"""
c = self.client
from django.contrib.auth.models import User
u = User.objects.get(username='expotest')
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword')
with open('core/fixtures/test_upload_nosuffix','r') as testf:
response = self.client.post('/dwguploadnogit/uploads', data={'name': 'test_upload_nosuffix', 'uploadfiles': testf })
u = User.objects.get(username="expotest")
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword")
with open("core/fixtures/test_upload_nosuffix", "r") as testf:
response = self.client.post(
"/dwguploadnogit/uploads", data={"name": "test_upload_nosuffix", "uploadfiles": testf}
)
content = response.content.decode()
# with open('_test_response.html', 'w') as f:
# f.write(content)
# with open('_test_response.html', 'w') as f:
# f.write(content)
self.assertEqual(response.status_code, 200)
for ph in [ r'test_upload_nosuffix',
r'You cannot create folders here',
r'Creating a folder is done by a nerd']:
phmatch = re.search(ph, content)
self.assertIsNotNone(phmatch, "Expect no-suffix file to upload OK. Failed to find expected text: '" + ph +"'")
for ph in [
r"test_upload_nosuffix",
r"You cannot create folders here",
r"Creating a folder is done by a nerd",
]:
phmatch = re.search(ph, content)
self.assertIsNotNone(
phmatch, "Expect no-suffix file to upload OK. Failed to find expected text: '" + ph + "'"
)
# Does not use the filename Django actually uses, assumes it is unchanged. Bug: accumulates one file with random name added each time it is run.
remove_file = pathlib.Path(settings.DRAWINGS_DATA) / 'uploads' / 'test_upload_nosuffix'
remove_file = pathlib.Path(settings.DRAWINGS_DATA) / "uploads" / "test_upload_nosuffix"
remove_file.unlink()
class ComplexLoginTests(TestCase):
'''These test the login and capabilities of logged-in users, they do not use fixtures'''
"""These test the login and capabilities of logged-in users, they do not use fixtures"""
def setUp(self):
'''setUp runs once for each test in this class'''
"""setUp runs once for each test in this class"""
from django.contrib.auth.models import User
u = User()
u.pk = 9000
u.user_id = 8000
u.username, u.password ='expotest', 'secretword'
u.email='philip.sargent+ET@gmail.com'
u.first_name, u.last_name ='ExpoTest', 'Caver'
u.user_id = 8000
u.username, u.password = "expotest", "secretword"
u.email = "philip.sargent+ET@gmail.com"
u.first_name, u.last_name = "ExpoTest", "Caver"
u.is_staff = True
u.is_superuser = True
u.set_password(u.password) # This creates a new salt and thus a new key for EACH test
u.save() # vital that we save all this before attempting login
#print ('\n',u.password)
u.set_password(u.password) # This creates a new salt and thus a new key for EACH test
u.save() # vital that we save all this before attempting login
# print ('\n',u.password)
self.user = u
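# --- Illustrative sketch, not part of this commit: why the tests below pass the clear
# text "secretword" to c.login() rather than u.password. After set_password() the
# attribute holds the salted hash, so login() would hash it a second time and fail.
def _password_is_hashed_demo():
    demo = User(username="demo")              # never saved; only shows the hashing behaviour
    demo.set_password("secretword")
    assert demo.password != "secretword"      # stored value is the salted hash
    assert demo.check_password("secretword")  # login() therefore needs the clear text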
def tearDown(self):
self.client.logout() # not needed as each test creates a new self.client
#self.member.delete()
self.client.logout() # not needed as each test creates a new self.client
# self.member.delete()
##self.user.delete() # id attribute set to None !
pass
# def test_login_redirect_for_non_logged_on_user(self): # need to fix this in real system
# c = self.client
# # Need to login first. Tests that we are redirected to login page if not logged in
# response = c.get('noinfo/cave-number-index')
# self.assertRedirects(response, "/login/?next=/committee/appointments/")
# c = self.client
# # Need to login first. Tests that we are redirected to login page if not logged in
# response = c.get('noinfo/cave-number-index')
# self.assertRedirects(response, "/login/?next=/committee/appointments/")
def test_ordinary_login(self):
c = self.client
u = self.user
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
logged_in = c.login(username=u.username, password='secretword') # fails to work if password=u.password !
self.assertTrue(logged_in, 'FAILED to login as \'' + u.username + '\'')
response = c.get('/accounts/login/') # defined by auth system
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
logged_in = c.login(username=u.username, password="secretword") # fails to work if password=u.password !
self.assertTrue(logged_in, "FAILED to login as '" + u.username + "'")
response = c.get("/accounts/login/") # defined by auth system
content = response.content.decode()
t = re.search(r'You are now logged in', content)
self.assertIsNotNone(t, 'Logged in as \'' + u.username + '\' but failed to get \'Now you can\' greeting' )
t = re.search(r"You are now logged in", content)
self.assertIsNotNone(t, "Logged in as '" + u.username + "' but failed to get 'Now you can' greeting")
def test_authentication_login(self):
c = self.client
u = self.user
self.assertTrue(u.is_active, 'User \'' + u.username + '\' is INACTIVE')
# This is weird. I thought that the user had to login before she was in the authenticated state
self.assertTrue(u.is_authenticated, 'User \'' + u.username + '\' is NOT AUTHENTICATED before login')
logged_in = c.login(username=u.username, password='secretword') # fails to work if password=u.password !
self.assertTrue(logged_in, 'FAILED to login as \'' + u.username + '\'')
self.assertTrue(u.is_authenticated, 'User \'' + u.username + '\' is NOT AUTHENTICATED after login')
self.assertTrue(u.is_active, "User '" + u.username + "' is INACTIVE")
# This is weird. I thought that the user had to login before she was in the authenticated state
self.assertTrue(u.is_authenticated, "User '" + u.username + "' is NOT AUTHENTICATED before login")
logged_in = c.login(username=u.username, password="secretword") # fails to work if password=u.password !
self.assertTrue(logged_in, "FAILED to login as '" + u.username + "'")
self.assertTrue(u.is_authenticated, "User '" + u.username + "' is NOT AUTHENTICATED after login")
# c.logout() # This next test always means user is still authenticated after logout. Surely not?
# self.assertFalse(u.is_authenticated, 'User \'' + u.username + '\' is STILL AUTHENTICATED after logout')
def test_admin_login(self):
c = self.client
u = self.user
logged_in = c.login(username=u.username, password='secretword') # fails to work if password=u.password !
self.assertTrue(logged_in, 'FAILED to login as \'' + u.username + '\'')
logged_in = c.login(username=u.username, password="secretword") # fails to work if password=u.password !
self.assertTrue(logged_in, "FAILED to login as '" + u.username + "'")
response = c.get('/admin/')
response = c.get("/admin/")
content = response.content.decode()
# with open('admin-op.html', 'w') as f:
# f.write(content)
t = re.search(r'Troggle database administration', content)
self.assertIsNotNone(t, 'Logged in as \'' + u.username + '\' but failed to get the Troggle Admin page' )
# with open('admin-op.html', 'w') as f:
# f.write(content)
t = re.search(r"Troggle database administration", content)
self.assertIsNotNone(t, "Logged in as '" + u.username + "' but failed to get the Troggle Admin page")
def test_noinfo_login(self):
from django.contrib.auth.models import User
c = self.client # inherited from TestCase
u = self.user
logged_in = c.login(username=u.username, password='secretword') # fails if password=u.password !
self.assertTrue(logged_in, 'FAILED to login as \'' + u.username + '\'')
response = c.get('/stats') # a page with the Troggle menus
content = response.content.decode()
t = re.search(r'User\:expotest', content)
self.assertIsNotNone(t, 'Logged in as \'' + u.username + '\' but failed to get \'User:expotest\' heading' )
c = self.client # inherited from TestCase
u = self.user
response = c.get('/noinfo/cave-number-index')
logged_in = c.login(username=u.username, password="secretword") # fails if password=u.password !
self.assertTrue(logged_in, "FAILED to login as '" + u.username + "'")
response = c.get("/stats") # a page with the Troggle menus
content = response.content.decode()
t = re.search(r'2001-07 Hoffnungschacht', content)
self.assertIsNotNone(t, 'Logged in as \'' + u.username + '\' but failed to get /noinfo/ content')
t = re.search(r"User\:expotest", content)
self.assertIsNotNone(t, "Logged in as '" + u.username + "' but failed to get 'User:expotest' heading")
response = c.get("/noinfo/cave-number-index")
content = response.content.decode()
t = re.search(r"2001-07 Hoffnungschacht", content)
self.assertIsNotNone(t, "Logged in as '" + u.username + "' but failed to get /noinfo/ content")
def test_user_force(self):
from django.conf import settings
from django.conf import settings
c = self.client
u = self.user
try:
c.force_login(u)
except:
self.assertIsNotNone(None, 'Unexpected exception trying to force_login as \'' + u.username + '\' but failed (Bad Django documentation?)')
self.assertIsNotNone(
None,
"Unexpected exception trying to force_login as '"
+ u.username
+ "' but failed (Bad Django documentation?)",
)
response = c.get('/stats') # a page with the Troggle menus
response = c.get("/stats") # a page with the Troggle menus
content = response.content.decode()
t = re.search(r'Log out', content)
self.assertIsNotNone(t, 'Forced logged in as \'' + u.username + '\' but failed to get Log out heading' )
t = re.search(r"Log out", content)
self.assertIsNotNone(t, "Forced logged in as '" + u.username + "' but failed to get Log out heading")
response = c.get('/accounts/login/')
response = c.get("/accounts/login/")
content = response.content.decode()
t = re.search(r'You are now logged in', content)
self.assertIsNotNone(t, 'Forced logged in as \'' + u.username + '\' but failed to get /accounts/profile/ content')
t = re.search(r"You are now logged in", content)
self.assertIsNotNone(t, "Forced logged in as '" + u.username + "' but failed to get /accounts/profile/ content")

View File

@ -4,37 +4,41 @@ from django.core import serializers
from django.forms import ModelForm
from django.http import HttpResponse
from troggle.core.models.caves import (Area, Cave, CaveAndEntrance,
Entrance)
from troggle.core.models.logbooks import (QM, LogbookEntry, PersonLogEntry)
from troggle.core.models.survex import (DrawingFile, SingleScan, SurvexBlock,
SurvexDirectory, SurvexFile,
SurvexPersonRole, SurvexStation)
from troggle.core.models.caves import Area, Cave, CaveAndEntrance, Entrance
from troggle.core.models.logbooks import QM, LogbookEntry, PersonLogEntry
from troggle.core.models.survex import (
DrawingFile,
SingleScan,
SurvexBlock,
SurvexDirectory,
SurvexFile,
SurvexPersonRole,
SurvexStation,
)
from troggle.core.models.wallets import Wallet
from troggle.core.models.troggle import (DataIssue, Expedition, Person,
PersonExpedition)
from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition
from troggle.core.views.other import exportlogbook
'''This code significantly adds to the capabilities of the Django Management control panel for Troggle data.
"""This code significantly adds to the capabilities of the Django Management control panel for Troggle data.
In particular, it enables JSON export of any data with 'export_as_json'
and configures the search fields to be used within the control panel.
What is the search path for the css and js inclusions in the Media subclasses though ?!
The page looks for /static/jquery/jquery.min.js
'''
"""
class TroggleModelAdmin(admin.ModelAdmin):
def save_model(self, request, obj, form, change):
"""overriding admin save to fill the new_since parsing_field
new_since_parsing is not currently used in troggle. It is a fossil."""
obj.new_since_parsing=True
obj.new_since_parsing = True
obj.save()
class Media:
js = ('jquery/jquery.min.js','js/QM_helper.js') # not currently available to troggle, see media/js/README
js = ("jquery/jquery.min.js", "js/QM_helper.js") # not currently available to troggle, see media/js/README
class RoleInline(admin.TabularInline):
@ -47,35 +51,35 @@ class SurvexBlockAdmin(TroggleModelAdmin):
class QMsFoundInline(admin.TabularInline):
model=QM
fk_name='found_by'
fields=('number','grade','location_description','comment')#need to add foreignkey to cave part
extra=1
model = QM
fk_name = "found_by"
fields = ("number", "grade", "location_description", "comment") # need to add foreignkey to cave part
extra = 1
class PersonLogEntryInline(admin.TabularInline):
model = PersonLogEntry
raw_id_fields = ('personexpedition',)
raw_id_fields = ("personexpedition",)
extra = 1
class LogbookEntryAdmin(TroggleModelAdmin):
prepopulated_fields = {'slug':("title",)}
search_fields = ('title','expedition__year')
date_hierarchy = ('date')
prepopulated_fields = {"slug": ("title",)}
search_fields = ("title", "expedition__year")
date_heirarchy = "date"
inlines = (PersonLogEntryInline, QMsFoundInline)
class Media:
css = {
"all": ("css/troggleadmin.css",) # this does not exist
}
actions=('export_logbook_entries_as_html','export_logbook_entries_as_txt')
css = {"all": ("css/troggleadmin.css",)} # this does not exist
actions = ("export_logbook_entries_as_html", "export_logbook_entries_as_txt")
def export_logbook_entries_as_html(self, modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='html') #fails, no queryset
response = downloadLogbook(request=request, queryset=queryset, extension="html") # fails, no queryset
return response
def export_logbook_entries_as_txt(self, modeladmin, request, queryset):
response=downloadLogbook(request=request, queryset=queryset, extension='txt') #fails, no queryset
response = downloadLogbook(request=request, queryset=queryset, extension="txt") # fails, no queryset
return response
@ -85,45 +89,53 @@ class PersonExpeditionInline(admin.TabularInline):
class PersonAdmin(TroggleModelAdmin):
search_fields = ('first_name','last_name')
search_fields = ("first_name", "last_name")
inlines = (PersonExpeditionInline,)
class QMAdmin(TroggleModelAdmin):
search_fields = ('found_by__cave__kataster_number','number','found_by__date')
list_display = ('__str__','grade','found_by','ticked_off_by')
list_display_links = ('__str__',)
list_editable = ('found_by','ticked_off_by','grade')
search_fields = ("found_by__cave__kataster_number", "number", "found_by__date")
list_display = ("__str__", "grade", "found_by", "ticked_off_by")
list_display_links = ("__str__",)
list_editable = ("found_by", "ticked_off_by", "grade")
list_per_page = 20
raw_id_fields=('found_by','ticked_off_by')
raw_id_fields = ("found_by", "ticked_off_by")
class PersonExpeditionAdmin(TroggleModelAdmin):
search_fields = ('person__first_name','expedition__year')
search_fields = ("person__first_name", "expedition__year")
class CaveAdmin(TroggleModelAdmin):
search_fields = ('official_name','kataster_number','unofficial_number')
search_fields = ("official_name", "kataster_number", "unofficial_number")
extra = 4
class EntranceAdmin(TroggleModelAdmin):
search_fields = ('caveandentrance__cave__kataster_number',)
search_fields = ("caveandentrance__cave__kataster_number",)
class SurvexStationAdmin(TroggleModelAdmin):
search_fields = ('name',)
search_fields = ("name",)
class SurvexFileAdmin(TroggleModelAdmin):
search_fields = ('path',)
search_fields = ("path",)
class SurvexDirectoryAdmin(TroggleModelAdmin):
search_fields = ('path', 'survexdirectory',)
search_fields = (
"path",
"survexdirectory",
)
class DrawingFileAdmin(TroggleModelAdmin):
search_fields = ('dwgname',)
search_fields = ("dwgname",)
class WalletAdmin(TroggleModelAdmin):
search_fields = ('fpath',)
search_fields = ("fpath",)
admin.site.register(Cave, CaveAdmin)
@ -133,28 +145,29 @@ admin.site.register(Entrance, EntranceAdmin)
admin.site.register(SurvexBlock, SurvexBlockAdmin)
admin.site.register(DrawingFile, DrawingFileAdmin)
admin.site.register(Expedition)
admin.site.register(Person,PersonAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(SurvexPersonRole)
admin.site.register(SurvexDirectory, SurvexDirectoryAdmin)
admin.site.register(SurvexFile, SurvexFileAdmin)
admin.site.register(SurvexStation, SurvexStationAdmin)
admin.site.register(PersonExpedition,PersonExpeditionAdmin)
admin.site.register(PersonExpedition, PersonExpeditionAdmin)
admin.site.register(LogbookEntry, LogbookEntryAdmin)
admin.site.register(QM, QMAdmin)
admin.site.register(Wallet, WalletAdmin)
admin.site.register(SingleScan)
admin.site.register(DataIssue)
def export_as_json(modeladmin, request, queryset):
response = HttpResponse(content_type="text/json")
response['Content-Disposition'] = 'attachment; filename=troggle_output.json'
response["Content-Disposition"] = "attachment; filename=troggle_output.json"
serializers.serialize("json", queryset, stream=response)
return response
def export_as_xml(modeladmin, request, queryset):
response = HttpResponse(content_type="text/xml")
response['Content-Disposition'] = 'attachment; filename=troggle_output.xml'
response["Content-Disposition"] = "attachment; filename=troggle_output.xml"
serializers.serialize("xml", queryset, stream=response)
return response
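# --- Illustrative sketch, not part of this commit: the two export helpers above only become
# visible in the control panel once registered as admin actions. AdminSite.add_action() is
# standard Django; the short_description strings are assumptions, not taken from troggle.
export_as_json.short_description = "Export selected objects as JSON"
export_as_xml.short_description = "Export selected objects as XML"
admin.site.add_action(export_as_json, "export_as_json")
admin.site.add_action(export_as_xml, "export_as_xml")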

View File

@ -2,7 +2,7 @@ from django.conf import settings
from troggle.core.models.troggle import Expedition
'''This is the only troggle-specific 'context processor' that troggle uses
"""This is the only troggle-specific 'context processor' that troggle uses
in the processing of Django templates
This seems to mean that every page produced has bundled in its context the complete 'settings' and
@ -11,8 +11,9 @@ https://betterprogramming.pub/django-quick-tips-context-processors-da74f887f1fc
If it is commented out, the logbookentry page goes crazy and it screws up all the site_media resolutions for CSS files!
Seems to be necessary to make {{settings.MEDIA_URL}} work. Which is obvious in retrospect.
'''
"""
def troggle_context(request):
return { 'settings':settings}
return { 'settings':settings, 'Expedition':Expedition }
return {"settings": settings}
return {"settings": settings, "Expedition": Expedition}

View File

@ -11,100 +11,151 @@ from troggle.core.models.logbooks import QM, LogbookEntry
from troggle.core.models.troggle import Expedition, Person, PersonExpedition
from troggle.core.views.editor_helpers import HTMLarea
#from tinymce.widgets import TinyMCE
# from tinymce.widgets import TinyMCE
'''These are all the class-based Forms used by troggle.
"""These are all the class-based Forms used by troggle.
There are other, simpler, upload forms in view/uploads.py
Some are not used and need renovating or destroying.
'''
"""
todo = """
"""
todo = '''
'''
class CaveForm(ModelForm):
'''Only those fields for which we want to override defaults are listed here
"""Only those fields for which we want to override defaults are listed here
the other fields are present on the form, but use the default presentation style
'''
official_name = forms.CharField(required = False, widget=forms.TextInput(attrs={'size': '45'}))
underground_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
explorers = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
equipment = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
survey = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
#survey = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
kataster_status = forms.CharField(required = False)
underground_centre_line = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
notes = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
references = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
description_file = forms.CharField(required = False, widget=forms.TextInput(attrs={'size': '45'}))
survex_file = forms.CharField(required = False, label="Survex file [caves-1623/000/000.svx]", widget=forms.TextInput(attrs={'size': '45'}))
url = forms.CharField(required = True, label="URL [1623/000/000]", widget=forms.TextInput(attrs={'size': '45'}))
length = forms.CharField(required = False, label="Length (m)")
depth = forms.CharField(required = False, label="Depth (m)")
extent = forms.CharField(required = False, label="Extent (m)")
"""
official_name = forms.CharField(required=False, widget=forms.TextInput(attrs={"size": "45"}))
underground_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
explorers = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
equipment = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
survey = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
# survey = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
kataster_status = forms.CharField(required=False)
underground_centre_line = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
notes = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
references = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
description_file = forms.CharField(required=False, widget=forms.TextInput(attrs={"size": "45"}))
survex_file = forms.CharField(
required=False, label="Survex file [caves-1623/000/000.svx]", widget=forms.TextInput(attrs={"size": "45"})
)
url = forms.CharField(required=True, label="URL [1623/000/000]", widget=forms.TextInput(attrs={"size": "45"}))
length = forms.CharField(required=False, label="Length (m)")
depth = forms.CharField(required=False, label="Depth (m)")
extent = forms.CharField(required=False, label="Extent (m)")
class Meta:
model = Cave
exclude = ("filename",)
def clean(self):
if self.cleaned_data.get("kataster_number") == "" and self.cleaned_data.get("unofficial_number") == "":
self._errors["unofficial_number"] = self.error_class(["Either the kataster or unoffical number is required."])
# if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "":
# self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
self._errors["unofficial_number"] = self.error_class(
["Either the kataster or unoffical number is required."]
)
# if self.cleaned_data.get("kataster_number") != "" and self.cleaned_data.get("official_name") == "":
# self._errors["official_name"] = self.error_class(["This field is required when there is a kataster number."])
if self.cleaned_data.get("area") == []:
self._errors["area"] = self.error_class(["This field is required."])
if self.cleaned_data.get("url") and self.cleaned_data.get("url").startswith("/"):
self._errors["url"] = self.error_class(["This field cannot start with a /."])
return self.cleaned_data
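# --- Illustrative sketch, not part of this commit: the HTMLarea widget above is constructed
# with identical attrs a dozen times in this file; a tiny helper (the name "html_area" is
# hypothetical) would keep each black-formatted field definition on one line.
def html_area(placeholder="Enter page content (using HTML)"):
    return HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": placeholder})

# e.g.  explorers = forms.CharField(required=False, widget=html_area())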
class EntranceForm(ModelForm):
'''Only those fields for which we want to override defaults are listed here
"""Only those fields for which we want to override defaults are listed here
the other fields are present on the form, but use the default presentation style
'''
name = forms.CharField(required = False, widget=forms.TextInput(attrs={'size': '45'}))
entrance_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
explorers = forms.CharField(required = False, widget=forms.TextInput(attrs={'size': '45'}))
#explorers = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
map_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
location_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
lastvisit = forms.CharField(required=False, widget=forms.TextInput(attrs={'size': '10'}), label="Date of last visit")
approach = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
underground_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
photo = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
marking_comment = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
findability_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
other_description = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
bearings = forms.CharField(required = False, widget=HTMLarea(
attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"}))
other_station = forms.CharField(required=False)
tag_station = forms.CharField(required=False)
exact_station = forms.CharField(required=False)
northing = forms.CharField(required=False)
"""
name = forms.CharField(required=False, widget=forms.TextInput(attrs={"size": "45"}))
entrance_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
explorers = forms.CharField(required=False, widget=forms.TextInput(attrs={"size": "45"}))
# explorers = forms.CharField(required = False, widget=TinyMCE(attrs={'cols': 80, 'rows': 10}))
map_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
location_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
lastvisit = forms.CharField(
required=False, widget=forms.TextInput(attrs={"size": "10"}), label="Date of last visit"
)
approach = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
underground_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
photo = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
marking_comment = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
findability_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
other_description = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
bearings = forms.CharField(
required=False,
widget=HTMLarea(attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}),
)
other_station = forms.CharField(required=False)
tag_station = forms.CharField(required=False)
exact_station = forms.CharField(required=False)
northing = forms.CharField(required=False)
easting = forms.CharField(required=False)
lat_wgs84 = forms.CharField(required=False, widget=forms.TextInput(attrs={'size': '10'}), label="Latitude (WGS84)")
long_wgs84 = forms.CharField(required=False, widget=forms.TextInput(attrs={'size': '10'}), label="Longitude (WGS84)")
alt = forms.CharField(required=False, label="Altitude (m)")
url = forms.CharField(required = False, label="URL [usually blank]", widget=forms.TextInput(attrs={'size': '45'}))
lat_wgs84 = forms.CharField(required=False, widget=forms.TextInput(attrs={"size": "10"}), label="Latitude (WSG84)")
long_wgs84 = forms.CharField(
required=False, widget=forms.TextInput(attrs={"size": "10"}), label="Longitude (WSG84)"
)
alt = forms.CharField(required=False, label="Altitude (m)")
url = forms.CharField(required=False, label="URL [usually blank]", widget=forms.TextInput(attrs={"size": "45"}))
class Meta:
model = Entrance
exclude = ("cached_primary_slug", "filename",)
exclude = (
"cached_primary_slug",
"filename",
)
def clean(self):
if self.cleaned_data.get("url").startswith("/"):
self._errors["url"] = self.error_class(["This field cannot start with a /."])
@ -116,24 +167,23 @@ class EntranceForm(ModelForm):
# http://localhost:8000/cave/new/
# using django built-in Deep Magic. https://docs.djangoproject.com/en/3.2/topics/forms/modelforms/
# for forms which map directly onto a Django Model
CaveAndEntranceFormSet = modelformset_factory(CaveAndEntrance, exclude=('cave',))
CaveAndEntranceFormSet = modelformset_factory(CaveAndEntrance, exclude=("cave",))
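# --- Illustrative sketch, not part of this commit: typical use of the formset above in a
# cave-editing view. The queryset filter, the template name and the import are assumptions,
# not troggle code; the key detail is that the excluded 'cave' field must be set manually.
from django.shortcuts import render

def edit_cave_entrances(request, cave):
    if request.method == "POST":
        formset = CaveAndEntranceFormSet(request.POST, queryset=CaveAndEntrance.objects.filter(cave=cave))
        if formset.is_valid():
            for ce in formset.save(commit=False):
                ce.cave = cave  # excluded from the form, so filled in here
                ce.save()
    else:
        formset = CaveAndEntranceFormSet(queryset=CaveAndEntrance.objects.filter(cave=cave))
    return render(request, "editcave.html", {"caveAndEntranceFormSet": formset})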
class EntranceLetterForm(ModelForm):
'''Form to link entrances to caves, along with an entrance number.
Nb. The relationship between caves and entrances has historically been a many to many relationship.
"""Form to link entrances to caves, along with an entrance number.
Nb. The relationship between caves and entrances has historically been a many to many relationship.
With entrances gaining new caves and letters when caves are joined.
'''
"""
class Meta:
model = CaveAndEntrance
exclude = ('cave', 'entrance')
exclude = ("cave", "entrance")
def full_clean(self):
super(EntranceLetterForm, self).full_clean()
try:
self.instance.validate_unique()
except forms.ValidationError as e:
self._update_errors(e)

View File

@ -24,16 +24,17 @@ We might use this mechanism to replace/enhance the
folk, wallets and any cron jobs or other standalone scripts.
"""
class Command(BaseCommand):
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('posargs', nargs='+', type=int)
parser.add_argument("posargs", nargs="+", type=int)
# Named (optional) arguments
parser.add_argument(
'--delete',
action='store_true',
help='Removed as redundant - use databaseReset.py',
"--delete",
action="store_true",
help="Removed as redundant - use databaseReset.py",
)
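# --- Illustrative sketch, not part of this commit: how a command declared like this is
# driven. The command name comes from the file name, which this hunk does not show, so
# "somecommand" below is a placeholder.
#     python manage.py somecommand 1 2 3 --delete
# or, programmatically from other troggle code or a test:
#     from django.core.management import call_command
#     call_command("somecommand", 1, 2, 3, delete=True)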
def handle(self, *args, **options):

View File

@ -5,9 +5,11 @@ from django.urls import Resolver404, resolve, reverse
"""Non-standard django middleware is loaded from this file.
"""
todo = '''SmartAppendSlashMiddleware(object) Not Working.
todo = """SmartAppendSlashMiddleware(object) Not Working.
It needs re-writing to be compatible with Django v2.0 and later
'''
"""
class SmartAppendSlashMiddleware(object):
"""
"SmartAppendSlash" middleware for taking care of URL rewriting.
@ -20,32 +22,34 @@ class SmartAppendSlashMiddleware(object):
"""
def process_request(self, request):
'''Called for every url so return as quickly as possible
"""Called for every url so return as quickly as possible
Append a slash if SMART_APPEND_SLASH is set, the resulting URL resolves and it doesn't without the /
'''
"""
if not settings.SMART_APPEND_SLASH:
return None
if request.path.endswith('/'):
if request.path.endswith("/"):
return None
if request.path.endswith('_edit'):
if request.path.endswith("_edit"):
return None
host = http.HttpRequest.get_host(request)
old_url = [host, request.path]
if _resolves(old_url[1]):
return None
# So: it does not resolve according to our criteria, i.e. _edit doesn't count
new_url = old_url[:]
new_url[1] = new_url[1] + '/'
new_url = old_url[:]
new_url[1] = new_url[1] + "/"
if not _resolves(new_url[1]):
return None
else:
if settings.DEBUG and request.method == 'POST':
else:
if settings.DEBUG and request.method == "POST":
# replace this exception with a redirect to an error page
raise RuntimeError(f"You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to {new_url[0]}{new_url[1]} (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings.")
raise RuntimeError(
f"You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to {new_url[0]}{new_url[1]} (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings."
)
if new_url != old_url:
# Redirect
if new_url[0]:
@ -53,17 +57,18 @@ class SmartAppendSlashMiddleware(object):
else:
newurl = new_url[1]
if request.GET:
newurl += '?' + request.GET.urlencode()
newurl += "?" + request.GET.urlencode()
return http.HttpResponsePermanentRedirect(newurl)
return None
def _resolves(url):
try:
# If the URL does not resolve, the function raises a Resolver404 exception (a subclass of Http404)
# If the URL does not resolve, the function raises a Resolver404 exception (a subclass of Http404)
match = resolve(url)
# this will ALWAYS be resolved by expopages because it will produce pagenotfound if not the thing asked for
# so handle this in expopages, not in middleware
# so handle this in expopages, not in middleware
return True
except Resolver404:
return False
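# --- Illustrative sketch, not part of this commit: the todo above says this old-style
# middleware needs rewriting for Django >= 2.0. Wrapping it with Django's MiddlewareMixin
# is the usual minimal fix - the mixin supplies the __init__(get_response)/__call__
# protocol that newer Django expects, while process_request() above is reused unchanged.
from django.utils.deprecation import MiddlewareMixin

class SmartAppendSlashMiddleware2(MiddlewareMixin, SmartAppendSlashMiddleware):
    """New-style wrapper around the legacy class defined above (the name is hypothetical)."""
    pass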

View File

@ -23,33 +23,33 @@ from django.urls import reverse
import settings
from troggle.core.models.logbooks import QM
from troggle.core.models.survex import SurvexStation
from troggle.core.models.troggle import (DataIssue, Expedition, Person,
TroggleModel)
from troggle.core.models.troggle import DataIssue, Expedition, Person, TroggleModel
from troggle.core.utils import TROG, writetrogglefile
# Use the TROG global object to cache the cave lookup list. No good for multi-user..
Gcavelookup = TROG['caves']['gcavelookup']
Gcave_count = TROG['caves']['gcavecount']
Gcavelookup = TROG["caves"]["gcavelookup"]
Gcave_count = TROG["caves"]["gcavecount"]
Gcavelookup = None
Gcave_count = None
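# --- Illustrative sketch, not part of this commit: how the cached lookup built by
# GetCaveLookup() (defined towards the end of this file) is normally consumed. Keys include
# official names, kataster and unofficial numbers, filenames, slugs and the aliases listed
# further down; "2017-pw-01" is one of those aliases.
def _example_cave_lookup(identifier="2017-pw-01"):
    lookup = GetCaveLookup()               # builds and caches the alias dictionary on first use
    return lookup.get(identifier.lower())  # a Cave object, or None if the id is unknown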
'''The model declarations for Areas, Caves and Entrances
'''
"""The model declarations for Areas, Caves and Entrances
"""
todo='''
todo = """
- Find out why we have separate objects CaveSlug and EntranceSlug and why
these are not just a single field on the Model. Do we ever need more
than one slug per cave or entrance? Surely that would break everything??
- Restore constraint: unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
'''
"""
class Area(TroggleModel):
short_name = models.CharField(max_length=100)
name = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
super = models.ForeignKey('Area', blank=True, null=True, on_delete=models.SET_NULL)
super = models.ForeignKey("Area", blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
if self.super:
@ -63,56 +63,62 @@ class Area(TroggleModel):
elif self.super:
return self.super.kat_area()
class CaveAndEntrance(models.Model):
cave = models.ForeignKey('Cave',on_delete=models.CASCADE)
entrance = models.ForeignKey('Entrance',on_delete=models.CASCADE)
entrance_letter = models.CharField(max_length=20,blank=True, null=True)
cave = models.ForeignKey("Cave", on_delete=models.CASCADE)
entrance = models.ForeignKey("Entrance", on_delete=models.CASCADE)
entrance_letter = models.CharField(max_length=20, blank=True, null=True)
class Meta:
unique_together = [['cave', 'entrance'], ['cave', 'entrance_letter']]
ordering = ['entrance_letter']
unique_together = [["cave", "entrance"], ["cave", "entrance_letter"]]
ordering = ["entrance_letter"]
def __str__(self):
return str(self.cave) + str(self.entrance_letter)
class Cave(TroggleModel):
# too much here perhaps,
# too much here perhaps,
official_name = models.CharField(max_length=160)
area = models.ManyToManyField(Area, blank=True)
kataster_code = models.CharField(max_length=20,blank=True, null=True)
kataster_number = models.CharField(max_length=10,blank=True, null=True)
unofficial_number = models.CharField(max_length=60,blank=True, null=True)
entrances = models.ManyToManyField('Entrance', through='CaveAndEntrance')
explorers = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
equipment = models.TextField(blank=True,null=True)
references = models.TextField(blank=True,null=True)
survey = models.TextField(blank=True,null=True)
kataster_status = models.TextField(blank=True,null=True)
underground_centre_line = models.TextField(blank=True,null=True)
notes = models.TextField(blank=True,null=True)
length = models.CharField(max_length=100,blank=True, null=True)
depth = models.CharField(max_length=100,blank=True, null=True)
extent = models.CharField(max_length=100,blank=True, null=True)
survex_file = models.CharField(max_length=100,blank=True, null=True)
description_file = models.CharField(max_length=200,blank=True, null=True)
url = models.CharField(max_length=200,blank=True, null=True)
kataster_code = models.CharField(max_length=20, blank=True, null=True)
kataster_number = models.CharField(max_length=10, blank=True, null=True)
unofficial_number = models.CharField(max_length=60, blank=True, null=True)
entrances = models.ManyToManyField("Entrance", through="CaveAndEntrance")
explorers = models.TextField(blank=True, null=True)
underground_description = models.TextField(blank=True, null=True)
equipment = models.TextField(blank=True, null=True)
references = models.TextField(blank=True, null=True)
survey = models.TextField(blank=True, null=True)
kataster_status = models.TextField(blank=True, null=True)
underground_centre_line = models.TextField(blank=True, null=True)
notes = models.TextField(blank=True, null=True)
length = models.CharField(max_length=100, blank=True, null=True)
depth = models.CharField(max_length=100, blank=True, null=True)
extent = models.CharField(max_length=100, blank=True, null=True)
survex_file = models.CharField(max_length=100, blank=True, null=True)
description_file = models.CharField(max_length=200, blank=True, null=True)
url = models.CharField(max_length=200, blank=True, null=True)
filename = models.CharField(max_length=200)
#class Meta:
# class Meta:
# unique_together = (("area", "kataster_number"), ("area", "unofficial_number"))
# FIXME Kataster Areas and CUCC defined sub areas need separating
# FIXME Kataster Areas and CUCC defined sub areas need separating
# href = models.CharField(max_length=100)
#href = models.CharField(max_length=100)
class Meta:
ordering = ('kataster_code', 'unofficial_number')
ordering = ("kataster_code", "unofficial_number")
def hassurvey(self):
if not self.underground_centre_line:
return "No"
if (self.survey.find("<img") > -1 or self.survey.find("<a") > -1 or self.survey.find("<IMG") > -1 or self.survey.find("<A") > -1):
if (
self.survey.find("<img") > -1
or self.survey.find("<a") > -1
or self.survey.find("<IMG") > -1
or self.survey.find("<A") > -1
):
return "Yes"
return "Missing"
@ -122,9 +128,9 @@ class Cave(TroggleModel):
if self.survex_file:
return "Yes"
return "Missing"
def slug(self):
primarySlugs = self.caveslug_set.filter(primary = True)
primarySlugs = self.caveslug_set.filter(primary=True)
if primarySlugs:
return primarySlugs[0].slug
else:
@ -133,14 +139,14 @@ class Cave(TroggleModel):
return slugs[0].slug
def ours(self):
return bool(re.search(r'CUCC', self.explorers))
return bool(re.search(r"CUCC", self.explorers))
def reference(self):
if self.kataster_number:
return f"{self.kat_area()}-{self.kataster_number}"
else:
return f"{self.kat_area()}-{self.unofficial_number}"
def get_absolute_url(self):
if self.kataster_number:
href = self.kataster_number
@ -148,34 +154,35 @@ class Cave(TroggleModel):
href = self.unofficial_number
else:
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
#return urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,})) # WRONG. This produces /cave/161 and should be /1623/161
return Path(settings.URL_ROOT) / self.url # not good Django style.. NEEDS actual URL
# return settings.URL_ROOT + '/cave/' + href + '/'
# return urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,})) # WRONG. This produces /cave/161 and should be /1623/161
return Path(settings.URL_ROOT) / self.url # not good Django style.. NEEDS actual URL
def url_parent(self):
return self.url.rsplit("/", 1)[0]
def __str__(self, sep = ": "):
def __str__(self, sep=": "):
return str(self.slug())
def get_QMs(self):
'''Searches for all QMs that reference this cave.
'''
#qms = self.qm_set.all().order_by('expoyear', 'block__date')
qms = QM.objects.filter(cave=self).order_by('expoyear', 'block__date') # a QuerySet, see https://docs.djangoproject.com/en/4.0/ref/models/querysets/#order-by
return qms # a QuerySet
"""Searches for all QMs that reference this cave."""
# qms = self.qm_set.all().order_by('expoyear', 'block__date')
qms = QM.objects.filter(cave=self).order_by(
"expoyear", "block__date"
) # a QuerySet, see https://docs.djangoproject.com/en/4.0/ref/models/querysets/#order-by
return qms # a QuerySet
def kat_area(self):
for a in self.area.all():
if a.kat_area():
return a.kat_area()
def entrances(self):
return CaveAndEntrance.objects.filter(cave=self)
def singleentrance(self):
return len(CaveAndEntrance.objects.filter(cave=self)) == 1
def entrancelist(self):
rs = []
res = ""
@ -183,11 +190,11 @@ class Cave(TroggleModel):
if e.entrance_letter:
rs.append(e.entrance_letter)
rs.sort()
prevR = ''
prevR = ""
n = 0
for r in rs:
if prevR:
if chr(ord(prevR) + 1 ) == r:
if chr(ord(prevR) + 1) == r:
prevR = r
n += 1
else:
@ -205,27 +212,27 @@ class Cave(TroggleModel):
else:
res += "&ndash;" + prevR
return res
def writeDataFile(self):
filepath = os.path.join(settings.CAVEDESCRIPTIONS, self.filename)
t = loader.get_template('dataformat/cave.xml')
t = loader.get_template("dataformat/cave.xml")
now = datetime.now(timezone.utc)
print(now)
c = dict({'cave': self, 'date': now})
c = dict({"cave": self, "date": now})
u = t.render(c)
writetrogglefile(filepath, u)
return
def file_output(self):
filepath = Path(os.path.join(settings.CAVEDESCRIPTIONS, self.filename))
t = loader.get_template('dataformat/cave.xml')
t = loader.get_template("dataformat/cave.xml")
now = datetime.now(timezone.utc)
c = dict({'cave': self, 'date': now})
c = dict({"cave": self, "date": now})
content = t.render(c)
return (filepath, content, "utf8")
def getArea(self):
areas = self.area.all()
lowestareas = list(areas)
@ -237,40 +244,39 @@ class Cave(TroggleModel):
pass
return lowestareas[0]
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance',on_delete=models.CASCADE)
slug = models.SlugField(max_length=50, unique = True)
entrance = models.ForeignKey("Entrance", on_delete=models.CASCADE)
slug = models.SlugField(max_length=50, unique=True)
primary = models.BooleanField(default=False)
class Entrance(TroggleModel):
name = models.CharField(max_length=100, blank=True,null=True)
entrance_description = models.TextField(blank=True,null=True)
explorers = models.TextField(blank=True,null=True)
map_description = models.TextField(blank=True,null=True)
location_description = models.TextField(blank=True,null=True)
lastvisit = models.TextField(blank=True,null=True)
approach = models.TextField(blank=True,null=True)
underground_description = models.TextField(blank=True,null=True)
photo = models.TextField(blank=True,null=True)
name = models.CharField(max_length=100, blank=True, null=True)
entrance_description = models.TextField(blank=True, null=True)
explorers = models.TextField(blank=True, null=True)
map_description = models.TextField(blank=True, null=True)
location_description = models.TextField(blank=True, null=True)
lastvisit = models.TextField(blank=True, null=True)
approach = models.TextField(blank=True, null=True)
underground_description = models.TextField(blank=True, null=True)
photo = models.TextField(blank=True, null=True)
MARKING_CHOICES = (
('P', 'Paint'),
('P?', 'Paint (?)'),
('T', 'Tag'),
('T?', 'Tag (?)'),
('R', 'Needs Retag'),
('S', 'Spit'),
('S?', 'Spit (?)'),
('U', 'Unmarked'),
('?', 'Unknown'))
("P", "Paint"),
("P?", "Paint (?)"),
("T", "Tag"),
("T?", "Tag (?)"),
("R", "Needs Retag"),
("S", "Spit"),
("S?", "Spit (?)"),
("U", "Unmarked"),
("?", "Unknown"),
)
marking = models.CharField(max_length=2, choices=MARKING_CHOICES)
marking_comment = models.TextField(blank=True,null=True)
FINDABLE_CHOICES = (
('?', 'To be confirmed ...'),
('S', 'Coordinates'),
('L', 'Lost'),
('R', 'Refindable'))
marking_comment = models.TextField(blank=True, null=True)
FINDABLE_CHOICES = (("?", "To be confirmed ..."), ("S", "Coordinates"), ("L", "Lost"), ("R", "Refindable"))
findability = models.CharField(max_length=1, choices=FINDABLE_CHOICES, blank=True, null=True)
findability_description = models.TextField(blank=True,null=True)
findability_description = models.TextField(blank=True, null=True)
alt = models.TextField(blank=True, null=True)
northing = models.TextField(blank=True, null=True)
easting = models.TextField(blank=True, null=True)
@ -279,14 +285,14 @@ class Entrance(TroggleModel):
tag_station = models.TextField(blank=True, null=True)
exact_station = models.TextField(blank=True, null=True)
other_station = models.TextField(blank=True, null=True)
other_description = models.TextField(blank=True,null=True)
bearings = models.TextField(blank=True,null=True)
url = models.CharField(max_length=200,blank=True, null=True)
other_description = models.TextField(blank=True, null=True)
bearings = models.TextField(blank=True, null=True)
url = models.CharField(max_length=200, blank=True, null=True)
filename = models.CharField(max_length=200)
cached_primary_slug = models.CharField(max_length=200,blank=True, null=True)
cached_primary_slug = models.CharField(max_length=200, blank=True, null=True)
class Meta:
ordering = ['caveandentrance__entrance_letter']
ordering = ["caveandentrance__entrance_letter"]
def __str__(self):
return str(self.slug())
@ -298,11 +304,7 @@ class Entrance(TroggleModel):
return SurvexStation.objects.lookup(self.other_station)
def find_location(self):
r = {'': 'To be entered ',
'?': 'To be confirmed:',
'S': '',
'L': 'Lost:',
'R': 'Refindable:'}[self.findability]
r = {"": "To be entered ", "?": "To be confirmed:", "S": "", "L": "Lost:", "R": "Refindable:"}[self.findability]
if self.tag_station:
try:
s = SurvexStation.objects.lookup(self.tag_station)
@ -337,7 +339,12 @@ class Entrance(TroggleModel):
def has_photo(self):
if self.photo:
if (self.photo.find("<img") > -1 or self.photo.find("<a") > -1 or self.photo.find("<IMG") > -1 or self.photo.find("<A") > -1):
if (
self.photo.find("<img") > -1
or self.photo.find("<a") > -1
or self.photo.find("<IMG") > -1
or self.photo.find("<A") > -1
):
return "Yes"
else:
return "Missing"
@ -363,17 +370,17 @@ class Entrance(TroggleModel):
def get_absolute_url(self):
# ancestor_titles='/'.join([subcave.title for subcave in self.get_ancestors()])
# if ancestor_titles:
# res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
# res = '/'.join((self.get_root().cave.get_absolute_url(), ancestor_titles, self.title))
# else:
# res = '/'.jocavein((self.get_root().cave.get_absolute_url(), self.title))
# res = '/'.jocavein((self.get_root().cave.get_absolute_url(), self.title))
# return res
res = '/'.join((self.get_root().cave.get_absolute_url(), self.title))
res = "/".join((self.get_root().cave.get_absolute_url(), self.title))
return res
def slug(self):
if not self.cached_primary_slug:
primarySlugs = self.entranceslug_set.filter(primary = True)
if primarySlugs:
primarySlugs = self.entranceslug_set.filter(primary=True)
if primarySlugs:
self.cached_primary_slug = primarySlugs[0].slug
self.save()
else:
@ -390,30 +397,29 @@ class Entrance(TroggleModel):
if e.cave:
rs.append(e.cave)
return rs
def get_file_path(self):
return Path(settings.ENTRANCEDESCRIPTIONS, self.filename)
return Path(settings.ENTRANCEDESCRIPTIONS, self.filename)
def file_output(self):
filepath = Path(os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename))
t = loader.get_template('dataformat/entrance.xml')
t = loader.get_template("dataformat/entrance.xml")
now = datetime.now(timezone.utc)
c = dict({'entrance': self, 'date': now})
c = dict({"entrance": self, "date": now})
content = t.render(c)
return (filepath, content, "utf8")
def writeDataFile(self):
filepath = os.path.join(settings.ENTRANCEDESCRIPTIONS, self.filename)
t = loader.get_template('dataformat/entrance.xml')
t = loader.get_template("dataformat/entrance.xml")
now = datetime.now(timezone.utc)
c = dict({'entrance': self, 'date': now})
c = dict({"entrance": self, "date": now})
u = t.render(c)
writetrogglefile(filepath, u)
return
def url_parent(self):
if self.url:
return self.url.rsplit("/", 1)[0]
@@ -423,21 +429,22 @@ class Entrance(TroggleModel):
return cavelist[0].url_parent()
else:
return ""
def GetCaveLookup():
"""A very relaxed way of finding probably the right cave given almost any string which might serve to identify it
lookup function modelled on GetPersonExpeditionNameLookup
repeated assignment each call, needs refactoring
Used when parsing wallets contents.json file too in views/uploads.py
Does NOT detect duplicates! Needs fixing.
    Needs to be a proper function that raises an exception if there is a duplicate.
OR we could set it to return None if there are duplicates, and require the caller to
OR we could set it to return None if there are duplicates, and require the caller to
    fall back on doing the actual database query it wants rather than using this cache shortcut
"""
def checkcaveid(cave, id):
global Gcavelookup
if id not in Gcavelookup:
@@ -445,48 +452,48 @@ def GetCaveLookup():
Gcave_count[id] += 1
else:
if cave == Gcavelookup[id]:
pass # same id, same cave
else: # same id but different cave
pass # same id, same cave
else: # same id but different cave
message = f" - Warning: same alias id '{id:3}' for two caves '{Gcavelookup[id]}' and '{cave}'. Removing this shorthand alias entirely."
Gcavelookup.pop(id)
print(message)
DataIssue.objects.create(parser='aliases', message=message)
DataIssue.objects.create(parser="aliases", message=message)
global Gcavelookup
if Gcavelookup:
return Gcavelookup
Gcavelookup = {"NONEPLACEHOLDER": None}
global Gcave_count
Gcave_count = defaultdict(int) # sets default value to int(0)
DataIssue.objects.filter(parser='aliases').delete()
Gcave_count = defaultdict(int) # sets default value to int(0)
DataIssue.objects.filter(parser="aliases").delete()
for cave in Cave.objects.all():
key = cave.official_name.lower()
if key != "" and key != "unamed" and key != "unnamed":
Gcavelookup[key] = cave
Gcave_count[key] += 1
Gcave_count[key] += 1
if cave.kataster_number:
checkcaveid(cave,cave.kataster_number) # we do expect 1623/55 and 1626/55 to cause a warning message
checkcaveid(cave, cave.kataster_number) # we do expect 1623/55 and 1626/55 to cause a warning message
# the rest of these are 'nice to have' but may validly already be set
if cave.unofficial_number:
unoffn = cave.unofficial_number.lower()
checkcaveid(cave,unoffn)
checkcaveid(cave, unoffn)
if cave.filename:
            # this is the slug - usually.. but usually done as f'{cave.area}-{cave.kataster_number}'
fn = cave.filename.replace(".html","").lower()
checkcaveid(cave,fn)
fn = cave.filename.replace(".html", "").lower()
checkcaveid(cave, fn)
if cave.slug():
# also possibly done already
slug = cave.slug().lower()
checkcaveid(cave,slug)
checkcaveid(cave, slug)
    # These might also create more duplicate entries
# Yes, this should be set in, and imported from, settings.py
aliases =[
aliases = [
("1987-02", "267"),
("1990-01", "171"),
("1990-02", "172"),
@@ -570,29 +577,25 @@ def GetCaveLookup():
("2015-mf-06", "288"),
("2016-jb-01", "289"),
("2017-pw-01", "277"),
("2018-dm-07", "359"), # NB this is 1626
("2017_cucc_24", "291"), # note _ not -
("2017_cucc_23", "295"), # note _ not -
("2017_cucc_28", "290"), # note _ not -
("2018-dm-07", "359"), # NB this is 1626
("2017_cucc_24", "291"), # note _ not -
("2017_cucc_23", "295"), # note _ not -
("2017_cucc_28", "290"), # note _ not -
("bs17", "283"),
("1976/b11", "198"),
("1976/b8", "197"),
("1976/b9", "190"),
("b11", "1976/b11"),
("b8", "1976/b8"),
("b9", "1976/b9"),
("2011-01-bs30", "190"),
("bs30", "190"),
("2011-01", "190"),
("quarriesd", "2002-08"),
("2002-x11", "2005-08"),
("2002-x12", "2005-07"),
("2002-x13", "2005-06"),
("2002-x14", "2005-05"),
("kh", "161"),
("161-kh", "161"),
("204-steinBH", "204"),
@@ -605,13 +608,12 @@ def GetCaveLookup():
("balkon", "264"),
("fgh", "290"),
("gsh", "291"),
("homecoming", "2018-dm-07"),
("heimkommen", "2018-dm-07"),
("Heimkehr", "2018-dm-07"),
("99ob02", "1999-ob-02"),
]
for i in aliases:
if i[1] in Gcavelookup:
if i[0] in Gcavelookup:
@@ -623,23 +625,23 @@ def GetCaveLookup():
Gcavelookup[i[0]] = Gcavelookup[i[1]]
else:
message = f" * Coding or cave existence mistake, cave for id '{i[1]}' does not exist. Expecting to set alias '{i[0]}' to it"
#print(message)
DataIssue.objects.create(parser='aliases', message=message)
# print(message)
DataIssue.objects.create(parser="aliases", message=message)
addmore = {}
for id in Gcavelookup:
addmore[id.replace("-","_")] = Gcavelookup[id]
addmore[id.replace("_","-")] = Gcavelookup[id]
addmore[id.replace("-", "_")] = Gcavelookup[id]
addmore[id.replace("_", "-")] = Gcavelookup[id]
addmore[id.upper()] = Gcavelookup[id]
Gcavelookup = {**addmore, **Gcavelookup}
addmore ={}
addmore = {}
for c in Gcave_count:
if Gcave_count[c] > 1:
message = f" ** Duplicate cave id count={Gcave_count[c]} id:'{Gcavelookup[c]}' cave __str__:'{c}'"
print(message)
DataIssue.objects.create(parser='aliases', message=message)
DataIssue.objects.create(parser="aliases", message=message)
# logdataissues[Gcavelookup[c]]=message # pending troggle-wide issues logging system
return Gcavelookup
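# A minimal usage sketch of the Gcavelookup cache built above, assuming a configured
# troggle/Django environment with cave data already imported. The identifier strings and
# the exact import path are illustrative assumptions.
from troggle.core.models.caves import GetCaveLookup  # assumed module path for the code above

Gcavelookup = GetCaveLookup()              # first call builds the cache; later calls reuse it
cave = Gcavelookup.get("2018-dm-07")       # an alias from the table above, if that cave exists
same = Gcavelookup.get("2018_dm_07")       # '-'/'_' swapped and upper-case variants are added too
missing = Gcavelookup.get("no-such-id")    # dict .get() returns None rather than raising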

View File

@@ -22,56 +22,62 @@ from django.urls import reverse
import settings
from troggle.core.models.survex import SurvexStation
from troggle.core.models.troggle import (DataIssue, Expedition, Person,
PersonExpedition, TroggleModel)
from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition, TroggleModel
'''The model declarations LogBookEntry, PersonLogEntry, QM
'''
"""The model declarations LogBookEntry, PersonLogEntry, QM
"""
todo = """
"""
todo='''
'''
class CaveSlug(models.Model):
"""Moved here to avoid nasty cyclic import error"""
cave = models.ForeignKey('Cave',on_delete=models.CASCADE)
slug = models.SlugField(max_length=50, unique = True)
cave = models.ForeignKey("Cave", on_delete=models.CASCADE)
slug = models.SlugField(max_length=50, unique=True)
primary = models.BooleanField(default=False)
class LogbookEntry(TroggleModel):
"""Single parsed entry from Logbook
"""
date = models.DateField()#MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.ld()
expedition = models.ForeignKey(Expedition,blank=True, null=True,on_delete=models.SET_NULL) # yes this is double-
title = models.CharField(max_length=200)
cave_slug = models.SlugField(max_length=50, blank=True, null=True)
place = models.CharField(max_length=100,blank=True, null=True,help_text="Only use this if you haven't chosen a cave")
text = models.TextField()
slug = models.SlugField(max_length=50)
time_underground = models.FloatField(null=True,help_text="In decimal hours")
"""Single parsed entry from Logbook"""
date = (
models.DateField()
) # MJG wants to turn this into a datetime such that multiple Logbook entries on the same day can be ordered.ld()
expedition = models.ForeignKey(Expedition, blank=True, null=True, on_delete=models.SET_NULL) # yes this is double-
title = models.CharField(max_length=200)
cave_slug = models.SlugField(max_length=50, blank=True, null=True)
place = models.CharField(
max_length=100, blank=True, null=True, help_text="Only use this if you haven't chosen a cave"
)
text = models.TextField()
slug = models.SlugField(max_length=50)
time_underground = models.FloatField(null=True, help_text="In decimal hours")
class Meta:
verbose_name_plural = "Logbook Entries"
# several PersonLogEntrys point in to this object
ordering = ('-date',)
ordering = ("-date",)
def cave(self): # Why didn't he just make this a foreign key to Cave ?
def cave(self): # Why didn't he just make this a foreign key to Cave ?
c = CaveSlug.objects.get(slug=self.cave_slug, primary=True).cave
return c
def isLogbookEntry(self): # Function used in templates
def isLogbookEntry(self): # Function used in templates
return True
def get_absolute_url(self):
return urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
return urljoin(settings.URL_ROOT, reverse("logbookentry", kwargs={"date": self.date, "slug": self.slug}))
def __str__(self):
return f'{self.date}: {self.title}'
return f"{self.date}: {self.title}"
def get_next_by_id(self):
LogbookEntry.objects.get(id=self.id+1)
LogbookEntry.objects.get(id=self.id + 1)
def get_previous_by_id(self):
LogbookEntry.objects.get(id=self.id-1)
LogbookEntry.objects.get(id=self.id - 1)
def DayIndex(self):
"""This is used to set different colours for the different trips on
@@ -81,12 +87,12 @@ class LogbookEntry(TroggleModel):
if self in todays:
index = todays.index(self)
else:
print(f"DayIndex: Synchronization error. Restart server. {self}")
index = 0
print(f"DayIndex: Synchronization error. Restart server. {self}")
index = 0
if index not in range(0, mx):
print(f"DayIndex: More than {mx-1} LogbookEntry items on one day '{index}' {self}")
index = 0
print(f"DayIndex: More than {mx-1} LogbookEntry items on one day '{index}' {self}")
index = 0
return index
@@ -94,24 +100,37 @@ class PersonLogEntry(TroggleModel):
"""Single Person going on a trip, which may or may not be written up.
It could account for different T/U for people in same logbook entry.
"""
personexpedition = models.ForeignKey("PersonExpedition",null=True,on_delete=models.CASCADE)
personexpedition = models.ForeignKey("PersonExpedition", null=True, on_delete=models.CASCADE)
time_underground = models.FloatField(help_text="In decimal hours")
logbook_entry = models.ForeignKey(LogbookEntry,on_delete=models.CASCADE)
logbook_entry = models.ForeignKey(LogbookEntry, on_delete=models.CASCADE)
is_logbook_entry_author = models.BooleanField(default=False)
class Meta:
ordering = ('-personexpedition',)
#order_with_respect_to = 'personexpedition'
ordering = ("-personexpedition",)
# order_with_respect_to = 'personexpedition'
def next_personlog(self):
futurePTs = PersonLogEntry.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__gt = self.logbook_entry.date).order_by('logbook_entry__date').all()
futurePTs = (
PersonLogEntry.objects.filter(
personexpedition=self.personexpedition, logbook_entry__date__gt=self.logbook_entry.date
)
.order_by("logbook_entry__date")
.all()
)
if len(futurePTs) > 0:
return futurePTs[0]
else:
return None
def prev_personlog(self):
pastPTs = PersonLogEntry.objects.filter(personexpedition = self.personexpedition, logbook_entry__date__lt = self.logbook_entry.date).order_by('-logbook_entry__date').all()
pastPTs = (
PersonLogEntry.objects.filter(
personexpedition=self.personexpedition, logbook_entry__date__lt=self.logbook_entry.date
)
.order_by("-logbook_entry__date")
.all()
)
if len(pastPTs) > 0:
return pastPTs[0]
else:
@@ -121,38 +140,50 @@ class PersonLogEntry(TroggleModel):
return self.logbook_entry.cave and self.logbook_entry.cave or self.logbook_entry.place
def __str__(self):
return f'{self.personexpedition} ({self.logbook_entry.date})'
return f"{self.personexpedition} ({self.logbook_entry.date})"
class QM(TroggleModel):
"""This is based on qm.csv in trunk/expoweb/1623/204 which has the fields:
"Number","Grade","Area","Description","Page reference","Nearest station","Completion description","Comment"
"""
cave = models.ForeignKey('Cave', related_name='QMs',blank=True, null=True,on_delete=models.SET_NULL )
block = models.ForeignKey('SurvexBlock', null=True,on_delete=models.SET_NULL) # only for QMs from survex files
blockname=models.TextField(blank=True,null=True) # NB truncated copy of survexblock name with last char added
    expoyear = models.CharField(max_length=4,blank=True, null=True) # could change to datetime if logbooks similarly changed
found_by = models.ForeignKey(LogbookEntry, related_name='QMs_found',blank=True, null=True,on_delete=models.SET_NULL )
ticked = models.BooleanField(default=False) # for ticked QMs not attached to a logbook entry, should imply completion_description has text
ticked_off_by = models.ForeignKey(LogbookEntry, related_name='QMs_ticked_off',blank=True, null=True,on_delete=models.SET_NULL) # unused, ever?!
number = models.IntegerField(help_text="this is the sequential number in the year, only unique for CSV imports", )
GRADE_CHOICES=(
('A', 'A: Large obvious lead'),
('B', 'B: Average lead'),
('C', 'C: Tight unpromising lead'),
('D', 'D: Dig'),
('X', 'X: Unclimbable aven')
) # also seen "?" and "V" in imported data - see urls.py
cave = models.ForeignKey("Cave", related_name="QMs", blank=True, null=True, on_delete=models.SET_NULL)
block = models.ForeignKey("SurvexBlock", null=True, on_delete=models.SET_NULL) # only for QMs from survex files
blockname = models.TextField(blank=True, null=True) # NB truncated copy of survexblock name with last char added
expoyear = models.CharField(
max_length=4, blank=True, null=True
    ) # could change to datetime if logbooks similarly changed
found_by = models.ForeignKey(
LogbookEntry, related_name="QMs_found", blank=True, null=True, on_delete=models.SET_NULL
)
ticked = models.BooleanField(
default=False
) # for ticked QMs not attached to a logbook entry, should imply completion_description has text
ticked_off_by = models.ForeignKey(
LogbookEntry, related_name="QMs_ticked_off", blank=True, null=True, on_delete=models.SET_NULL
) # unused, ever?!
number = models.IntegerField(
help_text="this is the sequential number in the year, only unique for CSV imports",
)
GRADE_CHOICES = (
("A", "A: Large obvious lead"),
("B", "B: Average lead"),
("C", "C: Tight unpromising lead"),
("D", "D: Dig"),
("X", "X: Unclimbable aven"),
) # also seen "?" and "V" in imported data - see urls.py
grade = models.CharField(max_length=1, choices=GRADE_CHOICES)
location_description = models.TextField(blank=True)
nearest_station_description = models.CharField(max_length=400,blank=True, null=True)
nearest_station_name = models.CharField(max_length=200,blank=True, null=True)
nearest_station = models.ForeignKey('SurvexStation',blank=True, null=True,on_delete=models.SET_NULL)
area = models.CharField(max_length=100,blank=True, null=True)
completion_description = models.TextField(blank=True,null=True)
comment=models.TextField(blank=True,null=True)
nearest_station_description = models.CharField(max_length=400, blank=True, null=True)
nearest_station_name = models.CharField(max_length=200, blank=True, null=True)
nearest_station = models.ForeignKey("SurvexStation", blank=True, null=True, on_delete=models.SET_NULL)
area = models.CharField(max_length=100, blank=True, null=True)
completion_description = models.TextField(blank=True, null=True)
comment = models.TextField(blank=True, null=True)
def __str__(self):
return f'{self.code()}'
return f"{self.code()}"
def code(self):
if self.cave:
@@ -167,11 +198,10 @@ class QM(TroggleModel):
blocknamestr = "-" + str(self.blockname)
else:
blocknamestr = ""
return f'{cavestr}-{expoyearstr}-{self.number}{self.grade}{blocknamestr}'
return f"{cavestr}-{expoyearstr}-{self.number}{self.grade}{blocknamestr}"
def get_completion_url(self):
'''assumes html file named is in same folder as cave description file
'''
"""assumes html file named is in same folder as cave description file"""
cd = None
if self.completion_description:
try:
@@ -180,18 +210,29 @@ class QM(TroggleModel):
except:
cd = None
return cd
def newslug(self):
qmslug = f'{str(self.cave)}-{self.expoyear}-{self.blockname}{self.number}{self.grade}'
def newslug(self):
qmslug = f"{str(self.cave)}-{self.expoyear}-{self.blockname}{self.number}{self.grade}"
return qmslug
def get_absolute_url(self):
# This reverse resolution stuff is pure magic. Just change the regex in urls.py and everything changes to suit. Whacky.
return urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.cave.slug(),'year':self.expoyear, 'blockname':self.blockname,'qm_id':self.number,'grade':self.grade}))
return urljoin(
settings.URL_ROOT,
reverse(
"qm",
kwargs={
"cave_id": self.cave.slug(),
"year": self.expoyear,
"blockname": self.blockname,
"qm_id": self.number,
"grade": self.grade,
},
),
)
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
return QM.objects.get(id=self.id + 1)
def get_previous_by_id(self):
return QM.objects.get(id=self.id-1)
return QM.objects.get(id=self.id - 1)
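# A standalone re-derivation of the identifier format that QM.code() above produces, using
# hypothetical values (cave 204, expo year 1999, QM number 12, grade "B", no blockname):
cavestr, expoyearstr, number, grade, blocknamestr = "204", "1999", 12, "B", ""
assert f"{cavestr}-{expoyearstr}-{number}{grade}{blocknamestr}" == "204-1999-12B"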

View File

@@ -12,42 +12,46 @@ from django.db import models
from django.urls import reverse
from troggle.core.models.wallets import Wallet
# from troggle.core.models.troggle import DataIssue # circular import. Hmm
class SurvexDirectory(models.Model):
path = models.CharField(max_length=200)
cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
primarysurvexfile = models.ForeignKey('SurvexFile', related_name='primarysurvexfile', blank=True, null=True,on_delete=models.SET_NULL)
cave = models.ForeignKey("Cave", blank=True, null=True, on_delete=models.SET_NULL)
primarysurvexfile = models.ForeignKey(
"SurvexFile", related_name="primarysurvexfile", blank=True, null=True, on_delete=models.SET_NULL
)
# could also include files in directory but not referenced
class Meta:
ordering = ('id',)
ordering = ("id",)
verbose_name_plural = "Survex directories"
def __str__(self):
return "[SurvexDirectory:"+str(self.path) + " | Primary svx:" + str(self.primarysurvexfile.path) +".svx ]"
return "[SurvexDirectory:" + str(self.path) + " | Primary svx:" + str(self.primarysurvexfile.path) + ".svx ]"
class SurvexFile(models.Model):
path = models.CharField(max_length=200)
survexdirectory = models.ForeignKey("SurvexDirectory", blank=True, null=True,on_delete=models.SET_NULL)
cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
survexdirectory = models.ForeignKey("SurvexDirectory", blank=True, null=True, on_delete=models.SET_NULL)
cave = models.ForeignKey("Cave", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
ordering = ('id',)
ordering = ("id",)
# Don't change from the default as that breaks troggle webpages and internal referencing!
# def __str__(self):
# return "[SurvexFile:"+str(self.path) + "-" + str(self.survexdirectory) + "-" + str(self.cave)+"]"
# return "[SurvexFile:"+str(self.path) + "-" + str(self.survexdirectory) + "-" + str(self.cave)+"]"
def exists(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return os.path.isfile(fname)
def OpenFile(self):
fname = os.path.join(settings.SURVEX_DATA, self.path + ".svx")
return open(fname)
def SetDirectory(self):
dirpath = os.path.split(self.path)[0]
# pointless search every time we import a survex file if we know there are no duplicates..
@@ -60,24 +64,25 @@ class SurvexFile(models.Model):
survexdirectory.save()
self.survexdirectory = survexdirectory
self.save()
def __str__(self):
return self.path
class SurvexStationLookUpManager(models.Manager):
def lookup(self, name):
blocknames, sep, stationname = name.rpartition(".")
return self.get(block = SurvexBlock.objects.lookup(blocknames),
name__iexact = stationname)
return self.get(block=SurvexBlock.objects.lookup(blocknames), name__iexact=stationname)
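# A hedged sketch of the dotted-name convention the manager above expects, assuming a
# configured troggle/Django environment; the station name below is a hypothetical example.
# rpartition(".") splits the name into a survex block path and a final station name, and the
# station name is matched case-insensitively within that block.
from troggle.core.models.survex import SurvexStation

# raises a DoesNotExist error if the block path or station was never imported
st = SurvexStation.objects.lookup("smklift.1")   # hypothetical "blockpath.station" name
print(st.name, st.x, st.y, st.z)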
class SurvexStation(models.Model):
name = models.CharField(max_length=100)
block = models.ForeignKey('SurvexBlock', null=True,on_delete=models.SET_NULL)
name = models.CharField(max_length=100)
block = models.ForeignKey("SurvexBlock", null=True, on_delete=models.SET_NULL)
objects = SurvexStationLookUpManager()
x = models.FloatField(blank=True, null=True)
y = models.FloatField(blank=True, null=True)
z = models.FloatField(blank=True, null=True)
def path(self):
r = self.name
b = self.block
@@ -90,16 +95,19 @@ class SurvexStation(models.Model):
return r
class Meta:
ordering = ('id',)
ordering = ("id",)
def __str__(self):
return self.name and str(self.name) or 'no name'
return self.name and str(self.name) or "no name"
#
# Single SurvexBlock
#
# Single SurvexBlock
#
class SurvexBlockLookUpManager(models.Manager):
"""Don't know what this does, suspect it is part of the Django admin
system"""
def lookup(self, name):
if name == "":
blocknames = []
@@ -110,38 +118,39 @@ class SurvexBlockLookUpManager(models.Manager):
block = SurvexBlock.objects.get(parent=block, name__iexact=blockname)
return block
class SurvexBlock(models.Model):
"""One begin..end block within a survex file. The basic element of a survey trip.
"""
"""One begin..end block within a survex file. The basic element of a survey trip."""
objects = SurvexBlockLookUpManager()
name = models.CharField(max_length=100)
title = models.CharField(max_length=200)
parent = models.ForeignKey('SurvexBlock', blank=True, null=True,on_delete=models.SET_NULL)
cave = models.ForeignKey('Cave', blank=True, null=True,on_delete=models.SET_NULL)
date = models.DateField(blank=True, null=True)
expedition = models.ForeignKey('Expedition', blank=True, null=True,on_delete=models.SET_NULL)
survexfile = models.ForeignKey("SurvexFile", blank=True, null=True,on_delete=models.SET_NULL)
survexpath = models.CharField(max_length=200) # the path for the survex stations
    scanswallet = models.ForeignKey("Wallet", null=True,on_delete=models.SET_NULL) # only ONE wallet per block. The most recent seen overwrites.. ugh.
legsall = models.IntegerField(null=True) # summary data for this block
name = models.CharField(max_length=100)
title = models.CharField(max_length=200)
parent = models.ForeignKey("SurvexBlock", blank=True, null=True, on_delete=models.SET_NULL)
cave = models.ForeignKey("Cave", blank=True, null=True, on_delete=models.SET_NULL)
date = models.DateField(blank=True, null=True)
expedition = models.ForeignKey("Expedition", blank=True, null=True, on_delete=models.SET_NULL)
survexfile = models.ForeignKey("SurvexFile", blank=True, null=True, on_delete=models.SET_NULL)
survexpath = models.CharField(max_length=200) # the path for the survex stations
scanswallet = models.ForeignKey(
"Wallet", null=True, on_delete=models.SET_NULL
    ) # only ONE wallet per block. The most recent seen overwrites.. ugh.
legsall = models.IntegerField(null=True) # summary data for this block
legslength = models.FloatField(null=True)
class Meta:
ordering = ('id',)
ordering = ("id",)
def __str__(self):
return "[SurvexBlock:"+ str(self.name) + "-path:" + \
str(self.survexpath) + "-cave:" + \
str(self.cave) + "]"
def __str__(self):
return self.name and str(self.name) or 'no name'
return "[SurvexBlock:" + str(self.name) + "-path:" + str(self.survexpath) + "-cave:" + str(self.cave) + "]"
def isSurvexBlock(self): # Function used in templates
def __str__(self):
return self.name and str(self.name) or "no name"
def isSurvexBlock(self): # Function used in templates
return True
def DayIndex(self):
@@ -152,51 +161,56 @@ class SurvexBlock(models.Model):
if index not in range(0, mx):
print(f"DayIndex: More than {mx-1} SurvexBlock items on one day '{index}' {self}")
index = 0
#return list(self.survexblock_set.all()).index(self)
# return list(self.survexblock_set.all()).index(self)
return index
class SurvexPersonRole(models.Model):
survexblock = models.ForeignKey('SurvexBlock',on_delete=models.CASCADE)
# increasing levels of precision, Surely we only need survexblock and person now that we have no link to a logbook entry?
personname = models.CharField(max_length=100)
person = models.ForeignKey('Person', blank=True, null=True,on_delete=models.SET_NULL)
personexpedition = models.ForeignKey('PersonExpedition', blank=True, null=True,on_delete=models.SET_NULL)
survexblock = models.ForeignKey("SurvexBlock", on_delete=models.CASCADE)
# increasing levels of precision, Surely we only need survexblock and person now that we have no link to a logbook entry?
personname = models.CharField(max_length=100)
person = models.ForeignKey("Person", blank=True, null=True, on_delete=models.SET_NULL)
personexpedition = models.ForeignKey("PersonExpedition", blank=True, null=True, on_delete=models.SET_NULL)
# expeditionday = models.ForeignKey("ExpeditionDay", null=True,on_delete=models.SET_NULL)
def __str__(self):
return str(self.personname) + " - " + str(self.survexblock)
return str(self.personname) + " - " + str(self.survexblock)
class SingleScan(models.Model):
"""A single file holding an image. Could be raw notes, an elevation plot or whatever
"""
ffile = models.CharField(max_length=200)
name = models.CharField(max_length=200)
wallet = models.ForeignKey("Wallet", null=True,on_delete=models.SET_NULL)
"""A single file holding an image. Could be raw notes, an elevation plot or whatever"""
ffile = models.CharField(max_length=200)
name = models.CharField(max_length=200)
wallet = models.ForeignKey("Wallet", null=True, on_delete=models.SET_NULL)
class Meta:
ordering = ('name',)
ordering = ("name",)
def get_absolute_url(self):
return urljoin(settings.URL_ROOT, reverse('scansingle', kwargs={"path":re.sub("#", "%23", self.wallet.walletname), "file":self.name}))
return urljoin(
settings.URL_ROOT,
reverse("scansingle", kwargs={"path": re.sub("#", "%23", self.wallet.walletname), "file": self.name}),
)
def __str__(self):
return "Scan Image: " + str(self.name) + " in " + str(self.wallet)
class DrawingFile(models.Model):
"""A file holding a Therion (several types) or a Tunnel drawing
"""
dwgpath = models.CharField(max_length=200)
dwgname = models.CharField(max_length=200)
dwgwallets = models.ManyToManyField("Wallet") # implicitly links via folders to scans to SVX files
scans = models.ManyToManyField("SingleScan") # implicitly links via scans to SVX files
    dwgcontains = models.ManyToManyField("DrawingFile") # case when it's a frame type
filesize = models.IntegerField(default=0)
npaths = models.IntegerField(default=0)
survexfiles = models.ManyToManyField("SurvexFile") # direct link to SVX files - not populated yet
"""A file holding a Therion (several types) or a Tunnel drawing"""
dwgpath = models.CharField(max_length=200)
dwgname = models.CharField(max_length=200)
dwgwallets = models.ManyToManyField("Wallet") # implicitly links via folders to scans to SVX files
scans = models.ManyToManyField("SingleScan") # implicitly links via scans to SVX files
    dwgcontains = models.ManyToManyField("DrawingFile") # case when it's a frame type
filesize = models.IntegerField(default=0)
npaths = models.IntegerField(default=0)
survexfiles = models.ManyToManyField("SurvexFile") # direct link to SVX files - not populated yet
class Meta:
ordering = ('dwgpath',)
ordering = ("dwgpath",)
def __str__(self):
return "Drawing File: " + str(self.dwgname) + " (" + str(self.filesize) + " bytes)"
return "Drawing File: " + str(self.dwgname) + " (" + str(self.filesize) + " bytes)"

View File

@@ -7,7 +7,7 @@ from decimal import Decimal, getcontext
from subprocess import call
from urllib.parse import urljoin
getcontext().prec=2 #use 2 significant figures for decimal calculations
getcontext().prec = 2 # use 2 significant figures for decimal calculations
from django.conf import settings
from django.contrib import admin
@@ -28,11 +28,13 @@ the django Object Relational Mapping (ORM).
There are more subclasses define in models_caves.py models_survex.py etc.
"""
class TroggleModel(models.Model):
"""This class is for adding fields and methods which all of our models will have.
"""
"""This class is for adding fields and methods which all of our models will have."""
new_since_parsing = models.BooleanField(default=False, editable=False)
non_public = models.BooleanField(default=False)
def object_name(self):
return self._meta.object_name
@@ -42,128 +44,135 @@ class TroggleModel(models.Model):
class Meta:
abstract = True
class DataIssue(TroggleModel):
"""When importing cave data any validation problems produce a message which is
    recorded as a DataIssue. The django admin system automatically produces a page listing
"""When importing cave data any validation problems produce a message which is
    recorded as a DataIssue. The django admin system automatically produces a page listing
these at /admin/core/dataissue/
This is a use of the NOTIFICATION pattern:
This is a use of the NOTIFICATION pattern:
https://martinfowler.com/eaaDev/Notification.html
We have replaced all assertions in the code with messages and local fix-ups or skips:
https://martinfowler.com/articles/replaceThrowWithNotification.html
See also the use of stash_data_issue() & store_data_issues() in parsers/survex.py which defer writing to the database until the end of the import.
"""
date = models.DateTimeField(auto_now_add=True, blank=True)
parser = models.CharField(max_length=50, blank=True, null=True)
message = models.CharField(max_length=800, blank=True, null=True)
url = models.CharField(max_length=300, blank=True, null=True) # link to offending object
url = models.CharField(max_length=300, blank=True, null=True) # link to offending object
class Meta:
ordering = ['date']
ordering = ["date"]
def __str__(self):
return f"{self.parser} - {self.message}"
#
#
# single Expedition, usually seen by year
#
class Expedition(TroggleModel):
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
year = models.CharField(max_length=20, unique=True)
name = models.CharField(max_length=100)
logbookfile = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return self.year
class Meta:
ordering = ('-year',)
get_latest_by = 'year'
ordering = ("-year",)
get_latest_by = "year"
def get_absolute_url(self):
return urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
return urljoin(settings.URL_ROOT, reverse("expedition", args=[self.year]))
# class ExpeditionDay(TroggleModel):
# """Exists only on Expedition now. Removed links from logbookentry, personlogentry, survex stuff etc.
# """
# expedition = models.ForeignKey("Expedition",on_delete=models.CASCADE)
# date = models.DateField()
# """Exists only on Expedition now. Removed links from logbookentry, personlogentry, survex stuff etc.
# """
# expedition = models.ForeignKey("Expedition",on_delete=models.CASCADE)
# date = models.DateField()
# class Meta:
# ordering = ('date',)
# class Meta:
# ordering = ('date',)
class Person(TroggleModel):
"""single Person, can go on many years
"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
fullname = models.CharField(max_length=200)
nickname = models.CharField(max_length=200)
is_vfho = models.BooleanField(help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.", default=False)
mug_shot = models.CharField(max_length=100, blank=True,null=True)
blurb = models.TextField(blank=True,null=True)
orderref = models.CharField(max_length=200) # for alphabetic
"""single Person, can go on many years"""
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
fullname = models.CharField(max_length=200)
nickname = models.CharField(max_length=200)
is_vfho = models.BooleanField(
help_text="VFHO is the Vereines f&uuml;r H&ouml;hlenkunde in Obersteier, a nearby Austrian caving club.",
default=False,
)
mug_shot = models.CharField(max_length=100, blank=True, null=True)
blurb = models.TextField(blank=True, null=True)
orderref = models.CharField(max_length=200) # for alphabetic
def get_absolute_url(self):
return urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
return urljoin(
settings.URL_ROOT, reverse("person", kwargs={"first_name": self.first_name, "last_name": self.last_name})
)
class Meta:
verbose_name_plural = "People"
ordering = ('orderref',) # "Wookey" makes too complex for: ('last_name', 'first_name')
ordering = ("orderref",) # "Wookey" makes too complex for: ('last_name', 'first_name')
def __str__(self):
if self.last_name:
return f"{self.first_name} {self.last_name}"
return self.first_name
def notability(self):
"""This is actually recency: all recent cavers, weighted by number of expos
"""
"""This is actually recency: all recent cavers, weighted by number of expos"""
notability = Decimal(0)
max_expo_val = 0
max_expo_year = Expedition.objects.all().aggregate(models.Max('year'))
max_expo_val = int(max_expo_year['year__max']) + 1
max_expo_year = Expedition.objects.all().aggregate(models.Max("year"))
max_expo_val = int(max_expo_year["year__max"]) + 1
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
if not personexpedition.is_guest:
notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
return notability
def bisnotable(self):
"""Boolean: is this person notable?
"""
return self.notability() > Decimal(1)/Decimal(3)
"""Boolean: is this person notable?"""
return self.notability() > Decimal(1) / Decimal(3)
def surveyedleglength(self):
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
return sum([personexpedition.surveyedleglength() for personexpedition in self.personexpedition_set.all()])
def first(self):
return self.personexpedition_set.order_by('-expedition')[0]
return self.personexpedition_set.order_by("-expedition")[0]
def last(self):
return self.personexpedition_set.order_by('expedition')[0]
return self.personexpedition_set.order_by("expedition")[0]
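# A standalone worked example of the recency weighting in Person.notability() above, using
# hypothetical attendance years. Each non-guest expedition contributes
# 1 / (most_recent_expo_year + 1 - year), and bisnotable() treats a total above 1/3 as notable.
from decimal import Decimal

most_recent_expo_year = 2022                 # hypothetical maximum Expedition.year
attended = [2019, 2021, 2022]                # hypothetical non-guest attendances
max_expo_val = most_recent_expo_year + 1
notability = sum(Decimal(1) / (max_expo_val - y) for y in attended)
print(notability, notability > Decimal(1) / Decimal(3))   # 0.25 + 0.5 + 1 = 1.75, so notable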
class PersonExpedition(TroggleModel):
"""Person's attendance to one Expo
"""
expedition = models.ForeignKey(Expedition,on_delete=models.CASCADE)
person = models.ForeignKey(Person,on_delete=models.CASCADE)
slugfield = models.SlugField(max_length=50,blank=True, null=True) # 2022 to be used in future
"""Person's attendance to one Expo"""
is_guest = models.BooleanField(default=False)
    nickname = models.CharField(max_length=100,blank=True, null=True) # remove this
expedition = models.ForeignKey(Expedition, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
slugfield = models.SlugField(max_length=50, blank=True, null=True) # 2022 to be used in future
is_guest = models.BooleanField(default=False)
    nickname = models.CharField(max_length=100, blank=True, null=True) # remove this
class Meta:
ordering = ('-expedition',)
#order_with_respect_to = 'expedition'
ordering = ("-expedition",)
# order_with_respect_to = 'expedition'
def __str__(self):
return f"{self.person}: ({self.expedition})"
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
# why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
return f"{self.person.first_name} ({self.nickname}) {self.person.last_name}"
@@ -172,12 +181,19 @@ class PersonExpedition(TroggleModel):
return self.person.first_name
def get_absolute_url(self):
return urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
return urljoin(
settings.URL_ROOT,
reverse(
"personexpedition",
kwargs={
"first_name": self.person.first_name,
"last_name": self.person.last_name,
"year": self.expedition.year,
},
),
)
def surveyedleglength(self):
"""Survey length for this person on all survex trips on this expedition
"""
survexblocks = [personrole.survexblock for personrole in self.survexpersonrole_set.all() ]
return sum([survexblock.legslength for survexblock in set(survexblocks)])
"""Survey length for this person on all survex trips on this expedition"""
survexblocks = [personrole.survexblock for personrole in self.survexpersonrole_set.all()]
return sum([survexblock.legslength for survexblock in set(survexblocks)])

View File

@@ -11,40 +11,41 @@ from django.conf import settings
from django.db import models
from django.urls import reverse
#from troggle.core.models.survex import SurvexBlock
# from troggle.core.models.survex import SurvexBlock
# from troggle.core.models.troggle import DataIssue # circular import. Hmm
class Wallet(models.Model):
'''We do not keep the JSON values in the database, we query them afresh each time,
"""We do not keep the JSON values in the database, we query them afresh each time,
    but we will change this when we need to do a Django query on e.g. personname
'''
fpath = models.CharField(max_length=200)
walletname = models.CharField(max_length=200)
walletdate = models.DateField(blank=True, null=True)
walletyear = models.DateField(blank=True, null=True)
"""
fpath = models.CharField(max_length=200)
walletname = models.CharField(max_length=200)
walletdate = models.DateField(blank=True, null=True)
walletyear = models.DateField(blank=True, null=True)
class Meta:
ordering = ('walletname',)
ordering = ("walletname",)
def get_absolute_url(self):
return urljoin(settings.URL_ROOT, reverse('singlewallet', kwargs={"path":re.sub("#", "%23", self.walletname)}))
return urljoin(settings.URL_ROOT, reverse("singlewallet", kwargs={"path": re.sub("#", "%23", self.walletname)}))
def get_json(self):
"""Read the JSON file for the wallet and do stuff
"""
#jsonfile = Path(self.fpath, 'contents.json')
"""Read the JSON file for the wallet and do stuff"""
# jsonfile = Path(self.fpath, 'contents.json')
# Get from git repo instead
# :drawings: walletjson/2022/2022#01/contents.json
# fpath = /mnt/d/EXPO/expofiles/surveyscans/1999/1999#02
fp = Path(self.fpath)
wname = fp.name
wyear = fp.parent.name
wurl = f"/scanupload/{self.walletname}" # .replace('#', ':')
wurl = f"/scanupload/{self.walletname}" # .replace('#', ':')
jsonfile = Path(settings.DRAWINGS_DATA, "walletjson") / wyear / wname / "contents.json"
if not Path(jsonfile).is_file():
#print(f'{jsonfile} is not a file')
# print(f'{jsonfile} is not a file')
return None
else:
with open(jsonfile) as json_f:
@@ -52,65 +53,63 @@ class Wallet(models.Model):
waldata = json.load(json_f)
except:
message = f"! {str(self.walletname)} Failed to load {jsonfile} JSON file"
#print(message)
# print(message)
raise
if waldata["date"]:
datestr = waldata["date"].replace('.','-')
datestr = waldata["date"].replace(".", "-")
try:
thisdate = datetime.date.fromisoformat(datestr)
except ValueError:
# probably a single digit day number. HACKUS MAXIMUS.
# clearly we need to fix this when we first import date strings..
datestr = datestr[:-1] + '0' + datestr[-1]
print(f' - {datestr=} ')
datestr = datestr[:-1] + "0" + datestr[-1]
print(f" - {datestr=} ")
try:
thisdate = datetime.date.fromisoformat(datestr)
self.walletdate = thisdate
self.walletdate = thisdate
self.save()
try:
waldata["date"] = thisdate.isoformat()
except:
message = f"! {str(self.walletname)} Date formatting failure {thisdate}. Failed to load from {jsonfile} JSON file"
from troggle.core.models.troggle import \
DataIssue
DataIssue.objects.update_or_create(parser='scans', message=message, url=wurl)
from troggle.core.models.troggle import DataIssue
DataIssue.objects.update_or_create(parser="scans", message=message, url=wurl)
except:
message = f"! {str(self.walletname)} Date format not ISO {datestr}. Failed to load from {jsonfile} JSON file"
from troggle.core.models.troggle import DataIssue
DataIssue.objects.update_or_create(parser='scans', message=message, url=wurl)
DataIssue.objects.update_or_create(parser="scans", message=message, url=wurl)
return waldata
def year(self):
'''This gets the year syntactically without opening and reading the JSON
'''
def year(self):
"""This gets the year syntactically without opening and reading the JSON"""
if len(self.walletname) < 5:
return None
return None
if self.walletname[4] != "#":
return None
return None
year = int(self.walletname[0:4])
if year < 1975 or year > 2050:
return None
return None
else:
self.walletyear = datetime.date(year, 1, 1)
self.walletyear = datetime.date(year, 1, 1)
self.save()
return str(year)
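# A standalone restatement of the wallet-name convention year() above relies on: names of
# the form "YYYY#NN" (e.g. "1999#02", as in the comment near get_json() above), where the
# first four characters are the year and the fifth character must be "#".
def wallet_year(walletname):
    if len(walletname) < 5 or walletname[4] != "#":
        return None
    year = int(walletname[0:4])
    return str(year) if 1975 <= year <= 2050 else None

assert wallet_year("1999#02") == "1999"
assert wallet_year("surveyscans") is None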
    # Yes this is horribly, horribly inefficient, esp. for a page that has date, people and cave in it
    def date(self):
        """Reads all the JSON data just to get the JSON date.
        """
        """Reads all the JSON data just to get the JSON date."""
if self.walletdate:
return self.walletdate
if not self.get_json():
return None
jsondata = self.get_json() # use walrus operator?
jsondata = self.get_json() # use walrus operator?
datestr = jsondata["date"]
if not datestr:
return None
else:
datestr = datestr.replace('.','-')
datestr = datestr.replace(".", "-")
try:
samedate = datetime.date.fromisoformat(datestr)
self.walletdate = samedate.isoformat()
@@ -122,13 +121,13 @@ class Wallet(models.Model):
samedate = None
self.save()
return self.walletdate
def people(self):
if not self.get_json():
return None
jsondata = self.get_json()
return jsondata["people"]
def cave(self):
if not self.get_json():
return None
@@ -142,9 +141,8 @@ class Wallet(models.Model):
return jsondata["name"]
def get_fnames(self):
'''Filenames without the suffix, i.e. without the ".jpg"
'''
dirpath = Path(settings.SCANS_ROOT, self.fpath) # does nowt as fpath is a rooted path already
'''Filenames without the suffix, i.e. without the ".jpg"'''
dirpath = Path(settings.SCANS_ROOT, self.fpath) # does nowt as fpath is a rooted path already
files = []
if not self.fpath:
files.append(f"Incorrect path to wallet contents: '{self.fpath}'")
@@ -163,19 +161,18 @@ class Wallet(models.Model):
files.append("FileNotFoundError")
pass
return files
def fixsurvextick(self, tick):
blocks = self.survexblock_set.all()
#blocks = SurvexBlock.objects.filter(scanswallet = self)
# blocks = SurvexBlock.objects.filter(scanswallet = self)
result = tick
for b in blocks:
if b.survexfile: # if any exist in db, no check for validity or a real file. Refactor.
result = "seagreen" # slightly different shade of green
for b in blocks:
if b.survexfile: # if any exist in db, no check for validity or a real file. Refactor.
result = "seagreen" # slightly different shade of green
return result
def get_ticks(self):
"""Reads all the JSON data and sets the colour of the completion tick for each condition
"""
"""Reads all the JSON data and sets the colour of the completion tick for each condition"""
ticks = {}
waldata = self.get_json()
if not waldata:
@@ -189,7 +186,7 @@ class Wallet(models.Model):
ticks["W"] = "black"
return ticks
ticks = {}
# Initially, are there any required survex files present ?
# Note that we can't set the survexblock here on the wallet as that info is only available while parsing the survex file
survexok = "red"
@@ -199,14 +196,14 @@ class Wallet(models.Model):
ticks["S"] = "green"
else:
if waldata["survex file"]:
if not type(waldata["survex file"])==list: # a string also is a sequence type, so do it this way
if not type(waldata["survex file"]) == list: # a string also is a sequence type, so do it this way
waldata["survex file"] = [waldata["survex file"]]
ngood = 0
nbad = 0
ticks["S"] = "purple"
for sx in waldata["survex file"]:
#this logic appears in several places, inc uploads.py). Refactor.
if sx !="":
# this logic appears in several places, inc uploads.py). Refactor.
if sx != "":
if Path(sx).suffix.lower() != ".svx":
sx = sx + ".svx"
if (Path(settings.SURVEX_DATA) / sx).is_file():
@@ -221,9 +218,9 @@ class Wallet(models.Model):
ticks["S"] = "red"
else:
ticks["S"] = "black"
# Cave Description
if waldata["description written"]:
# Cave Description
if waldata["description written"]:
ticks["C"] = "green"
else:
ticks["C"] = survexok
@@ -235,10 +232,9 @@ class Wallet(models.Model):
if not self.year():
ticks["Q"] = "darkgrey"
else:
if int(self.year()) < 2015:
if int(self.year()) < 2015:
ticks["Q"] = "lightgrey"
# Notes, Plan, Elevation; Tunnel
if waldata["electronic survey"]:
ticks["N"] = "green"
@@ -246,9 +242,9 @@ class Wallet(models.Model):
ticks["E"] = "green"
ticks["T"] = "green"
else:
files = self.get_fnames()
# Notes required
notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
notes_scanned = reduce(operator.or_, [f.endswith("notes") for f in files], notes_scanned)
@@ -281,15 +277,14 @@ class Wallet(models.Model):
ticks["T"] = "red"
else:
ticks["T"] = "green"
# Website
if waldata["website updated"]:
ticks["W"] = "green"
else:
ticks["W"] = "red"
return ticks
def __str__(self):
return "[" + str(self.walletname) + " (Wallet)]"

View File

@@ -3,7 +3,7 @@ from django.utils.safestring import mark_safe
register = template.Library()
@register.filter()
def link(value):
return mark_safe(f"<a href='{value.get_absolute_url()}'>"+str(value)+"</a>")
return mark_safe(f"<a href='{value.get_absolute_url()}'>" + str(value) + "</a>")

View File

@@ -10,7 +10,7 @@ from decimal import Decimal, getcontext
from pathlib import Path
from urllib.parse import urljoin
getcontext().prec=2 #use 2 significant figures for decimal calculations
getcontext().prec = 2 # use 2 significant figures for decimal calculations
from django.conf import settings
from django.contrib import admin
@@ -22,7 +22,7 @@ from django.urls import reverse
import settings
'''This file declares TROG a globally visible object for caches.
"""This file declares TROG a globally visible object for caches.
TROG is a dictionary holding globally visible indexes and cache functions.
It is a Global Object, see https://python-patterns.guide/python/module-globals/
@@ -38,78 +38,79 @@ This needs to be in a multi-user database with transactions. However it is
useful when doing a data import with databaseReset.py as that has a single
thread.
'''
"""
TROG = {
'pagecache' : {
'expedition' : {}
},
'caves' : {
'gcavelookup' : {},
'gcavecount' : {}
}
}
TROG = {"pagecache": {"expedition": {}}, "caves": {"gcavelookup": {}, "gcavecount": {}}}
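# A minimal sketch of how the TROG module-global cache above is intended to be used during a
# single-threaded import (e.g. databaseReset.py). The key and value shown are hypothetical,
# and the import path is assumed to be this module (troggle/core/utils.py).
from troggle.core.utils import TROG

TROG["caves"]["gcavelookup"]["204"] = object()          # cache a parsed object under a key
hit = TROG["caves"]["gcavelookup"].get("204")           # cheap lookup on later passes
TROG["pagecache"]["expedition"].clear()                 # drop a whole page cache when stale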
# This is module-level executable. This is a Bad Thing. Especially when it touches the file system.
try:
logging.basicConfig(level=logging.DEBUG,
filename=settings.LOGFILE,
filemode='w')
logging.basicConfig(level=logging.DEBUG, filename=settings.LOGFILE, filemode="w")
except:
# Opening of file for writing is going to fail currently, so decide it doesn't matter for now
# Opening of file for writing is going to fail currently, so decide it doesn't matter for now
pass
def get_process_memory():
usage=resource.getrusage(resource.RUSAGE_SELF)
return usage[2]/1024.0
usage = resource.getrusage(resource.RUSAGE_SELF)
return usage[2] / 1024.0
def chaosmonkey(n):
'''returns True once every n calls - randomly'''
if random.randrange(0,n) != 0:
"""returns True once every n calls - randomly"""
if random.randrange(0, n) != 0:
return False
# print("CHAOS strikes !", file=sys.stderr)
return True
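# A small sketch of chaosmonkey() above: it fires with probability 1/n per call, so it can be
# used to exercise a rarely-taken error path during a long import run (same module, so no
# import is needed here).
hits = sum(1 for _ in range(10_000) if chaosmonkey(100))
print(f"chaosmonkey(100) fired {hits} times in 10,000 calls (about 100 expected)")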
def only_commit(fname, message):
'''Only used to commit a survex file edited and saved in view/survex.py
'''
"""Only used to commit a survex file edited and saved in view/survex.py"""
git = settings.GIT
cwd = fname.parent
filename = fname.name
#print(f'{fname=} ')
# print(f'{fname=} ')
try:
cp_add = subprocess.run([git, "add", filename], cwd=cwd, capture_output=True, text=True)
if cp_add.returncode != 0:
msgdata = f'Ask a nerd to fix this problem in only_commit().\n--{cp_add.stderr}\n--{cp_add.stdout}\n--return code:{str(cp_add.returncode)}'
raise WriteAndCommitError(f'CANNOT git ADD on server for this file {filename}. Edits saved but not added to git.\n\n' + msgdata)
msgdata = f"Ask a nerd to fix this problem in only_commit().\n--{cp_add.stderr}\n--{cp_add.stdout}\n--return code:{str(cp_add.returncode)}"
raise WriteAndCommitError(
f"CANNOT git ADD on server for this file {filename}. Edits saved but not added to git.\n\n" + msgdata
)
cp_commit = subprocess.run([git, "commit", "-m", message], cwd=cwd, capture_output=True, text=True)
# This produces return code = 1 if it commits OK, but when the local repo still needs to be pushed to origin/loser
# which will be the case when running a test troggle system on a development machine
devok_text ='''On branch master
devok_text = """On branch master
Your branch is ahead of 'origin/master' by 1 commit.
(use "git push" to publish your local commits)
nothing to commit, working tree clean
'''
"""
if cp_commit.returncode == 1 and cp_commit.stdout == devok_text:
pass
else:
if cp_commit.returncode != 0 and not cp_commit.stdout.strip().endswith('nothing to commit, working tree clean'):
msgdata = f'--Ask a nerd to fix this problem in only_commit().\n--{cp_commit.stderr}\n--"{cp_commit.stdout}"\n--return code:{str(cp_commit.returncode)}'
if cp_commit.returncode != 0 and not cp_commit.stdout.strip().endswith(
"nothing to commit, working tree clean"
):
msgdata = f'--Ask a nerd to fix this problem in only_commit().\n--{cp_commit.stderr}\n--"{cp_commit.stdout}"\n--return code:{str(cp_commit.returncode)}'
print(msgdata)
raise WriteAndCommitError(f'Error code with git on server for this file {filename}. Edits saved, added to git, but NOT committed.\n\n' + msgdata)
raise WriteAndCommitError(
f"Error code with git on server for this file {filename}. Edits saved, added to git, but NOT committed.\n\n"
+ msgdata
)
except subprocess.SubprocessError:
raise WriteAndCommitError(f'CANNOT git COMMIT on server for this file {filename}. Subprocess error. Edits not saved.\nAsk a nerd to fix this.')
raise WriteAndCommitError(
f"CANNOT git COMMIT on server for this file {filename}. Subprocess error. Edits not saved.\nAsk a nerd to fix this."
)
def write_and_commit(files, message):
"""Writes the content to the filepath and adds and commits the file to git. If this fails, a WriteAndCommitError is raised.
This does not create any needed intermediate folders, which is what we do when writing survex files, so functionality here
is duplicated in only_commit()
These need refactoring
"""
git = settings.GIT
@@ -125,46 +126,73 @@ def write_and_commit(files, message):
kwargs = {"encoding": encoding}
else:
mode = "wb"
kwargs = {}
kwargs = {}
try:
with open(filepath, mode, **kwargs) as f:
print(f'WRITING{cwd}---{filename} ')
print(f"WRITING{cwd}---{filename} ")
# as the wsgi process www-data, we have group write-access but are not owner, so cannot chmod.
# os.chmod(filepath, 0o664) # set file permissions to rw-rw-r--
f.write(content)
except PermissionError:
raise WriteAndCommitError(f'CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file {filename}. Ask a nerd to fix this.')
raise WriteAndCommitError(
f"CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file {filename}. Ask a nerd to fix this."
)
cp_diff = subprocess.run([git, "diff", filename], cwd=cwd, capture_output=True, text=True)
if cp_diff.returncode == 0:
cp_add = subprocess.run([git, "add", filename], cwd=cwd, capture_output=True, text=True)
if cp_add.returncode != 0:
msgdata = 'Ask a nerd to fix this.\n\n' + cp_add.stderr + '\n\n' + cp_add.stdout + '\n\nreturn code: ' + str(cp_add.returncode)
raise WriteAndCommitError(f'CANNOT git on server for this file {filename}. Edits saved but not added to git.\n\n' + msgdata)
msgdata = (
"Ask a nerd to fix this.\n\n"
+ cp_add.stderr
+ "\n\n"
+ cp_add.stdout
+ "\n\nreturn code: "
+ str(cp_add.returncode)
)
raise WriteAndCommitError(
f"CANNOT git on server for this file {filename}. Edits saved but not added to git.\n\n"
+ msgdata
)
else:
            print(f"No change {filepath}")
cp_commit = subprocess.run([git, "commit", "-m", message], cwd=cwd, capture_output=True, text=True)
cp_status = subprocess.run([git, "status"], cwd=cwd, capture_output=True, text=True)
# This produces return code = 1 if it commits OK, but when the repo still needs to be pushed to origin/expoweb
if cp_status.stdout.split("\n")[-2] != 'nothing to commit, working tree clean':
if cp_status.stdout.split("\n")[-2] != "nothing to commit, working tree clean":
print("FOO: ", cp_status.stdout.split("\n")[-2])
msgdata = 'Ask a nerd to fix this.\n\n' + cp_status.stderr + '\n\n' + cp_status.stdout + '\n\nreturn code: ' + str(cp_status.returncode)
raise WriteAndCommitError(f'Error code with git on server for this file {filename}. Edits saved, added to git, but NOT committed.\n\n' + msgdata)
msgdata = (
"Ask a nerd to fix this.\n\n"
+ cp_status.stderr
+ "\n\n"
+ cp_status.stdout
+ "\n\nreturn code: "
+ str(cp_status.returncode)
)
raise WriteAndCommitError(
f"Error code with git on server for this file {filename}. Edits saved, added to git, but NOT committed.\n\n"
+ msgdata
)
except subprocess.SubprocessError:
raise WriteAndCommitError(f'CANNOT git on server for this file {filename}. Subprocess error. Edits not saved.\nAsk a nerd to fix this.')
raise WriteAndCommitError(
f"CANNOT git on server for this file {filename}. Subprocess error. Edits not saved.\nAsk a nerd to fix this."
)
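# A hedged usage sketch for write_and_commit() above. It appears to take (filepath, content,
# encoding) triples, matching what Entrance.file_output() earlier in this commit returns; the
# path, content and commit message here are hypothetical, git must be configured on the
# server, and the import path is assumed.
from pathlib import Path
from troggle.core.utils import write_and_commit, WriteAndCommitError

files = [(Path("/tmp/demo-entrance.xml"), "<entrance>demo</entrance>\n", "utf8")]
try:
    write_and_commit(files, "Troggle online: hypothetical demo edit")
except WriteAndCommitError as e:
    print(e.message)   # e.g. permissions or git problems, as raised above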
class WriteAndCommitError(Exception):
    """Exception class for errors writing files and committing them to git"""
def __init__(self, message):
self.message = message
def __str__(self):
return f'WriteAndCommitError: {self.message}'
return f"WriteAndCommitError: {self.message}"
def writetrogglefile(filepath, filecontent):
'''Commit the new saved file to git
"""Commit the new saved file to git
Callers to cave.writeDataFile() or entrance.writeDataFile() should handle the exception PermissionsError explicitly
'''
"""
# GIT see also core/views/expo.py editexpopage()
# GIT see also core/views/uploads.py dwgupload()
# Called from core/models/caves.py Cave.writeDataFile() Entrance.writeDataFile()
@@ -175,41 +203,47 @@ def writetrogglefile(filepath, filecontent):
# as the wsgi process www-data, we have group write-access but are not owner, so cannot chmod.
# do not trap exceptions, pass them up to the view that called this function
print(f'WRITING{cwd}---{filename} ')
print(f"WRITING{cwd}---{filename} ")
with open(filepath, "w") as f:
f.write(filecontent)
#os.chmod(filepath, 0o664) # set file permissions to rw-rw-r--
# os.chmod(filepath, 0o664) # set file permissions to rw-rw-r--
sp = subprocess.run([git, "add", filename], cwd=cwd, capture_output=True, check=True, text=True)
if sp.returncode != 0:
out = sp.stdout
if len(out) > 160:
out = out[:75] + "\n <Long output curtailed>\n" + out[-75:]
print(f'git ADD {cwd}:\n\n' + str(sp.stderr) + '\n\n' + out + '\n\nreturn code: ' + str(sp.returncode))
print(f"git ADD {cwd}:\n\n" + str(sp.stderr) + "\n\n" + out + "\n\nreturn code: " + str(sp.returncode))
sp = subprocess.run([git, "commit", "-m", f'Troggle online: cave or entrance edit -{filename}'], cwd=cwd, capture_output=True, check=True, text=True)
sp = subprocess.run(
[git, "commit", "-m", f"Troggle online: cave or entrance edit -{filename}"],
cwd=cwd,
capture_output=True,
check=True,
text=True,
)
if sp.returncode != 0:
out = sp.stdout
if len(out) > 160:
out = out[:75] + "\n <Long output curtailed>\n" + out[-75:]
print(f'git COMMIT {cwd}:\n\n' + str(sp.stderr) + '\n\n' + out + '\n\nreturn code: ' + str(sp.returncode))
print(f"git COMMIT {cwd}:\n\n" + str(sp.stderr) + "\n\n" + out + "\n\nreturn code: " + str(sp.returncode))
# not catching and re-raising any exceptions yet, inc. the stderr etc.,. We should do that.
def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
"""Looks up instance using lookupAttribs and carries out the following:
-if instance does not exist in DB: add instance to DB, return (new instance, True)
-if instance exists in DB and was modified using Troggle: do nothing, return (existing instance, False)
-if instance exists in DB and was not modified using Troggle: overwrite instance, return (instance, False)
The checking is accomplished using Django's get_or_create and the new_since_parsing boolean field
defined in core.models.TroggleModel.
We are not using new_since_parsing - it is a fossil from Aaron Curtis's design in 2006. So it is always false.
NOTE: this takes twice as long as simply creating a new object with the given values.
As of Jan.2023 this function is not used anywhere in troggle.
-if instance does not exist in DB: add instance to DB, return (new instance, True)
-if instance exists in DB and was modified using Troggle: do nothing, return (existing instance, False)
-if instance exists in DB and was not modified using Troggle: overwrite instance, return (instance, False)
The checking is accomplished using Django's get_or_create and the new_since_parsing boolean field
defined in core.models.TroggleModel.
We are not using new_since_parsing - it is a fossil from Aaron Curtis's design in 2006. So it is always false.
NOTE: this takes twice as long as simply creating a new object with the given values.
As of Jan.2023 this function is not used anywhere in troggle.
"""
try:
instance, created = objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
@@ -219,7 +253,9 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
print(f" !! - lookupAttribs:{lookupAttribs}\n !! - nonLookupAttribs:{nonLookupAttribs}")
raise
if not created and not instance.new_since_parsing:
for k, v in list(nonLookupAttribs.items()): #overwrite the existing attributes from the logbook text (except date and title)
for k, v in list(
nonLookupAttribs.items()
): # overwrite the existing attributes from the logbook text (except date and title)
setattr(instance, k, v)
try:
instance.save()
@@ -233,18 +269,17 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
except:
msg = f"FAULT getting __str__ for instance with lookupattribs: {lookupAttribs}:"
if created:
logging.info(str(instance) + ' was just added to the database for the first time. \n')
logging.info(str(instance) + " was just added to the database for the first time. \n")
if not created and instance.new_since_parsing:
logging.info(str(instance) + " has been modified using Troggle since parsing, so the current script left it as is. \n")
logging.info(
str(instance) + " has been modified using Troggle since parsing, so the current script left it as is. \n"
)
if not created and not instance.new_since_parsing:
        logging.info(" instance:<"+ str(instance) + "> existed in the database unchanged since last parse. It has been overwritten.")
        logging.info(
            " instance:<"
            + str(instance)
            + "> existed in the database unchanged since last parse. It has been overwritten."
)
return (instance, created)
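# Editorial sketch (not part of this commit): how a parser would typically call
# save_carefully(). The model fields below are hypothetical; the semantics follow
# Django's get_or_create(defaults=..., **lookup) as used above.
#   lookup = {"date": "2023-01-30", "title": "Rigging trip"}   # identifying fields
#   extra = {"text": "Went rigging."}                          # fields that may be overwritten
#   entry, created = save_carefully(LogbookEntry, lookupAttribs=lookup, nonLookupAttribs=extra)
#   # created is True only if no LogbookEntry matched 'lookup'; if one existed and its
#   # new_since_parsing flag was False, 'text' is overwritten and created is False.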

@@ -13,8 +13,8 @@ the decorator mechanism.
https://www.fullstackpython.com/django-contrib-auth-decorators-login-required-examples.html
"""
class login_required_if_public(object):
class login_required_if_public(object):
def __init__(self, f):
if settings.PUBLIC_SITE:
self.f = login_required(f)
@@ -26,66 +26,62 @@ class login_required_if_public(object):
# This is copied from CUYC.cuy.website.view.auth
# If we want to do the whole online-email thing, we would also need to copy across the code in these
# imported files and delete what is superfluous.
# If we want to do the whole online-email thing, we would also need to copy across the code in these
# imported files and delete what is superfluous.
# Or we could just load the latest version of django-registration app.
#from cuy.club.models import Member, Message
#from ..forms import WebsiteLoginForm, WebsiteRegisterForm
#from ...common import mail_site_error
#from .generic import user_is_active
# from cuy.club.models import Member, Message
# from ..forms import WebsiteLoginForm, WebsiteRegisterForm
# from ...common import mail_site_error
# from .generic import user_is_active
'''The login and logout functions.
"""The login and logout functions.
This is also where we would manage registration: for people wanting to create and validate their individual
logon accounts/forgottenpassword'''
logon accounts/forgottenpassword"""
############################
# Authentication Functions #
############################
def expologout(request):
login_form = auth_forms.AuthenticationForm()
logout(request)
return render(request, 'login/logout.html', {'form':login_form})
return render(request, "login/logout.html", {"form": login_form})
def expologin(request):
# GET
if not request.method == 'POST':
if not request.method == "POST":
if (not request.user.is_authenticated) or (not request.user.is_active):
return render(request, 'login/index.html', {})
return render(request, "login/index.html", {})
else:
# going to login page when you are already logged in
return render(request, 'tasks.html', {})
return render(request, "tasks.html", {})
# POST
username = request.POST['username']
password = request.POST['password']
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user is None:
return render(request, 'login/index.html',
{'invalid': True, 'username':username})
return render(request, "login/index.html", {"invalid": True, "username": username})
if not user.is_active:
return render(request, 'login/enable.html',
{'login_state':'notenabled'})
return render(request, "login/enable.html", {"login_state": "notenabled"})
try:
login(request, user)
# Should do the ?next= stuff here..
return redirect_after_login(request)
except:
return render(request, 'errors/generic.html', {})
return render(request, "errors/generic.html", {})
def redirect_after_login(request):
nxt = request.GET.get("next", None)
if nxt is None:
return redirect(settings.LOGIN_REDIRECT_URL)
elif not is_safe_url(
url=nxt,
allowed_hosts={request.get_host()},
require_https=request.is_secure()):
elif not is_safe_url(url=nxt, allowed_hosts={request.get_host()}, require_https=request.is_secure()):
return redirect(settings.LOGIN_REDIRECT_URL)
else:
return redirect(nxt)
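# Editorial note (not part of this commit): how the ?next= handling above behaves.
# The URLs are made up for illustration; is_safe_url() rejects targets whose host is
# not in allowed_hosts, which is what blocks the off-site case.
#   /accounts/login/?next=/1623/115/115.html   -> redirect("/1623/115/115.html")
#   /accounts/login/?next=http://evil.example/ -> redirect(settings.LOGIN_REDIRECT_URL)
#   /accounts/login/                           -> redirect(settings.LOGIN_REDIRECT_URL)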

@@ -8,18 +8,14 @@ from pathlib import Path
from django import forms
from django.conf import settings
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.http import (HttpResponse, HttpResponseNotFound,
HttpResponseRedirect)
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import NoReverseMatch, reverse
import settings
import troggle.settings as settings
from troggle.core.forms import (CaveAndEntranceFormSet, CaveForm, EntranceForm,
EntranceLetterForm)
from troggle.core.models.caves import (Area, Cave, CaveAndEntrance,
Entrance, EntranceSlug,
GetCaveLookup, SurvexStation)
from troggle.core.forms import CaveAndEntranceFormSet, CaveForm, EntranceForm, EntranceLetterForm
from troggle.core.models.caves import Area, Cave, CaveAndEntrance, Entrance, EntranceSlug, GetCaveLookup, SurvexStation
from troggle.core.models.logbooks import CaveSlug, QM
from troggle.core.models.troggle import DataIssue, Expedition
from troggle.core.utils import write_and_commit, writetrogglefile
@@ -27,24 +23,25 @@ from troggle.core.views import expo
from .auth import login_required_if_public
'''Manages the complex procedures to assemble a cave description out of the components
"""Manages the complex procedures to assemble a cave description out of the components
Manages the use of cavern to parse survex files to produce 3d and pos files
'''
"""
todo = '''- Fix rendercave() so that CaveView works
todo = """- Fix rendercave() so that CaveView works
- in getCaves() search GCavelookup first, which should raise a MultipleObjectsReturned exception if no duplicates
'''
"""
def getCaves(cave_id):
'''Only gets called if a call to getCave() raises a MultipleObjects exception
"""Only gets called if a call to getCave() raises a MultipleObjects exception
TO DO: search GCavelookup first, which should raise a MultipleObjectsReturned exception if there
are duplicates'''
are duplicates"""
try:
caves = Cave.objects.filter(kataster_number=cave_id)
caveset = set(caves)
Gcavelookup = GetCaveLookup() # dictionary maps strings to Cave objects
Gcavelookup = GetCaveLookup()  # dictionary maps strings to Cave objects
if cave_id in Gcavelookup:
caveset.add(Gcavelookup[cave_id])
return list(caveset)
@@ -53,45 +50,52 @@ def getCaves(cave_id):
def getCave(cave_id):
'''Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm.
"""Returns a cave object when given a cave name or number. It is used by views including cavehref, ent, and qm.
TO DO: search GCavelookup first, which should raise a MultipleObjectsReturned exception if there
are duplicates'''
are duplicates"""
try:
cave = Cave.objects.get(kataster_number=cave_id)
return cave
except Cave.MultipleObjectsReturned as ex:
raise MultipleObjectsReturned("Duplicate kataster number") from ex # propagate this up
raise MultipleObjectsReturned("Duplicate kataster number") from ex # propagate this up
except Cave.DoesNotExist as ex:
Gcavelookup = GetCaveLookup() # dictionary maps strings to Cave objects
Gcavelookup = GetCaveLookup()  # dictionary maps strings to Cave objects
if cave_id in Gcavelookup:
return Gcavelookup[cave_id]
return Gcavelookup[cave_id]
else:
raise ObjectDoesNotExist("No cave found with this identifier in any id field") from ex # propagate this up
raise ObjectDoesNotExist("No cave found with this identifier in any id field") from ex # propagate this up
except:
raise ObjectDoesNotExist("No cave found with this identifier in any id field")
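# Editorial sketch (not part of this commit): typical use of getCave()/getCaves() by a
# caller, with a hypothetical identifier. The exception classes are those imported above.
#   try:
#       cave = getCave("115")            # kataster number, or any key known to GetCaveLookup
#   except MultipleObjectsReturned:
#       caves = getCaves("115")          # fall back to the list-returning variant
#   except ObjectDoesNotExist:
#       pass                             # no cave matches this identifier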
def pad5(x):
return "0" * (5 -len(x.group(0))) + x.group(0)
return "0" * (5 - len(x.group(0))) + x.group(0)
def padnumber(x):
return re.sub("\d+", pad5, x)
return re.sub("\d+", pad5, x)
def numericalcmp(x, y):
return cmp(padnumber(x), padnumber(y))
return cmp(padnumber(x), padnumber(y))
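# Worked example (editorial note): padnumber() zero-pads every digit run to five
# characters so that plain string comparison sorts numerically, e.g.
#   padnumber("41")          -> "00041"
#   padnumber("115")         -> "00115"
#   padnumber("2012-ns-10")  -> "02012-ns-00010"
# so "41" now sorts before "115" instead of after it.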
def caveKey(c):
"""This function goes into a lexicogrpahic sort function, and the values are strings,
but we want to sort numberically on kataster number before sorting on unofficial number.
"""
"""
if not c.kataster_number:
return "9999." + c.unofficial_number
else:
if int(c.kataster_number) >= 100:
return "99." + c.kataster_number
if int(c.kataster_number) >= 10:
return "9." + c.kataster_number
return "9." + c.kataster_number
return c.kataster_number
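# Worked example (editorial note): the prefixes above build sort keys so that 1- and
# 2-digit kataster numbers precede 3-digit ones, and unofficial numbers come last:
#   kataster "7"   -> key "7"
#   kataster "41"  -> key "9.41"
#   kataster "115" -> key "99.115"
#   no kataster, unofficial "2012-ns-10" -> key "9999.2012-ns-10"
# and "7" < "9.41" < "99.115" < "9999.2012-ns-10" in lexicographic order.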
def getnotablecaves():
notablecaves = []
for kataster_number in settings.NOTABLECAVESHREFS:
@@ -99,137 +103,146 @@ def getnotablecaves():
cave = Cave.objects.get(kataster_number=kataster_number)
notablecaves.append(cave)
except:
#print(" ! FAILED to get only one cave per kataster_number OR invalid number for: "+kataster_number)
# print(" ! FAILED to get only one cave per kataster_number OR invalid number for: "+kataster_number)
caves = Cave.objects.all().filter(kataster_number=kataster_number)
for c in caves:
#print(c.kataster_number, c.slug())
# print(c.kataster_number, c.slug())
if c.slug() != None:
notablecaves.append(c)
return notablecaves
def caveindex(request):
caves = Cave.objects.all()
caves1623 = list(Cave.objects.filter(area__short_name = "1623"))
caves1626 = list(Cave.objects.filter(area__short_name = "1626"))
caves1623 = list(Cave.objects.filter(area__short_name="1623"))
caves1626 = list(Cave.objects.filter(area__short_name="1626"))
caves1623.sort(key=caveKey)
caves1626.sort(key=caveKey)
return render(request,'caveindex.html', {'caves1623': caves1623, 'caves1626': caves1626, 'notablecaves':getnotablecaves(), 'cavepage': True})
return render(
request,
"caveindex.html",
{"caves1623": caves1623, "caves1626": caves1626, "notablecaves": getnotablecaves(), "cavepage": True},
)
def cave3d(request, cave_id=''):
'''This is used to create a download url in templates/cave.html if anyone wants to download the .3d file
def cave3d(request, cave_id=""):
"""This is used to create a download url in templates/cave.html if anyone wants to download the .3d file
The caller template tries kataster first, then unofficial_number if that kataster number does not exist
but only if Cave.survex_file is non-empty
But the template file cave.html has its own ideas about the name of the file and thus the href. Ouch.
/cave/3d/<cave_id>
'''
"""
try:
cave = getCave(cave_id)
cave = getCave(cave_id)
except ObjectDoesNotExist:
return HttpResponseNotFound
except Cave.MultipleObjectsReturned:
# But only one might have survex data? So scan and return the first that works.
caves = getCaves(cave_id)
for c in caves:
if c.survex_file:
except Cave.MultipleObjectsReturned:
# But only one might have survex data? So scan and return the first that works.
caves = getCaves(cave_id)
for c in caves:
if c.survex_file:
# exists, but may not be a valid file path to a valid .svx file in the Loser repo
return file3d(request, c, c.slug)
else:
return file3d(request, cave, cave_id)
def file3d(request, cave, cave_id):
'''Produces a .3d file directly for download.
"""Produces a .3d file directly for download.
survex_file should be in valid path format 'caves-1623/264/264.svx' but it might be mis-entered as simply '2012-ns-10.svx'
Also the cave.survex_file may well not match the cave description path:
Also the cave.survex_file may well not match the cave description path:
e.g. it might be to the whole system 'smk-system.svx' instead of just for the specific cave.
- If the expected .3d file corresponding to cave.survex_file is present, return it.
- If the cave.survex_file exists, generate the 3d file, cache it and return it
- Use the cave_id to guess what the 3d file might be and, if in the cache, return it
- Use the cave_id to guess what the .svx file might be and generate the .3d file and return it
- (Use the incomplete cave.survex_file and a guess at the missing directories to guess the real .svx file location ?)
'''
- (Use the incomplete cave.survex_file and a guess at the missing directories to guess the real .svx file location ?)
"""
def runcavern(survexpath):
'''This has not yet been properly updated with respect to putting the .3d file in the same folder as the .svx files
as done in runcavern3d() in parsers/survex.py
"""This has not yet been properly updated with respect to putting the .3d file in the same folder as the .svx files
as done in runcavern3d() in parsers/survex.py
Needs testing.
'''
#print(" - Regenerating cavern .log and .3d for '{}'".format(survexpath))
"""
# print(" - Regenerating cavern .log and .3d for '{}'".format(survexpath))
if not survexpath.is_file():
#print(" - - Regeneration ABORT\n - - from '{}'".format(survexpath))
# print(" - - Regeneration ABORT\n - - from '{}'".format(survexpath))
pass
try:
completed_process = subprocess.run([settings.CAVERN, "--log", f"--output={settings.SURVEX_DATA}", f"{survexpath}"])
completed_process = subprocess.run(
[settings.CAVERN, "--log", f"--output={settings.SURVEX_DATA}", f"{survexpath}"]
)
except OSError as ex:
# propagate this to caller.
# propagate this to caller.
raise OSError(completed_process.stdout) from ex
op3d = (Path(settings.SURVEX_DATA) / Path(survexpath).name).with_suffix('.3d')
op3dlog = Path(op3d.with_suffix('.log'))
op3d = (Path(settings.SURVEX_DATA) / Path(survexpath).name).with_suffix(".3d")
op3dlog = Path(op3d.with_suffix(".log"))
if not op3d.is_file():
print(f" - - Regeneration FAILED\n - - from '{survexpath}'\n - - to '{op3d}'")
print(" - - Regeneration stdout: ", completed_process.stdout)
print(" - - Regeneration cavern log output: ", op3dlog.read_text())
def return3d(threedpath):
if threedpath.is_file():
response = HttpResponse(content=open(threedpath, 'rb'), content_type='application/3d')
response['Content-Disposition'] = f'attachment; filename={threedpath.name}'
response = HttpResponse(content=open(threedpath, "rb"), content_type="application/3d")
response["Content-Disposition"] = f"attachment; filename={threedpath.name}"
return response
else:
message = f'<h1>Path provided does not correspond to any actual 3d file.</h1><p>path: "{threedpath}"'
#print(message)
return HttpResponseNotFound(message)
survexname = Path(cave.survex_file).name # removes directories
# print(message)
return HttpResponseNotFound(message)
survexname = Path(cave.survex_file).name # removes directories
survexpath = Path(settings.SURVEX_DATA, cave.survex_file)
threedname = Path(survexname).with_suffix('.3d') # removes .svx, replaces with .3d
threedpath = Path(settings.SURVEX_DATA, threedname)
threedname = Path(survexname).with_suffix(".3d") # removes .svx, replaces with .3d
threedpath = Path(settings.SURVEX_DATA, threedname)
threedcachedir = Path(settings.SURVEX_DATA)
# These if statements need refactoring more cleanly
if cave.survex_file:
#print(" - cave.survex_file '{}'".format(cave.survex_file))
# print(" - cave.survex_file '{}'".format(cave.survex_file))
if threedpath.is_file():
#print(" - threedpath '{}'".format(threedpath))
# print(" - threedpath '{}'".format(threedpath))
# possible error here as several .svx files of same names in different directories will overwrite in /3d/
if survexpath.is_file():
if os.path.getmtime(survexpath) > os.path.getmtime(threedpath):
runcavern(survexpath)
return return3d(threedpath)
else:
#print(" - - survexpath '{}'".format(survexpath))
# print(" - - survexpath '{}'".format(survexpath))
if survexpath.is_file():
#print(" - - - survexpath '{}'".format(survexpath))
# print(" - - - survexpath '{}'".format(survexpath))
runcavern(survexpath)
return return3d(threedpath)
# Get here if cave.survex_file was set but did not correspond to a valid svx file
if survexpath.is_file():
# a file, but invalid format
message=f'<h1>File is not valid .svx format.</h1><p>Could not generate 3d file from "{survexpath}"'
message = f'<h1>File is not valid .svx format.</h1><p>Could not generate 3d file from "{survexpath}"'
else:
# we could try to guess that 'caves-1623/' is missing,... nah.
message = f'<h1>Path provided does not correspond to any actual file.</h1><p>path: "{survexpath}"'
return HttpResponseNotFound(message)
def rendercave(request, cave, slug, cave_id=''):
'''Gets the data and files ready and then triggers Django to render the template.
return HttpResponseNotFound(message)
def rendercave(request, cave, slug, cave_id=""):
"""Gets the data and files ready and then triggers Django to render the template.
The resulting html contains urls which are dispatched independently, e.g. the 'download' link
'''
"""
# print(" ! rendercave:'{}' START slug:'{}' cave_id:'{}'".format(cave, slug, cave_id))
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated:
return render(request, 'nonpublic.html', {'instance': cave, 'cavepage': True, 'cave_id': cave_id})
return render(request, "nonpublic.html", {"instance": cave, "cavepage": True, "cave_id": cave_id})
else:
# print(f" ! rendercave: slug:'{slug}' survex file:'{cave.survex_file}'")
try:
svx3d = Path(cave.survex_file).stem
svx3d = Path(cave.survex_file).stem
svxstem = Path(settings.SURVEX_DATA) / Path(cave.survex_file)
# print(f" ! rendercave: slug:'{slug}' '' ++ '{svxstem}'")
except:
@@ -239,45 +252,55 @@ def rendercave(request, cave, slug, cave_id=''):
# So only do this render if a valid .3d file exists. TO BE DONE -Not yet as CaveView is currently disabled
# see design docum in troggle/templates/cave.html
# see rendercave() in troggle/core/views/caves.py
templatefile = 'cave.html'
templatefile = "cave.html"
if not cave_id:
cave_id = slug # cave.unofficial_number
context = {'cave_editable': True, 'settings': settings, 'cave': cave, 'cavepage': True,
'cave_id': cave_id, 'svxstem': str(svxstem), 'svx3d':svx3d}
cave_id = slug # cave.unofficial_number
context = {
"cave_editable": True,
"settings": settings,
"cave": cave,
"cavepage": True,
"cave_id": cave_id,
"svxstem": str(svxstem),
"svx3d": svx3d,
}
# Do not catch any exceptions here: propagate up to caller
r = render(request, templatefile, context) # crashes here with NoReverseMatch if url not set up for 'edit_cave' in urls.py
r = render(
request, templatefile, context
) # crashes here with NoReverseMatch if url not set up for 'edit_cave' in urls.py
return r
def cavepage(request, karea, subpath):
'''Displays a cave description page
"""Displays a cave description page
accessed by kataster area number specifically
OR
accessed by cave.url specifically set in data, e.g.
accessed by cave.url specifically set in data, e.g.
"1623/000/000.html" <= cave-data/1623-000.html
"1623/41/115.htm" <= cave-data/1623-115.html
"1623/41/115.htm" <= cave-data/1623-115.html
so we have to query the database to find the URL as we cannot rely on the url actually telling us the cave by inspection.
There are A LOT OF URLS to e.g. /1623/161/l/rl89a.htm which are IMAGES and html files
in cave descriptions. These need to be handled HERE
'''
"""
kpath = karea + subpath
# print(f" ! cavepage:'{kpath}' kataster area:'{karea}' rest of path:'{subpath}'")
try:
cave = Cave.objects.get(url = kpath) # ideally this will be unique
cave = Cave.objects.get(url=kpath) # ideally this will be unique
except Cave.DoesNotExist:
# probably a link to text or an image e.g. 1623/161/l/rl89a.htm i.e. an expoweb page
# cannot assume that this is a simple cave page, for a cave we don't know.
# print(f" ! cavepage: url={kpath} A cave of this name does not exist")
return expo.expopage(request, kpath)
except Cave.MultipleObjectsReturned:
caves = Cave.objects.filter(url = kpath)
caves = Cave.objects.filter(url=kpath)
# print(f" ! cavepage: url={kpath} multiple caves exist")
# we should have a -several variant for the cave pages, not just the svxcaves:
return render(request, 'svxcaveseveral.html', {'settings': settings, "caves":caves })
return render(request, "svxcaveseveral.html", {"settings": settings, "caves": caves})
try:
r = rendercave(request, cave, cave.slug())
return r
@@ -285,64 +308,66 @@ def cavepage(request, karea, subpath):
if settings.DEBUG:
raise
else:
message = f'Failed to render cave: {kpath} (it does exist and is unique) because of a Django URL resolution error. Check urls.py.'
return render(request,'errors/generic.html', {'message': message})
message = f"Failed to render cave: {kpath} (it does exist and is unique) because of a Django URL resolution error. Check urls.py."
return render(request, "errors/generic.html", {"message": message})
except:
# anything else is a new problem. Add in specific error messages here as we discover new types of error
# anything else is a new problem. Add in specific error messages here as we discover new types of error
raise
def caveEntrance(request, slug):
try:
cave = Cave.objects.get(caveslug__slug = slug)
cave = Cave.objects.get(caveslug__slug=slug)
except:
return render(request,'errors/badslug.html', {'badslug': slug})
return render(request, "errors/badslug.html", {"badslug": slug})
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated:
return render(request,'nonpublic.html', {'instance': cave})
return render(request, "nonpublic.html", {"instance": cave})
else:
return render(request,'cave_entrances.html', {'cave': cave})
return render(request, "cave_entrances.html", {"cave": cave})
@login_required_if_public
def edit_cave(request, path = "", slug=None):
'''This is the form that edits all the cave data and writes out an XML file in the :expoweb: repo folder
def edit_cave(request, path="", slug=None):
"""This is the form that edits all the cave data and writes out an XML file in the :expoweb: repo folder
The format for the file being saved is in templates/dataformat/cave.xml
Warning. This uses Django deep magic.
It does save the data into the database directly, not by parsing the file.
'''
"""
message = ""
if slug is not None:
if slug is not None:
try:
cave = Cave.objects.get(caveslug__slug = slug)
cave = Cave.objects.get(caveslug__slug=slug)
except:
return render(request,'errors/badslug.html', {'badslug': slug})
return render(request, "errors/badslug.html", {"badslug": slug})
else:
cave = Cave()
if request.POST:
form = CaveForm(request.POST, instance=cave)
ceFormSet = CaveAndEntranceFormSet(request.POST)
#versionControlForm = VersionControlCommentForm(request.POST)
# versionControlForm = VersionControlCommentForm(request.POST)
if form.is_valid() and ceFormSet.is_valid():
#print(f'! POST is valid. {cave}')
cave = form.save(commit = False)
# print(f'! POST is valid. {cave}')
cave = form.save(commit=False)
if slug is None:
for a in form.cleaned_data["area"]:
if a.kat_area():
myArea = a.kat_area()
if form.cleaned_data["kataster_number"]:
myslug = f"{myArea}-{form.cleaned_data['kataster_number']}"
myslug = f"{myArea}-{form.cleaned_data['kataster_number']}"
else:
myslug = f"{myArea}-{form.cleaned_data['unofficial_number']}"
myslug = f"{myArea}-{form.cleaned_data['unofficial_number']}"
else:
myslug = slug
# Converting a PENDING cave to a real cave by saving this form
myslug = myslug.replace('-PENDING-', '-')
myslug = myslug.replace("-PENDING-", "-")
cave.filename = myslug + ".html"
cave.save()
form.save_m2m()
if slug is None:
cs = CaveSlug(cave = cave, slug = myslug, primary = True)
cs.save()
cs = CaveSlug(cave=cave, slug=myslug, primary=True)
cs.save()
ceinsts = ceFormSet.save(commit=False)
for ceinst in ceinsts:
ceinst.cave = cave
@@ -353,59 +378,65 @@ def edit_cave(request, path = "", slug=None):
write_and_commit([cave_file], f"Online edit of {cave}")
# leave other exceptions unhandled so that they bubble up to user interface
except PermissionError:
message = f'CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file {cave.filename}. Ask a nerd to fix this.'
return render(request,'errors/generic.html', {'message': message})
message = f"CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file {cave.filename}. Ask a nerd to fix this."
return render(request, "errors/generic.html", {"message": message})
except subprocess.SubprocessError:
message = f'CANNOT git on server for this file {cave.filename}. Edits may not be committed.\nAsk a nerd to fix this.'
return render(request,'errors/generic.html', {'message': message})
message = f"CANNOT git on server for this file {cave.filename}. Edits may not be committed.\nAsk a nerd to fix this."
return render(request, "errors/generic.html", {"message": message})
return HttpResponseRedirect("/" + cave.url)
return HttpResponseRedirect("/" + cave.url)
else:
message = f'! POST data is INVALID {cave}'
message = f"! POST data is INVALID {cave}"
print(message)
else:
form = CaveForm(instance=cave)
ceFormSet = CaveAndEntranceFormSet(queryset=cave.caveandentrance_set.all())
#versionControlForm = VersionControlCommentForm()
return render(request,
'editcave.html',
{'form': form, 'cave': cave, 'message': message,
'caveAndEntranceFormSet': ceFormSet,
#'versionControlForm': versionControlForm
})
# versionControlForm = VersionControlCommentForm()
return render(
request,
"editcave.html",
{
"form": form,
"cave": cave,
"message": message,
"caveAndEntranceFormSet": ceFormSet,
#'versionControlForm': versionControlForm
},
)
@login_required_if_public
def edit_entrance(request, path = "", caveslug=None, slug=None):
'''This is the form that edits the entrance data for a single entrance and writes out
def edit_entrance(request, path="", caveslug=None, slug=None):
"""This is the form that edits the entrance data for a single entrance and writes out
an XML file in the :expoweb: repo folder
The format for the file being saved is in templates/dataformat/entrance.xml
Warning. This uses Django deep magic.
It does save the data into the database directly, not by parsing the file.
'''
"""
try:
cave = Cave.objects.get(caveslug__slug = caveslug)
cave = Cave.objects.get(caveslug__slug=caveslug)
except:
return render(request,'errors/badslug.html', {'badslug': caveslug})
return render(request, "errors/badslug.html", {"badslug": caveslug})
if slug:
entrance = Entrance.objects.get(entranceslug__slug = slug)
caveAndEntrance = CaveAndEntrance.objects.get(entrance = entrance, cave = cave)
entrance = Entrance.objects.get(entranceslug__slug=slug)
caveAndEntrance = CaveAndEntrance.objects.get(entrance=entrance, cave=cave)
entlettereditable = False
else:
entrance = Entrance()
caveAndEntrance = CaveAndEntrance(cave = cave, entrance = entrance)
caveAndEntrance = CaveAndEntrance(cave=cave, entrance=entrance)
entlettereditable = True
if request.POST:
form = EntranceForm(request.POST, instance = entrance)
entletter = EntranceLetterForm(request.POST, instance = caveAndEntrance)
#versionControlForm = VersionControlCommentForm(request.POST)
form = EntranceForm(request.POST, instance=entrance)
entletter = EntranceLetterForm(request.POST, instance=caveAndEntrance)
# versionControlForm = VersionControlCommentForm(request.POST)
if form.is_valid() and entletter.is_valid():
entrance = form.save(commit = False)
entrance_letter = entletter.save(commit = False)
entrance = form.save(commit=False)
entrance_letter = entletter.save(commit=False)
if slug is None:
if entletter.cleaned_data["entrance_letter"]:
slugname = cave.slug() + entletter.cleaned_data["entrance_letter"]
@@ -415,7 +446,7 @@ def edit_entrance(request, path = "", caveslug=None, slug=None):
entrance.filename = slugname + ".html"
entrance.save()
if slug is None:
es = EntranceSlug(entrance = entrance, slug = slugname, primary = True)
es = EntranceSlug(entrance=entrance, slug=slugname, primary=True)
es.save()
entrance_file = entrance.file_output()
cave_file = cave.file_output()
@@ -423,116 +454,148 @@ def edit_entrance(request, path = "", caveslug=None, slug=None):
entrance.save()
if slug is None:
entrance_letter.save()
return HttpResponseRedirect("/" + cave.url)
return HttpResponseRedirect("/" + cave.url)
else:
form = EntranceForm(instance = entrance)
#versionControlForm = VersionControlCommentForm()
form = EntranceForm(instance=entrance)
# versionControlForm = VersionControlCommentForm()
if slug is None:
entletter = EntranceLetterForm()
else:
entletter = caveAndEntrance.entrance_letter
return render(request,
'editentrance.html',
{'form': form,
'cave': cave,
#'versionControlForm': versionControlForm,
'entletter': entletter,
'entlettereditable': entlettereditable
})
return render(
request,
"editentrance.html",
{
"form": form,
"cave": cave,
#'versionControlForm': versionControlForm,
"entletter": entletter,
"entlettereditable": entlettereditable,
},
)
def ent(request, cave_id, ent_letter):
cave = Cave.objects.filter(kataster_number = cave_id)[0]
cave_and_ent = CaveAndEntrance.objects.filter(cave = cave).filter(entrance_letter = ent_letter)[0]
return render(request,'entrance.html', {'cave': cave,
'entrance': cave_and_ent.entrance,
'letter': cave_and_ent.entrance_letter,})
cave = Cave.objects.filter(kataster_number=cave_id)[0]
cave_and_ent = CaveAndEntrance.objects.filter(cave=cave).filter(entrance_letter=ent_letter)[0]
return render(
request,
"entrance.html",
{
"cave": cave,
"entrance": cave_and_ent.entrance,
"letter": cave_and_ent.entrance_letter,
},
)
# def entranceSlug(request, slug):
# '''This seems to be a fossil, but I am not sure...
# '''
# entrance = Entrance.objects.get(entranceslug__slug = slug)
# if entrance.non_public and not request.user.is_authenticated:
# return render(request,'nonpublic.html', {'instance': entrance})
# else:
# return render(request,'entranceslug.html', {'entrance': entrance})
# '''This seems to be a fossil, but I am not sure...
# '''
# entrance = Entrance.objects.get(entranceslug__slug = slug)
# if entrance.non_public and not request.user.is_authenticated:
# return render(request,'nonpublic.html', {'instance': entrance})
# else:
# return render(request,'entranceslug.html', {'entrance': entrance})
# def surveyindex(request):
# '''The template does not exist, there is no URL which calls this, so it is a fossil
# '''
# surveys=Survey.objects.all()
# expeditions=Expedition.objects.order_by("-year")
# return render(request,'survey.html',locals())
# '''The template does not exist, there is no URL which calls this, so it is a fossil
# '''
# surveys=Survey.objects.all()
# expeditions=Expedition.objects.order_by("-year")
# return render(request,'survey.html',locals())
def get_entrances(request, caveslug):
try:
cave = Cave.objects.get(caveslug__slug = caveslug)
cave = Cave.objects.get(caveslug__slug=caveslug)
except:
return render(request,'errors/badslug.html', {'badslug': caveslug})
return render(request,'options.html', {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]})
return render(request, "errors/badslug.html", {"badslug": caveslug})
return render(
request, "options.html", {"items": [(e.entrance.slug(), e.entrance.slug()) for e in cave.entrances()]}
)
def caveQMs(request, slug):
'''Lists all the QMs on a particular cave
"""Lists all the QMs on a particular cave
relies on the template to find all the QMs for the cave specified in the slug, e.g. '1623-161'
Now working in July 2022
'''
"""
try:
cave = Cave.objects.get(caveslug__slug = slug)
cave = Cave.objects.get(caveslug__slug=slug)
except:
return render(request,'errors/badslug.html', {'badslug': slug})
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated:
return render(request,'nonpublic.html', {'instance': cave})
else:
return render(request,'cave_qms.html', {'cave': cave})
return render(request, "errors/badslug.html", {"badslug": slug})
def qm(request,cave_id,qm_id,year,grade=None, blockname=None):
'''Reports on one specific QM
Fixed and working July 2022, for both CSV imported QMs
if cave.non_public and settings.PUBLIC_SITE and not request.user.is_authenticated:
return render(request, "nonpublic.html", {"instance": cave})
else:
return render(request, "cave_qms.html", {"cave": cave})
def qm(request, cave_id, qm_id, year, grade=None, blockname=None):
"""Reports on one specific QM
Fixed and working July 2022, for both CSV imported QMs
needs refactoring though.
290 has several QMS with the same number, grade, year (2108) and first 8 chars of the survexblock. This crashes things.
'''
year=int(year)
if blockname == '' or not blockname:
"""
year = int(year)
if blockname == "" or not blockname:
# CSV import QMs, use old technique
try:
c=getCave(cave_id)
manyqms=c.get_QMs()
qm=manyqms.get(number=qm_id,expoyear=year)
return render(request,'qm.html', {'qm': qm})
c = getCave(cave_id)
manyqms = c.get_QMs()
qm = manyqms.get(number=qm_id, expoyear=year)
return render(request, "qm.html", {"qm": qm})
except QM.DoesNotExist:
#raise
return render(request,'errors/badslug.html', {'badslug': f'QM.DoesNotExist blockname is empty string: {cave_id=} {year=} {qm_id=} {grade=} {blockname=}'})
# raise
return render(
request,
"errors/badslug.html",
{
"badslug": f"QM.DoesNotExist blockname is empty string: {cave_id=} {year=} {qm_id=} {grade=} {blockname=}"
},
)
else:
try:
qmslug = f'{cave_id}-{year}-{blockname=}{qm_id}{grade}'
print(f'{qmslug=}')
c=getCave(cave_id)
manyqms=c.get_QMs()
qmqs=manyqms.filter(expoyear=year, blockname=blockname, number=qm_id, grade=grade)
if len(qmqs) > 1:
qmslug = f"{cave_id}-{year}-{blockname=}{qm_id}{grade}"
print(f"{qmslug=}")
c = getCave(cave_id)
manyqms = c.get_QMs()
qmqs = manyqms.filter(expoyear=year, blockname=blockname, number=qm_id, grade=grade)
if len(qmqs) > 1:
for q in qmqs:
print(qmqs)
message = f'Multiple QMs with the same cave, year, number, grade AND first 8 chars of the survexblock name. (Could be caused by incomplete databasereset). Fix this in the survex file(s). {cave_id=} {year=} {qm_id=} {blockname=}'
return render(request,'errors/generic.html', {'message': message})
message = f"Multiple QMs with the same cave, year, number, grade AND first 8 chars of the survexblock name. (Could be caused by incomplete databasereset). Fix this in the survex file(s). {cave_id=} {year=} {qm_id=} {blockname=}"
return render(request, "errors/generic.html", {"message": message})
else:
qm=qmqs.get(expoyear=year, blockname=blockname, number=qm_id, grade=grade)
qm = qmqs.get(expoyear=year, blockname=blockname, number=qm_id, grade=grade)
if qm:
print(qm, f'{qmslug=}:{cave_id=} {year=} {qm_id=} {blockname=} {qm.expoyear=} {qm.completion_description=}')
return render(request,'qm.html', {'qm': qm})
print(
qm,
f"{qmslug=}:{cave_id=} {year=} {qm_id=} {blockname=} {qm.expoyear=} {qm.completion_description=}",
)
return render(request, "qm.html", {"qm": qm})
else:
#raise
return render(request,'errors/badslug.html', {'badslug': f'Failed get {cave_id=} {year=} {qm_id=} {grade=} {blockname=}'})
# raise
return render(
request,
"errors/badslug.html",
{"badslug": f"Failed get {cave_id=} {year=} {qm_id=} {grade=} {blockname=}"},
)
except MultipleObjectsReturned:
message = f'Multiple QMs with the same cave, year, number, grade AND first 8 chars of the survexblock name. (Could be caused by incomplete databasereset). Fix this in the survex file(s). {cave_id=} {year=} {qm_id=} {blockname=}'
return render(request,'errors/generic.html', {'message': message})
message = f"Multiple QMs with the same cave, year, number, grade AND first 8 chars of the survexblock name. (Could be caused by incomplete databasereset). Fix this in the survex file(s). {cave_id=} {year=} {qm_id=} {blockname=}"
return render(request, "errors/generic.html", {"message": message})
except QM.DoesNotExist:
#raise
return render(request,'errors/badslug.html', {'badslug': f'QM.DoesNotExist blockname is not empty string {cave_id=} {year=} {qm_id=} {grade=} {blockname=}'})
# raise
return render(
request,
"errors/badslug.html",
{
"badslug": f"QM.DoesNotExist blockname is not empty string {cave_id=} {year=} {qm_id=} {grade=} {blockname=}"
},
)

@@ -12,100 +12,106 @@ from django.shortcuts import render
from troggle.core.models.survex import DrawingFile
from troggle.core.views.expo import getmimetype
#import parsers.surveys
# import parsers.surveys
'''Some of these views serve files as binary blobs, and simply set the mime type based on the file extension,
"""Some of these views serve files as binary blobs, and simply set the mime type based on the file extension,
as does the urls.py dispatcher which sends them here. Here they should actually have the filetype checked
by looking inside the file before being served.
'''
"""
todo='''- Need to check if invalid query string is invalid, or produces multiple replies
todo = """- Need to check if invalid query string is invalid, or produces multiple replies
and render a user-friendly error page.
'''
"""
def unescape(input):
'''These look like HTML entities, but they are not. They are tunnel-specific encodings
'''
"""These look like HTML entities, but they are not. They are tunnel-specific encodings"""
codes = {
"&space;" : " ",
"&quot;" : "\"",
"&tab;" : "\t",
"&backslash;" : "\\",
"&newline;" : "\n|\t",
"&space;": " ",
"&quot;": '"',
"&tab;": "\t",
"&backslash;": "\\",
"&newline;": "\n|\t",
"&apostrophe": "'",
}
for c in codes:
#print(c, codes[c])
# print(c, codes[c])
input = input.replace(c, codes[c])
return input
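# Worked example (editorial note): the substitutions above turn Tunnel's private
# entity encodings back into plain characters, e.g.
#   unescape("&quot;161&quot;&space;left&space;branch")  ->  '"161" left branch'
# while "&newline;" expands to "\n|\t" exactly as listed in the table above.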
def dwgallfiles(request):
'''Report on all the drawing files in the system. These were loaded by parsing the entire directory tree
'''
"""Report on all the drawing files in the system. These were loaded by parsing the entire directory tree"""
dwgfiles = DrawingFile.objects.all()
return render(request, 'dwgfiles.html', { 'dwgfiles':dwgfiles, 'settings': settings })
return render(request, "dwgfiles.html", {"dwgfiles": dwgfiles, "settings": settings})
def dwgfilesingle(request, path):
'''sends a single binary file to the user. It could be an old PNG, PDF or SVG
"""sends a single binary file to the user. It could be an old PNG, PDF or SVG
not just Tunnel or Therion
The db records created on database reset import are not used when we look for an individual drawing, only
The db records created on database reset import are not used when we look for an individual drawing, only
collections of them.
Note the infelicity that this will deliver files that exist, but are hidden on the previous
webpage /dwgupload/... if the user types the filename into the browser bar. Could be a problem?
Should we validate using uploads.py dwgvaliddisp() here too?
'''
tfile = Path(settings.DRAWINGS_DATA, path.replace(":","#"))
"""
tfile = Path(settings.DRAWINGS_DATA, path.replace(":", "#"))
if not tfile.is_file():
message = f'Drawing file not found in filesystem at \'{path}\' \n\t\tMaybe a new dataimport needs to be done to get up to date.'
return render(request, 'errors/generic.html', {'message': message})
if Path(tfile).suffix in ['.xml']: # tunnel files are usually 'us-ascii' (!). And may not close all XML tags properly either.
for encoding in ['us-ascii', 'iso-8859-1', 'utf-8']:
try:
#print(f'attempting {encoding} for {tfile}')
with open(tfile, encoding=encoding, errors='strict') as f:
print(f'- before reading any {encoding}')
message = f"Drawing file not found in filesystem at '{path}' \n\t\tMaybe a new dataimport needs to be done to get up to date."
return render(request, "errors/generic.html", {"message": message})
if Path(tfile).suffix in [
".xml"
]: # tunnel files are usually 'us-ascii' (!). And may not close all XML tags properly either.
for encoding in ["us-ascii", "iso-8859-1", "utf-8"]:
try:
# print(f'attempting {encoding} for {tfile}')
with open(tfile, encoding=encoding, errors="strict") as f:
print(f"- before reading any {encoding}")
lines = f.readlines()
#print(f'- finished reading {encoding}')
# print(f'- finished reading {encoding}')
clean = []
for l in lines:
clean.append(unescape(l)) # deals with strangely embedded survex file
#print(f'- Cleaned and stripped.')
clean.append(unescape(l)) # deals with strangely embedded survex file
# print(f'- Cleaned and stripped.')
try:
return HttpResponse(content=clean, content_type="text/xml")
except:
return HttpResponse(content=f"Render fail for this file: {tfile} Please report to a nerd. Probably Julian's fault.")
return HttpResponse(
content=f"Render fail for this file: {tfile} Please report to a nerd. Probably Julian's fault."
)
except:
print(f'! Exception when reading {encoding}')
print(f"! Exception when reading {encoding}")
continue
print(f'! None of those encodings worked for {tfile}')
print(f"! None of those encodings worked for {tfile}")
try:
return HttpResponse(content=open(tfile, errors='ignore'), content_type=getmimetype(tfile))
return HttpResponse(content=open(tfile, errors="ignore"), content_type=getmimetype(tfile))
except:
return HttpResponse(content=f"Unable to understand the encoding for this file: {tfile} Please report to a nerd.")
if Path(tfile).suffix in ['th2', '.th']:
try:
return HttpResponse(content=open(tfile, errors='strict'), content_type="text/txt") # default utf-8
except:
return HttpResponse(content=f"Unable to understand the encoding for this file: {tfile} Please report to a nerd.")
else: # SVG, JPG etc
try:
return HttpResponse(content=open(tfile, mode='rb'), content_type=getmimetype(tfile)) # default utf-8
except:
return HttpResponse(
content=f"Unable to understand the encoding for this file: {tfile} Please report to a nerd."
)
if Path(tfile).suffix in ["th2", ".th"]:
try:
return HttpResponse(content=open(tfile, errors="strict"), content_type="text/txt") # default utf-8
except:
return HttpResponse(
content=f"Unable to understand the encoding for this file: {tfile} Please report to a nerd."
)
else: # SVG, JPG etc
try:
return HttpResponse(content=open(tfile, mode="rb"), content_type=getmimetype(tfile)) # default utf-8
except:
try:
return HttpResponse(content=open(tfile, mode='rb'))
return HttpResponse(content=open(tfile, mode="rb"))
except:
return HttpResponse(content=f"Unable to understand the encoding '{getmimetype(tfile)}' for this file: {tfile} Note that Apache will do its own thing here. Please report to a nerd.")
return HttpResponse(
content=f"Unable to understand the encoding '{getmimetype(tfile)}' for this file: {tfile} Note that Apache will do its own thing here. Please report to a nerd."
)

@@ -3,8 +3,7 @@ import re
from pathlib import Path
import django.forms as forms
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
JsonResponse)
from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import redirect, render
from django.template import Context, loader
from django.urls import resolve, reverse
@@ -22,12 +22,12 @@ MAX_IMAGE_HEIGHT = 800
THUMBNAIL_WIDTH = 200
THUMBNAIL_HEIGHT = 200
def get_dir(path):
"From a path sent from urls.py, determine the directory."
if "/" in path:
return path.rsplit('/', 1)[0]
return path.rsplit("/", 1)[0]
else:
return ""
def image_selector(request, path):
'''Returns available images'''
"""Returns available images"""
directory = get_dir(path)
thumbnailspath = Path(settings.EXPOWEB) / directory / "t"
thumbnails = []
@@ -41,25 +42,26 @@ def image_selector(request, path):
base = f"{directory}/"
else:
base = ""
thumbnail_url = reverse('expopage', args=[f"{base}t/{f.name}"])
name_base = f.name.rsplit('.', 1)[0]
thumbnail_url = reverse("expopage", args=[f"{base}t/{f.name}"])
name_base = f.name.rsplit(".", 1)[0]
page_path_base = Path(settings.EXPOWEB) / directory / "l"
if ((page_path_base / (f"{name_base}.htm")).is_file()):
page_url = reverse('expopage', args=[f"{base}l/{name_base}.htm"])
if (page_path_base / (f"{name_base}.htm")).is_file():
page_url = reverse("expopage", args=[f"{base}l/{name_base}.htm"])
else:
page_url = reverse('expopage', args=[f"{base}/l/{name_base}.html"])
page_url = reverse("expopage", args=[f"{base}/l/{name_base}.html"])
thumbnails.append({"thumbnail_url": thumbnail_url, "page_url": page_url})
return render(request, 'image_selector.html', {'thumbnails': thumbnails})
return render(request, "image_selector.html", {"thumbnails": thumbnails})
@login_required_if_public
@ensure_csrf_cookie
def new_image_form(request, path):
'''Manages a form to upload new images'''
"""Manages a form to upload new images"""
directory = get_dir(path)
if request.method == 'POST':
form = NewWebImageForm(request.POST, request.FILES, directory = directory)
if request.method == "POST":
form = NewWebImageForm(request.POST, request.FILES, directory=directory)
if form.is_valid():
f = request.FILES["file_"]
binary_data = io.BytesIO()
@@ -69,19 +71,24 @@ def new_image_form(request, path):
width, height = i.size
if width > MAX_IMAGE_WIDTH or height > MAX_IMAGE_HEIGHT:
scale = max(width / MAX_IMAGE_WIDTH, height / MAX_IMAGE_HEIGHT)
i = i.resize((int(width / scale), int(height / scale)), Image.ANTIALIAS)
i = i.resize((int(width / scale), int(height / scale)), Image.ANTIALIAS)
tscale = max(width / THUMBNAIL_WIDTH, height / THUMBNAIL_HEIGHT)
thumbnail = i.resize((int(width / tscale), int(height / tscale)), Image.ANTIALIAS)
thumbnail = i.resize((int(width / tscale), int(height / tscale)), Image.ANTIALIAS)
ib = io.BytesIO()
i.save(ib, format="png")
tb = io.BytesIO()
thumbnail.save(tb, format="png")
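# Worked example (editorial note): with THUMBNAIL_WIDTH = THUMBNAIL_HEIGHT = 200,
# a hypothetical 3200 x 1200 upload gives tscale = max(3200/200, 1200/200) = 16,
# so the thumbnail is resized to (200, 75); the full-size image is scaled the same
# way against MAX_IMAGE_WIDTH / MAX_IMAGE_HEIGHT above.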
image_rel_path, thumb_rel_path, desc_rel_path = form.get_rel_paths()
image_page_template = loader.get_template('image_page_template.html')
image_page = image_page_template.render({'header': form.cleaned_data["header"], 'description': form.cleaned_data["description"],
'photographer': form.cleaned_data["photographer"], 'year': form.cleaned_data["year"],
'filepath': f'/{image_rel_path}'
})
image_page_template = loader.get_template("image_page_template.html")
image_page = image_page_template.render(
{
"header": form.cleaned_data["header"],
"description": form.cleaned_data["description"],
"photographer": form.cleaned_data["photographer"],
"year": form.cleaned_data["year"],
"filepath": f"/{image_rel_path}",
}
)
image_path, thumb_path, desc_path = form.get_full_paths()
# Create directories if required
for full_path in image_path, thumb_path, desc_path:
@@ -89,57 +96,80 @@ def new_image_form(request, path):
full_path.parent.mkdir(parents=False, exist_ok=True)
try:
change_message = form.cleaned_data["change_message"]
write_and_commit([(desc_path, image_page, "utf-8"),
(image_path, ib.getbuffer(), False),
(thumb_path, tb.getbuffer(), False)],
f'{change_message} - online adding of an image')
write_and_commit(
[
(desc_path, image_page, "utf-8"),
(image_path, ib.getbuffer(), False),
(thumb_path, tb.getbuffer(), False),
],
f"{change_message} - online adding of an image",
)
except WriteAndCommitError as e:
return JsonResponse({"error": e.message})
linked_image_template = loader.get_template('linked_image_template.html')
html_snippet = linked_image_template.render({'thumbnail_url': f'/{thumb_rel_path}', 'page_url': f'/{desc_rel_path}'}, request)
linked_image_template = loader.get_template("linked_image_template.html")
html_snippet = linked_image_template.render(
{"thumbnail_url": f"/{thumb_rel_path}", "page_url": f"/{desc_rel_path}"}, request
)
return JsonResponse({"html": html_snippet})
else:
form = NewWebImageForm(directory = directory)
template = loader.get_template('new_image_form.html')
htmlform = template.render({'form': form, 'path': path}, request)
form = NewWebImageForm(directory=directory)
template = loader.get_template("new_image_form.html")
htmlform = template.render({"form": form, "path": path}, request)
return JsonResponse({"form": htmlform})
class NewWebImageForm(forms.Form):
'''The form used by the editexpopage function
'''
header = forms.CharField(widget=forms.TextInput(attrs={'size':'60', 'placeholder': "Enter title (displayed as a header and in the tab)"}))
"""The form used by the editexpopage function"""
header = forms.CharField(
widget=forms.TextInput(
attrs={"size": "60", "placeholder": "Enter title (displayed as a header and in the tab)"}
)
)
file_ = forms.FileField()
description = forms.CharField(widget=forms.Textarea(attrs={"cols":80, "rows":20, 'placeholder': "Describe the photo (using HTML)"}))
photographer = forms.CharField(widget=forms.TextInput(attrs={'size':'60', 'placeholder': "Photographers name"}), required = False)
year = forms.CharField(widget=forms.TextInput(attrs={'size':'60', 'placeholder': "Year photo was taken"}), required = False)
change_message = forms.CharField(widget=forms.Textarea(attrs={"cols":80, "rows":3, 'placeholder': "Descibe the change made (for git)"}))
description = forms.CharField(
widget=forms.Textarea(attrs={"cols": 80, "rows": 20, "placeholder": "Describe the photo (using HTML)"})
)
photographer = forms.CharField(
widget=forms.TextInput(attrs={"size": "60", "placeholder": "Photographers name"}), required=False
)
year = forms.CharField(
widget=forms.TextInput(attrs={"size": "60", "placeholder": "Year photo was taken"}), required=False
)
change_message = forms.CharField(
widget=forms.Textarea(attrs={"cols": 80, "rows": 3, "placeholder": "Descibe the change made (for git)"})
)
def __init__(self, *args, **kwargs):
self.directory = Path(kwargs.pop('directory'))
self.directory = Path(kwargs.pop("directory"))
super(forms.Form, self).__init__(*args, **kwargs)
def get_rel_paths(self):
f = self.cleaned_data['file_']
return [self.directory / "i" / (f.name.rsplit('.', 1)[0] + ".png"),
self.directory / "t" / (f.name.rsplit('.', 1)[0] + ".png"),
self.directory / "l" / (f.name.rsplit('.', 1)[0] + ".html")]
f = self.cleaned_data["file_"]
return [
self.directory / "i" / (f.name.rsplit(".", 1)[0] + ".png"),
self.directory / "t" / (f.name.rsplit(".", 1)[0] + ".png"),
self.directory / "l" / (f.name.rsplit(".", 1)[0] + ".html"),
]
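# Worked example (editorial note): for a hypothetical upload "rl89a.jpg" in
# directory "1623/161", get_rel_paths() returns
#   1623/161/i/rl89a.png   (full-size image)
#   1623/161/t/rl89a.png   (thumbnail)
#   1623/161/l/rl89a.html  (description page)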
def get_full_paths(self):
return [Path(settings.EXPOWEB) / x for x in self.get_rel_paths()]
def clean_file_(self):
for rel_path, full_path in zip(self.get_rel_paths(), self.get_full_paths()):
if full_path.exists():
raise forms.ValidationError(f"File already exists in {rel_path}")
return self.cleaned_data['file_']
return self.cleaned_data["file_"]
class HTMLarea(forms.Textarea):
template_name = "widgets/HTMLarea.html"
def __init__(self, *args, **kwargs):
self.preview = kwargs.pop('preview', False)
self.preview = kwargs.pop("preview", False)
super(forms.Textarea, self).__init__(*args, **kwargs)
def get_context(self, name, value, attrs):
c = super(forms.Textarea, self).get_context(name, value, attrs)
c["preview"] = self.preview
return c

@@ -22,12 +22,12 @@ from troggle.core.views.editor_helpers import HTMLarea
from .auth import login_required_if_public
'''Formerly a separate package called 'flatpages' written by Martin Green 2011.
"""Formerly a separate package called 'flatpages' written by Martin Green 2011.
This was NOT django.contrib.flatpages which stores HTML in the database, so the name was changed to expopages.
Then it was incorporated into troggle directly, rather than being an unnecessary external package.
'''
"""
default_head = '''<head>
default_head = """<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>CUCC Expedition - index</title>
<link rel="stylesheet" type="text/css" href="../css/main2.css" />
@@ -50,40 +50,42 @@ default_head = '''<head>
<li><form name=P method=get action="/search" target="_top">
<input id="omega-autofocus" type=search name=P size=8 autofocus>
<input type=submit value="Search"></li>
</ul>''' # this gets overwritten by templates/menu.html by django for most normal pages
</ul>""" # this gets overwritten by templates/menu.html by django for most normal pages
def expofiles_redirect(request, filepath):
'''This is used only when running as a test system without a local copy of /expofiles/
"""This is used only when running as a test system without a local copy of /expofiles/
when settings.EXPOFILESREMOTE is True
'''
return redirect(urljoin('http://expo.survex.com/expofiles/', filepath))
"""
return redirect(urljoin("http://expo.survex.com/expofiles/", filepath))
def map(request):
'''Serves unadorned the expoweb/map/map.html file
'''
fn = Path(settings.EXPOWEB, 'map', 'map.html')
return HttpResponse(content=open(fn, "r"),content_type='text/html')
"""Serves unadorned the expoweb/map/map.html file"""
fn = Path(settings.EXPOWEB, "map", "map.html")
return HttpResponse(content=open(fn, "r"), content_type="text/html")
def mapfile(request, path):
'''Serves unadorned file
'''
fn = Path(settings.EXPOWEB, 'map', path)
return HttpResponse(content=open(fn, "r"),content_type=getmimetype(fn))
"""Serves unadorned file"""
fn = Path(settings.EXPOWEB, "map", path)
return HttpResponse(content=open(fn, "r"), content_type=getmimetype(fn))
def expofilessingle(request, filepath):
'''sends a single binary file to the user, if not found, show the parent directory
"""sends a single binary file to the user, if not found, show the parent directory
If the path actually is a directory, then show that.
'''
#print(f' - expofilessingle {filepath}')
if filepath =="" or filepath =="/":
"""
# print(f' - expofilessingle {filepath}')
if filepath == "" or filepath == "/":
return expofilesdir(request, settings.EXPOFILES, "")
fn=urlunquote(filepath)
fn = Path(settings.EXPOFILES,filepath)
fn = urlunquote(filepath)
fn = Path(settings.EXPOFILES, filepath)
if fn.is_dir():
return expofilesdir(request, Path(fn), Path(filepath))
if fn.is_file():
return HttpResponse(content=open(fn, "rb"),content_type=getmimetype(filepath)) # any file
return HttpResponse(content=open(fn, "rb"), content_type=getmimetype(filepath)) # any file
else:
# not a file, so show parent directory - DANGER need to check this is limited to below expofiles
if Path(fn).parent == Path(settings.EXPOFILES).parent:
@@ -91,133 +93,164 @@ def expofilessingle(request, filepath):
else:
return expofilesdir(request, Path(fn).parent, Path(filepath).parent)
def expofilesdir(request, dirpath, filepath):
'''does a directory display. If there is an index.html file we should display that.
"""does a directory display. If there is an index.html file we should display that.
- dirpath is a full Path() resolved including local machine /expofiles/
- filepath is a Path() and it does not have /expofiles/ in it
'''
#print(f' - expofilesdir {dirpath} settings.EXPOFILESREMOTE: {settings.EXPOFILESREMOTE}')
"""
# print(f' - expofilesdir {dirpath} settings.EXPOFILESREMOTE: {settings.EXPOFILESREMOTE}')
if filepath:
urlpath = 'expofiles' / Path(filepath)
urlpath = "expofiles" / Path(filepath)
else:
urlpath = Path('expofiles')
urlpath = Path("expofiles")
try:
for f in dirpath.iterdir():
pass
except FileNotFoundError:
#print(f' - expofilesdir error {dirpath}')
# print(f' - expofilesdir error {dirpath}')
return expofilesdir(request, dirpath.parent, filepath.parent)
fileitems = []
diritems = []
diritems = []
for f in dirpath.iterdir():
if f.is_dir():
diritems.append((urlpath / f.parts[-1], str(f.parts[-1])))
diritems.append((urlpath / f.parts[-1], str(f.parts[-1])))
else:
# if f.parts[-1].lower() == 'index.htm' or f.parts[-1].lower() == 'index.html': # css cwd problem
# return HttpResponse(content=open(f, "rb"),content_type=getmimetype(filepath)) # any file
# return expofilessingle(request, str(Path(filepath / f.parts[-1])))
# return HttpResponse(content=open(f, "rb"),content_type=getmimetype(filepath)) # any file
# return expofilessingle(request, str(Path(filepath / f.parts[-1])))
fileitems.append((Path(urlpath) / f.parts[-1], str(f.parts[-1]), getmimetype(f)))
return render(request, 'dirdisplay.html', { 'filepath': urlpath, 'fileitems':fileitems, 'diritems': diritems,'settings': settings })
return render(
request,
"dirdisplay.html",
{"filepath": urlpath, "fileitems": fileitems, "diritems": diritems, "settings": settings},
)
def expowebpage(request, expowebpath, path):
'''Adds menus and serves an HTML page
'''
"""Adds menus and serves an HTML page"""
if not os.path.isfile(expowebpath / path):
# Should not get here if the path has suffix "_edit"
print(f' - 404 error in expowebpage() {path}')
return render(request, 'pagenotfound.html', {'path': path}, status="404")
print(f" - 404 error in expowebpage() {path}")
return render(request, "pagenotfound.html", {"path": path}, status="404")
# print(f' - {sys_getfilesystemencoding()=}')
if (sys_getfilesystemencoding() != "utf-8"):
return HttpResponse(default_head + '<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body' )
if sys_getfilesystemencoding() != "utf-8":
return HttpResponse(
default_head
+ "<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body"
)
# This next bit can be drastically simplified now that we know that the system encoding actually is utf-8
try:
with open(expowebpath / path, "r", encoding='utf-8') as o:
with open(expowebpath / path, "r", encoding="utf-8") as o:
html = o.read()
except:
# exception raised on debian with python 3.9.2 but not on WSL Ubuntu with python 3.9.5
# because debian was assuming default text encoding was 'ascii'. Now specified explicitly so should be OK
try:
with open(expowebpath / path, "rb") as o:
html = str(o.read()).replace("<h1>","<h1>BAD NON-UTF-8 characters here - ")
html = html.replace("\\n","\n")
html = html.replace("\\r","")
html = html.replace("\\t","\t")
html = html.replace("\\'","\'")
with open(expowebpath / path, "rb") as o:
html = str(o.read()).replace("<h1>", "<h1>BAD NON-UTF-8 characters here - ")
html = html.replace("\\n", "\n")
html = html.replace("\\r", "")
html = html.replace("\\t", "\t")
html = html.replace("\\'", "'")
except:
return HttpResponse(default_head + '<h3>UTF-8 Parsing Failure:<br>Page could not be parsed using UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to replace dubious umlauts and &pound; symbols with correct HTML entities e.g. <em>&amp;pound;;</em>. </body' )
m = re.search(r'(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)', html, re.DOTALL + re.IGNORECASE)
return HttpResponse(
default_head
+ "<h3>UTF-8 Parsing Failure:<br>Page could not be parsed using UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to replace dubious umlauts and &pound; symbols with correct HTML entities e.g. <em>&amp;pound;;</em>. </body"
)
m = re.search(
r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)",
html,
re.DOTALL + re.IGNORECASE,
)
if m:
preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups()
else:
return HttpResponse(default_head + html + '<h3>HTML Parsing failure:<br>Page could not be parsed into header and body:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to be in the expected full HTML format </body' )
return HttpResponse(
default_head
+ html
+ "<h3>HTML Parsing failure:<br>Page could not be parsed into header and body:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to be in the expected full HTML format </body"
)
m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
if m:
title, = m.groups()
(title,) = m.groups()
else:
title = ""
m = re.search(r"^<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
if m:
editable = False
else:
editable = os.access(expowebpath / path, os.W_OK) # are file permissions writeable?
editable = os.access(expowebpath / path, os.W_OK) # are file permissions writeable?
has_menu = False
menumatch = re.match(r'(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
if menumatch:
has_menu = False
#Determine which caves this page relates to
# Determine which caves this page relates to
m = re.search(r"(162\d\/[^\/]+)[\/\.]", path, re.DOTALL + re.IGNORECASE)
if m:
path_start, = m.groups()
parent_caves = Cave.objects.filter(url__startswith = path_start)
(path_start,) = m.groups()
parent_caves = Cave.objects.filter(url__startswith=path_start)
else:
parent_caves = None
#Determine if this page relates to a particular year
# Determine if this page relates to a particular year
m = re.search(r"years\/(\d\d\d\d)\/.*", path, re.DOTALL + re.IGNORECASE)
if m:
year, = m.groups()
(year,) = m.groups()
else:
year = None
#Determine if this page is part of the handbook
# Determine if this page is part of the handbook
handbook = path.startswith("handbook")
return render(request, 'expopage.html', {'editable': editable, 'path': path, 'title': title,
'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu,
'year': year,'handbook': handbook, 'parent_caves': parent_caves})
return render(
request,
"expopage.html",
{
"editable": editable,
"path": path,
"title": title,
"body": body,
"homepage": (path == "index.htm"),
"has_menu": has_menu,
"year": year,
"handbook": handbook,
"parent_caves": parent_caves,
},
)
def mediapage(request, subpath=None, doc_root=None):
'''This is for special prefix paths /photos/ /site_media/, /static/ etc.
as defined in urls.py . If given a directory, gives a failure page.
'''
#print(" - XXXXX_ROOT: {} ...{}".format(doc_root, subpath))
"""This is for special prefix paths /photos/ /site_media/, /static/ etc.
as defined in urls.py . If given a directory, gives a failure page.
"""
# print(" - XXXXX_ROOT: {} ...{}".format(doc_root, subpath))
if doc_root is not None:
filetobeopened = Path(doc_root, subpath)
if filetobeopened.is_dir():
return render(request, 'nodirlist.html', {'path': subpath})
return render(request, "nodirlist.html", {"path": subpath})
try:
return HttpResponse(content=open(filetobeopened, "rb"), content_type=getmimetype(subpath))
except IOError:
return render(request, 'pagenotfound.html', {'path': subpath}, status="404")
return render(request, "pagenotfound.html", {"path": subpath}, status="404")
else:
return render(request, 'pagenotfound.html', {'path': subpath}, status="404")
return render(request, "pagenotfound.html", {"path": subpath}, status="404")
def expopage(request, path):
'''Either renders an HTML page from expoweb with all the menus,
"""Either renders an HTML page from expoweb with all the menus,
or serves an unadorned binary file with mime type
'''
#print(" - EXPOPAGES delivering the file: '{}':{} as MIME type: {}".format(request.path, path,getmimetype(path)),flush=True)
"""
# print(" - EXPOPAGES delivering the file: '{}':{} as MIME type: {}".format(request.path, path,getmimetype(path)),flush=True)
if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated:
return HttpResponseRedirect(urljoin(reverse("auth_login"),f'?next={request.path}'))
return HttpResponseRedirect(urljoin(reverse("auth_login"), f"?next={request.path}"))
if path.startswith("admin/"):
# don't even attempt to handle these sorts of mistakes
@@ -230,117 +263,159 @@ def expopage(request, path):
if path.endswith(".htm") or path.endswith(".html"):
return expowebpage(request, expowebpath, path)
if Path(expowebpath / path ).is_dir():
if Path(expowebpath / path).is_dir():
for p in ["index.html", "index.htm"]:
if (expowebpath / path / p).is_file():
# This needs to reset the path to the new subdirectory
return HttpResponseRedirect('/'+str(Path(path) / p))
return render(request, 'pagenotfound.html', {'path': Path(path) / "index.html"}, status="404")
return HttpResponseRedirect("/" + str(Path(path) / p))
return render(request, "pagenotfound.html", {"path": Path(path) / "index.html"}, status="404")
if path.endswith("/"):
# we already know it is not a directory.
# the final / may have been appended by middleware if there was no page without it
# do not redirect to a file path without the slash as we may get in a loop. Let the user fix it:
return render(request, 'dirnotfound.html', {'path': path, 'subpath': path[0:-1]})
return render(request, "dirnotfound.html", {"path": path, "subpath": path[0:-1]})
# So it must be a file in /expoweb/ but not .htm or .html probably an image, maybe a txt file
filetobeopened = expowebpath / path
# print(f' - {sys_getfilesystemencoding()=}')
if (sys_getfilesystemencoding() != "utf-8"):
return HttpResponse(default_head + '<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body' )
if sys_getfilesystemencoding() != "utf-8":
return HttpResponse(
default_head
+ "<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body"
)
try:
content = open(filetobeopened, "rb")
content_type=getmimetype(path)
return HttpResponse(content = content, content_type=content_type)
content_type = getmimetype(path)
return HttpResponse(content=content, content_type=content_type)
except IOError:
return render(request, 'pagenotfound.html', {'path': path}, status="404")
return render(request, "pagenotfound.html", {"path": path}, status="404")
def getmimetype(path):
'''Our own version rather than relying on what is provided by the python library. Note that when
"""Our own version rather than relying on what is provided by the python library. Note that when
Apache or nginx is used to deliver /expofiles/, it will use its own idea of mimetypes and
not these.
'''
"""
path = str(path)
if path.lower().endswith(".css"): return "text/css"
if path.lower().endswith(".txt"): return "text/css"
if path.lower().endswith(".js"): return "application/javascript"
if path.lower().endswith(".json"): return "application/javascript"
if path.lower().endswith(".ico"): return "image/vnd.microsoft.icon"
if path.lower().endswith(".png"): return "image/png"
if path.lower().endswith(".tif"): return "image/tif"
if path.lower().endswith(".gif"): return "image/gif"
if path.lower().endswith(".jpeg"): return "image/jpeg"
if path.lower().endswith(".jpg"): return "image/jpeg"
if path.lower().endswith("svg"): return "image/svg+xml"
if path.lower().endswith("xml"): return "application/xml" # we use "text/xml" for tunnel files
if path.lower().endswith(".pdf"): return "application/pdf"
if path.lower().endswith(".ps"): return "application/postscript"
if path.lower().endswith(".svx"): return "application/x-survex-svx"
if path.lower().endswith(".3d"): return "application/x-survex-3d"
if path.lower().endswith(".pos"): return "application/x-survex-pos"
if path.lower().endswith(".err"): return "application/x-survex-err"
if path.lower().endswith(".odt"): return "application/vnd.oasis.opendocument.text"
if path.lower().endswith(".ods"): return "application/vnd.oasis.opendocument.spreadsheet"
if path.lower().endswith(".docx"): return "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
if path.lower().endswith(".xslx"): return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
if path.lower().endswith(".gz"): return "application/x-7z-compressed"
if path.lower().endswith(".7z"): return "application/x-7z-compressed"
if path.lower().endswith(".zip"): return "application/zip"
if path.lower().endswith(".css"):
return "text/css"
if path.lower().endswith(".txt"):
return "text/css"
if path.lower().endswith(".js"):
return "application/javascript"
if path.lower().endswith(".json"):
return "application/javascript"
if path.lower().endswith(".ico"):
return "image/vnd.microsoft.icon"
if path.lower().endswith(".png"):
return "image/png"
if path.lower().endswith(".tif"):
return "image/tif"
if path.lower().endswith(".gif"):
return "image/gif"
if path.lower().endswith(".jpeg"):
return "image/jpeg"
if path.lower().endswith(".jpg"):
return "image/jpeg"
if path.lower().endswith("svg"):
return "image/svg+xml"
if path.lower().endswith("xml"):
return "application/xml" # we use "text/xml" for tunnel files
if path.lower().endswith(".pdf"):
return "application/pdf"
if path.lower().endswith(".ps"):
return "application/postscript"
if path.lower().endswith(".svx"):
return "application/x-survex-svx"
if path.lower().endswith(".3d"):
return "application/x-survex-3d"
if path.lower().endswith(".pos"):
return "application/x-survex-pos"
if path.lower().endswith(".err"):
return "application/x-survex-err"
if path.lower().endswith(".odt"):
return "application/vnd.oasis.opendocument.text"
if path.lower().endswith(".ods"):
return "application/vnd.oasis.opendocument.spreadsheet"
if path.lower().endswith(".docx"):
return "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
if path.lower().endswith(".xslx"):
return "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
if path.lower().endswith(".gz"):
return "application/x-7z-compressed"
if path.lower().endswith(".7z"):
return "application/x-7z-compressed"
if path.lower().endswith(".zip"):
return "application/zip"
return ""
@login_required_if_public
@ensure_csrf_cookie
def editexpopage(request, path):
'''Manages the 'Edit this Page' capability for expo handbook and other html pages.
"""Manages the 'Edit this Page' capability for expo handbook and other html pages.
Relies on HTML5 or javascript to provide the in-browser editing environment.
'''
"""
try:
# if a cave not a webpage at all.
r = Cave.objects.get(url = path)
# if a cave not a webpage at all.
r = Cave.objects.get(url=path)
return troggle.core.views.caves.editCave(request, r.cave.slug)
except Cave.DoesNotExist:
pass
print(f' - {sys_getfilesystemencoding()=}')
if (sys_getfilesystemencoding() != "utf-8"):
return HttpResponse(default_head + '<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body' )
print(f" - {sys_getfilesystemencoding()=}")
if sys_getfilesystemencoding() != "utf-8":
return HttpResponse(
default_head
+ "<h3>UTF-8 Parsing Failure:<br>Default file encoding on this Troggle installation is not UTF-8:<br>failure detected in expowebpage in views.expo.py</h3> Please Please reconfigure Debian/Apache/Django to fix this, i.e. contact Wookey. </body"
)
try:
filepath = Path(settings.EXPOWEB) / path
o = open(filepath, "r", encoding="utf8")
html = o.read()
autogeneratedmatch = re.search(r"\<\!--\s*(.*?(Do not edit|It is auto-generated).*?)\s*--\>", html, re.DOTALL + re.IGNORECASE)
autogeneratedmatch = re.search(
r"\<\!--\s*(.*?(Do not edit|It is auto-generated).*?)\s*--\>", html, re.DOTALL + re.IGNORECASE
)
if autogeneratedmatch:
return HttpResponse(autogeneratedmatch.group(1))
m = re.search(r"(.*)<head([^>]*)>(.*)</head>(.*)<body([^>]*)>(.*)</body>(.*)", html, re.DOTALL + re.IGNORECASE)
if m:
filefound = True
preheader, headerargs, head, postheader, bodyargs, body, postbody = m.groups()
# linksmatch = re.match(r'(.*)(<ul\s+id="links">.*)', body, re.DOTALL + re.IGNORECASE)
# if linksmatch:
# body, links = linksmatch.groups()
# linksmatch = re.match(r'(.*)(<ul\s+id="links">.*)', body, re.DOTALL + re.IGNORECASE)
# if linksmatch:
# body, links = linksmatch.groups()
else:
return HttpResponse(default_head + html + '<h3>HTML Parsing failure:<br>Page could not be parsed into header and body:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to be in the expected full HTML format .</body>' )
return HttpResponse(
default_head
+ html
+ "<h3>HTML Parsing failure:<br>Page could not be parsed into header and body:<br>failure detected in expowebpage in views.expo.py</h3> Please edit this <var>:expoweb:</var> page to be in the expected full HTML format .</body>"
)
except IOError:
print("### File not found ### ", filepath)
filefound = False
if request.method == 'POST': # If the form has been submitted...
pageform = ExpoPageForm(request.POST) # A form bound to the POST data
if pageform.is_valid():# Form valid therefore write file
#print("### \n", str(pageform)[0:300])
#print("### \n csrfmiddlewaretoken: ",request.POST['csrfmiddlewaretoken'])
if request.method == "POST": # If the form has been submitted...
pageform = ExpoPageForm(request.POST) # A form bound to the POST data
if pageform.is_valid(): # Form valid therefore write file
# print("### \n", str(pageform)[0:300])
# print("### \n csrfmiddlewaretoken: ",request.POST['csrfmiddlewaretoken'])
if filefound:
headmatch = re.match(r"(.*)<title>.*</title>(.*)", head, re.DOTALL + re.IGNORECASE)
headmatch = re.match(r"(.*)<title>.*</title>(.*)", head, re.DOTALL + re.IGNORECASE)
if headmatch:
head = headmatch.group(1) + "<title>" + pageform.cleaned_data["title"] + "</title>" + headmatch.group(2)
head = (
headmatch.group(1)
+ "<title>"
+ pageform.cleaned_data["title"]
+ "</title>"
+ headmatch.group(2)
)
else:
head = "<title>" + pageform.cleaned_data["title"] + "</title>"
else:
@@ -349,36 +424,53 @@ def editexpopage(request, path):
headerargs = ""
postheader = ""
bodyargs = ""
postbody = "</html>\n"
postbody = "</html>\n"
body = pageform.cleaned_data["html"]
body = body.replace("\r", "")
result = f"{preheader}<head{headerargs}>{head}</head>{postheader}<body{bodyargs}>\n{body}</body>{postbody}"
if not filefound or result != html: # Check if content changed at all
if not filefound or result != html: # Check if content changed at all
try:
change_message = pageform.cleaned_data["change_message"]
write_and_commit([(filepath, result, "utf-8")], f'{change_message} - online edit of {path}')
write_and_commit([(filepath, result, "utf-8")], f"{change_message} - online edit of {path}")
except WriteAndCommitError as e:
return render(request,'errors/generic.html', {'message': e.message})
return render(request, "errors/generic.html", {"message": e.message})
return HttpResponseRedirect(reverse('expopage', args=[path])) # Redirect after POST
return HttpResponseRedirect(reverse("expopage", args=[path])) # Redirect after POST
else:
if filefound:
m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
if m:
title, = m.groups()
if m:
(title,) = m.groups()
else:
title = ""
pageform = ExpoPageForm(initial = {"html": body, "title": title})
pageform = ExpoPageForm(initial={"html": body, "title": title})
else:
pageform = ExpoPageForm()
return render(request, 'editexpopage.html', {'path': path, 'form': pageform, })
return render(
request,
"editexpopage.html",
{
"path": path,
"form": pageform,
},
)
class ExpoPageForm(forms.Form):
'''The form used by the editexpopage function
'''
title = forms.CharField(widget=forms.TextInput(attrs={'size':'60', 'placeholder': "Enter title (displayed in tab)"}))
html = forms.CharField(widget=HTMLarea(attrs={"height":"80%", "rows":20, 'placeholder': "Enter page content (using HTML)"},
preview = True), required=False)
change_message = forms.CharField(widget=forms.Textarea(attrs={"cols":80, "rows":3, 'placeholder': "Describe the change made (for version control records)"}))
"""The form used by the editexpopage function"""
title = forms.CharField(
widget=forms.TextInput(attrs={"size": "60", "placeholder": "Enter title (displayed in tab)"})
)
html = forms.CharField(
widget=HTMLarea(
attrs={"height": "80%", "rows": 20, "placeholder": "Enter page content (using HTML)"}, preview=True
),
required=False,
)
change_message = forms.CharField(
widget=forms.Textarea(
attrs={"cols": 80, "rows": 3, "placeholder": "Describe the change made (for version control records)"}
)
)

@@ -24,60 +24,65 @@ from troggle.parsers.people import GetPersonExpeditionNameLookup
from .auth import login_required_if_public
'''These views are for logbook items when they appear in an 'expedition' page
"""These views are for logbook items when they appear in an 'expedition' page
and for persons: their individual pages and their personexpedition pages.
It uses the global object TROG to hold some cached pages.
'''
"""
todo = """Fix the get_person_chronology() display bug.
"""
todo = '''Fix the get_person_chronology() display bug.
'''
def notablepersons(request):
def notabilitykey(person):
return person.notability()
return person.notability()
persons = Person.objects.all()
# From what I can tell, "persons" seems to be the table rows, while "pcols" is the table columns. - AC 16 Feb 09
pcols = [ ]
pcols = []
ncols = 4
nc = int((len(persons) + ncols - 1) / ncols)
for i in range(ncols):
pcols.append(persons[i * nc: (i + 1) * nc])
pcols.append(persons[i * nc : (i + 1) * nc])
notablepersons = []
# Needed recoding because of Django CVE-2021-45116
# Needed recoding because of Django CVE-2021-45116
for person in persons:
if person.bisnotable():
notablepersons.append(person)
notablepersons.sort(key=notabilitykey, reverse=True)
return render(request,'notablepersons.html', {'persons': persons, 'pcols':pcols, 'notablepersons':notablepersons})
return render(
request, "notablepersons.html", {"persons": persons, "pcols": pcols, "notablepersons": notablepersons}
)
def expedition(request, expeditionname):
'''Returns a rendered page for one expedition, specified by the year e.g. '2019'.
"""Returns a rendered page for one expedition, specified by the year e.g. '2019'.
If page caching is enabled, it caches the dictionaries used to render the template page.
This is not as difficult to understand as it looks. Yes there are many levels of indirection, with multiple trees being traversed at the same time. And the Django special syntax
makes this hard for normal Python programmers.
Remember that 'personexpedition__expedition' is interpreted by Django to mean the
'expedition' object which is connected by a foreign key to the 'personexpedition'
object, which is a field of the PersonLogEntry object:
PersonLogEntry.objects.filter(personexpedition__expedition=expo)
Queries are not evaluated to hit the database until a result is actually used. Django
Queries are not evaluated to hit the database until a result is actually used. Django
does lazy evaluation.
'''
"""
try:
expo = Expedition.objects.get(year=int(expeditionname))
except:
message = f'Expedition not found - database apparently empty, you probably need to do a full re-import of all data.'
return render(request, 'errors/generic.html', {'message': message})
if request.user.is_authenticated:
message = (
f"Expedition not found - database apparently empty, you probably need to do a full re-import of all data."
)
return render(request, "errors/generic.html", {"message": message})
if request.user.is_authenticated:
logged_in = True
if "reload" in request.GET:
expo.logbookentry_set.all().delete()
@@ -85,134 +90,149 @@ def expedition(request, expeditionname):
else:
logged_in = False
ts = TROG['pagecache']['expedition'] # not much use unless single user!
ts = TROG["pagecache"]["expedition"] # not much use unless single user!
if settings.CACHEDPAGES:
nexpos = len( TROG['pagecache']['expedition'])
#print(f'! - expo {expeditionname} CACHEDPAGES {nexpos} expo pages in cache.')
nexpos = len(TROG["pagecache"]["expedition"])
# print(f'! - expo {expeditionname} CACHEDPAGES {nexpos} expo pages in cache.')
if expeditionname in ts:
#print('! - expo {expeditionanme} using cached page')
return render(request,'expedition.html', { **ts[expeditionname], 'logged_in' : logged_in })
expeditions = Expedition.objects.all() # top menu only, evaluated only when template renders
entries = expo.logbookentry_set.all()
# print('! - expo {expeditionanme} using cached page')
return render(request, "expedition.html", {**ts[expeditionname], "logged_in": logged_in})
expeditions = Expedition.objects.all() # top menu only, evaluated only when template renders
entries = expo.logbookentry_set.all()
blocks = expo.survexblock_set.all()
dateditems = list(entries) + list(blocks) # evaluates the Django query and hits db
dateditems = list(entries) + list(blocks) # evaluates the Django query and hits db
dates = sorted(set([item.date for item in dateditems]))
allpersonlogentries = PersonLogEntry.objects.filter(personexpedition__expedition=expo)
personexpodays = [ ]
personexpodays = []
for personexpedition in expo.personexpedition_set.all():
expotrips = allpersonlogentries.filter(personexpedition=personexpedition) # lazy
expotrips = allpersonlogentries.filter(personexpedition=personexpedition) # lazy
expoblocks = blocks.filter(survexpersonrole__personexpedition=personexpedition)
prow = [ ]
prow = []
for date in dates:
personentries = expotrips.filter(logbook_entry__date=date) # lazy
personblocks = set(expoblocks.filter(date = date)) # not lazy
personentries = expotrips.filter(logbook_entry__date=date) # lazy
personblocks = set(expoblocks.filter(date=date)) # not lazy
pcell = {}
pcell["personentries"] = personentries
pcell["survexblocks"] = personblocks
if issunday := (date.weekday() == 6): # WALRUS
if issunday := (date.weekday() == 6): # WALRUS
pcell["sunday"] = issunday
prow.append(pcell)
personexpodays.append({"personexpedition":personexpedition, "personrow":prow})
ts[expeditionname] = {'expedition': expo,
'expeditions':expeditions,
'personexpodays':personexpodays, 'settings':settings,
'dateditems': dateditems, 'dates':dates}
TROG['pagecache']['expedition'][expeditionname] = ts[expeditionname]
personexpodays.append({"personexpedition": personexpedition, "personrow": prow})
return render(request,'expedition.html', { **ts[expeditionname], 'logged_in' : logged_in } )
ts[expeditionname] = {
"expedition": expo,
"expeditions": expeditions,
"personexpodays": personexpodays,
"settings": settings,
"dateditems": dateditems,
"dates": dates,
}
TROG["pagecache"]["expedition"][expeditionname] = ts[expeditionname]
class Expeditions_tsvListView(ListView):
"""This uses the Django built-in shortcut mechanism
return render(request, "expedition.html", {**ts[expeditionname], "logged_in": logged_in})
class Expeditions_tsvListView(ListView):
"""This uses the Django built-in shortcut mechanism
It defaults to use a template with name <app-label>/<model-name>_list.html.
https://www.agiliq.com/blog/2017/12/when-and-how-use-django-listview/
https://developer.mozilla.org/en-US/docs/Learn/Server-side/Django/Generic_views
Either a queryset variable or get_queryset() function is used, but not needed
if you want all the objects of a particular type, in which case just set model = <object>
"""
template_name = 'core/expeditions_tsv_list.html' # if not present then uses core/expedition_list.html
#queryset = Expedition.objects.all()
#context_object_name = 'expedition'
model = Expedition # equivalent to .objects.all() for a queryset
class Expeditions_jsonListView(ListView):
template_name = 'core/expeditions_json_list.html'
model = Expedition
template_name = "core/expeditions_tsv_list.html" # if not present then uses core/expedition_list.html
# queryset = Expedition.objects.all()
# context_object_name = 'expedition'
model = Expedition # equivalent to .objects.all() for a queryset
def person(request, first_name='', last_name='', ):
class Expeditions_jsonListView(ListView):
template_name = "core/expeditions_json_list.html"
model = Expedition
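# A sketch of the alternatives the Expeditions_tsvListView docstring mentions: instead of
# `model = Expedition`, a ListView can declare an explicit `queryset` or override
# get_queryset(). The class below is purely illustrative and is not wired into urls.py.
class ExpeditionsByYearListView(ListView):
    template_name = "core/expeditions_tsv_list.html"

    def get_queryset(self):
        return Expedition.objects.order_by("-year")  # newest expedition first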
def person(
request,
first_name="",
last_name="",
):
try:
this_person = Person.objects.get(first_name = first_name, last_name = last_name)
this_person = Person.objects.get(first_name=first_name, last_name=last_name)
except:
message = f'Person not found \'{first_name} {last_name}\' - possibly Scottish? (See our <a href="/handbook/troggle/namesredesign.html">Proposal to fix this</a>)'
return render(request, 'errors/generic.html', {'message': message})
return render(request,'person.html', {'person': this_person })
message = f"Person not found '{first_name} {last_name}' - possibly Scottish? (See our <a href=\"/handbook/troggle/namesredesign.html\">Proposal to fix this</a>)"
return render(request, "errors/generic.html", {"message": message})
return render(request, "person.html", {"person": this_person})
def get_person_chronology(personexpedition):
'''
"""
This is just a nasty convoluted way of trying to make the template do more work than it is sensible to ask it to do.
Rewrite more simply with the logic in the python, not in Django template language (you bastard Curtis).
'''
res = { }
"""
res = {}
for personlogentry in personexpedition.personlogentry_set.all():
a = res.setdefault(personlogentry.logbook_entry.date, { })
a.setdefault("personlogentries", [ ]).append(personlogentry)
a = res.setdefault(personlogentry.logbook_entry.date, {})
a.setdefault("personlogentries", []).append(personlogentry)
for personrole in personexpedition.survexpersonrole_set.all():
if personrole.survexblock.date: # avoid bad data from another bug
a = res.setdefault(personrole.survexblock.date, { })
a.setdefault("personroles", [ ]).append(personrole.survexblock)
if personrole.survexblock.date: # avoid bad data from another bug
a = res.setdefault(personrole.survexblock.date, {})
a.setdefault("personroles", []).append(personrole.survexblock)
# build up the tables
rdates = sorted(list(res.keys()))
res2 = [ ]
res2 = []
for rdate in rdates:
personlogentries = res[rdate].get("personlogentries", [])
personroles = res[rdate].get("personroles", [])
for n in range(max(len(personlogentries), len(personroles) )):
res2.append(((n == 0 and rdate or "--"), (n < len(personlogentries) and personlogentries[n]), (n < len(personroles) and personroles[n]) ))
personlogentries = res[rdate].get("personlogentries", [])
personroles = res[rdate].get("personroles", [])
for n in range(max(len(personlogentries), len(personroles))):
res2.append(
(
(n == 0 and rdate or "--"),
(n < len(personlogentries) and personlogentries[n]),
(n < len(personroles) and personroles[n]),
)
)
return res2
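# A sketch of the simpler rewrite the docstring above asks for: pair each date's logbook
# entries with its survex roles using itertools.zip_longest instead of index arithmetic.
# Illustrative only; zip_longest pads the shorter list with None where the code above
# uses False, which the template would need to tolerate.
from itertools import zip_longest

def person_chronology_rows(res):
    rows = []
    for rdate in sorted(res):
        entries = res[rdate].get("personlogentries", [])
        roles = res[rdate].get("personroles", [])
        for n, (entry, role) in enumerate(zip_longest(entries, roles)):
            rows.append((rdate if n == 0 else "--", entry, role))
    return rows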
def personexpedition(request, first_name='', last_name='', year=''):
person = Person.objects.get(first_name = first_name, last_name = last_name)
def personexpedition(request, first_name="", last_name="", year=""):
person = Person.objects.get(first_name=first_name, last_name=last_name)
this_expedition = Expedition.objects.get(year=year)
personexpedition = person.personexpedition_set.get(expedition=this_expedition)
personchronology = get_person_chronology(personexpedition)
#for pc in personchronology:
#print(pc)
return render(request,'personexpedition.html', {'personexpedition': personexpedition, 'personchronology':personchronology})
# for pc in personchronology:
# print(pc)
return render(
request, "personexpedition.html", {"personexpedition": personexpedition, "personchronology": personchronology}
)
def logbookentry(request, date, slug):
# start = time.time()
trips = LogbookEntry.objects.filter(date=date) # all the trips not just this one
trips = LogbookEntry.objects.filter(date=date) # all the trips not just this one
this_logbookentry = trips.filter(date=date, slug=slug)
if this_logbookentry:
if len(this_logbookentry)>1:
return render(request, 'object_list.html',{'object_list':this_logbookentry})
if len(this_logbookentry) > 1:
return render(request, "object_list.html", {"object_list": this_logbookentry})
else:
wallets = set()
allwallets = Wallet.objects.all()
refwallets = allwallets.filter(survexblock__date=date)
for r in refwallets:
wallets.add(r)
# Note that w.year() only works for wallets which have a valid JSON file existing
# This is very slow with a big lag as w.date() is a computed field
# Noticably slow with WSL2 and NTFS filesystem, even with caching as walletdate.
@@ -221,20 +241,27 @@ def logbookentry(request, date, slug):
wallets.add(j)
svxothers = SurvexBlock.objects.filter(date=date)
this_logbookentry=this_logbookentry[0]
this_logbookentry = this_logbookentry[0]
# This is the only page that uses next_.. and prev_..
# and it is calculated on the fly in the model
return render(request, 'logbookentry.html',
{'logbookentry': this_logbookentry, 'trips': trips, 'svxothers': svxothers, 'wallets': wallets})
# and it is calculated on the fly in the model
return render(
request,
"logbookentry.html",
{"logbookentry": this_logbookentry, "trips": trips, "svxothers": svxothers, "wallets": wallets},
)
else:
msg =(f' Logbook entry slug:"{slug}" not found in database on date:"{date}" ')
msg = f' Logbook entry slug:"{slug}" not found in database on date:"{date}" '
print(msg)
return render(request, 'errors/generic.html',{'message':msg})
return render(request, "errors/generic.html", {"message": msg})
def get_people(request, expeditionslug):
exp = Expedition.objects.get(year = expeditionslug)
return render(request,'options.html', {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
exp = Expedition.objects.get(year=expeditionslug)
return render(request, "options.html", {"items": [(pe.slug, pe.name) for pe in exp.personexpedition_set.all()]})
def get_logbook_entries(request, expeditionslug):
exp = Expedition.objects.get(year = expeditionslug)
return render(request,'options.html', {"items": [(le.slug, f"{le.date} - {le.title}") for le in exp.logbookentry_set.all()]})
exp = Expedition.objects.get(year=expeditionslug)
return render(
request, "options.html", {"items": [(le.slug, f"{le.date} - {le.title}") for le in exp.logbookentry_set.all()]}
)

@@ -13,34 +13,41 @@ from django.template import Context, loader
from django.urls import reverse
from troggle.core.models.caves import Cave
from troggle.core.models.logbooks import QM, LogbookEntry #, PersonLogEntry
from troggle.core.models.logbooks import QM, LogbookEntry # , PersonLogEntry
from troggle.core.models.survex import DrawingFile
# from databaseReset import reinit_db # don't do this. databaseReset runs code *at import time*
from troggle.core.models.troggle import Expedition, Person, PersonExpedition
from troggle.parsers.imports import (import_caves, import_drawingsfiles,
import_logbooks, import_people,
import_QMs, import_survex,
import_surveyscans)
from troggle.parsers.imports import (
import_caves,
import_drawingsfiles,
import_logbooks,
import_people,
import_QMs,
import_survex,
import_surveyscans,
)
from .auth import login_required_if_public
'''Utility functions and code to serve the control panel and individual user's
"""Utility functions and code to serve the control panel and individual user's
progress and task list (deprecated as we do not have individual user login).
'''
"""
todo = '''
todo = """
- Use logbookdownloader to convert all older logbooks into the 2005-variant of HTML then we can
get rid of the parsers for older formats.
When we have done all the old logbooks, delete this function and the two templates.
OR invent a new format, e.g. using <article> and <section>?, which is better!
'''
"""
def todos(request, module):
'''produces todo text from module
def todos(request, module):
"""produces todo text from module
We could automate this to find all those strings automatically
'''
"""
from troggle.core.forms import todo as forms
from troggle.core.middleware import todo as middleware
from troggle.core.models.caves import todo as modelcaves
@@ -53,164 +60,182 @@ def todos(request, module):
from troggle.parsers.drawings import todo as parsersdrawings
from troggle.parsers.logbooks import todo as parserslogbooks
from troggle.parsers.survex import todo as parserssurvex
tododict = {'views/other': todo,
'tests': tests,
'views/logbooks': viewlogbooks,
'views/survex': viewsurvex,
'views/caves': viewcaves,
'views/drawings': viewdrawings,
'parsers/caves': parserscaves,
'parsers/logbooks': parserslogbooks,
'parsers/drawings': parsersdrawings,
'parsers/survex': parserssurvex,
'core/models/caves': modelcaves,
'core/middleware': middleware,
'core/forms': forms}
return render(request,'core/todos.html', {'tododict': tododict})
def troggle404(request): # cannot get this to work. Handler404 in urls.py not right syntax
'''Custom 404 page to be used even when Debug=True
tododict = {
"views/other": todo,
"tests": tests,
"views/logbooks": viewlogbooks,
"views/survex": viewsurvex,
"views/caves": viewcaves,
"views/drawings": viewdrawings,
"parsers/caves": parserscaves,
"parsers/logbooks": parserslogbooks,
"parsers/drawings": parsersdrawings,
"parsers/survex": parserssurvex,
"core/models/caves": modelcaves,
"core/middleware": middleware,
"core/forms": forms,
}
return render(request, "core/todos.html", {"tododict": tododict})
def troggle404(request): # cannot get this to work. Handler404 in urls.py not right syntax
"""Custom 404 page to be used even when Debug=True
https://blog.juanwolf.fr/posts/programming/how-to-create-404-page-django/
'''
"""
context = RequestContext(request)
#context['caves'] = Cave.objects.all()
return render(request, ('errors/generic.html', context.flatten()))
# context['caves'] = Cave.objects.all()
return render(request, ("errors/generic.html", context.flatten()))
def frontpage(request):
'''never seen in common practice. Logon should redirect here when this is more useful'''
def frontpage(request):
"""never seen in common practice. Logon should redirect here when this is more useful"""
# the messages system does a popup on this page if there is a recent message, e.g. from the admin site actions.
# via django.contrib.messages.middleware.MessageMiddleware
# via django.contrib.messages.middleware.MessageMiddleware
# this is set in the templates.
if request.user.is_authenticated:
return render(request,'tasks.html')
return render(request, "tasks.html")
expeditions = Expedition.objects.order_by("-year")
expeditions = Expedition.objects.order_by("-year")
logbookentry = LogbookEntry
cave = Cave
#from django.contrib.admin.templatetags import log
return render(request,'frontpage.html', locals())
# from django.contrib.admin.templatetags import log
return render(request, "frontpage.html", locals())
@login_required_if_public
def controlpanel(request):
'''Admin requires expoadmin user logged on
"""Admin requires expoadmin user logged on
Mostly disabled apart from logbook export
DANGEROUS, these import functions kill the ground under your feet !
'''
jobs_completed=[]
"""
jobs_completed = []
def process_imports():
'''databaseReset.py
jq.enq("reinit",reinit_db)
jq.enq("caves",import_caves)
jq.enq("people",import_people)
jq.enq("scans",import_surveyscans)
jq.enq("logbooks",import_logbooks)
jq.enq("QMs",import_QMs)
jq.enq("drawings",import_drawingsfiles)
jq.enq("survex",import_survex)
'''
"""databaseReset.py
jq.enq("reinit",reinit_db)
jq.enq("caves",import_caves)
jq.enq("people",import_people)
jq.enq("scans",import_surveyscans)
jq.enq("logbooks",import_logbooks)
jq.enq("QMs",import_QMs)
jq.enq("drawings",import_drawingsfiles)
jq.enq("survex",import_survex)
"""
if request.POST.get("import_caves", False):
import_caves()
jobs_completed.append('Caves')
jobs_completed.append("Caves")
if request.POST.get("import_people", False):
import_people()
jobs_completed.append('People')
jobs_completed.append("People")
if request.POST.get("import_surveyscans", False):
import_surveyscans()
jobs_completed.append('Scans')
jobs_completed.append("Scans")
if request.POST.get("import_logbooks", False):
import_logbooks()
jobs_completed.append('Logbooks')
jobs_completed.append("Logbooks")
if request.POST.get("import_QMs", False):
import_QMs()
jobs_completed.append('QMs')
jobs_completed.append("QMs")
if request.POST.get("import_drawingsfiles", False):
import_drawingsfiles()
jobs_completed.append('Drawings')
jobs_completed.append("Drawings")
if request.POST.get("import_survex", False):
import_survex()
jobs_completed.append('Survex')
jobs_completed.append("Survex")
print("", flush=True)
if not request.user.is_superuser: # expoadmin is both .is_staff and ._is_superuser
return render(request,'controlPanel.html', {'error': 'You are logged in, but not logged in as "expoadmin". \nLogout and login again to continue.'})
if not request.user.is_superuser: # expoadmin is both .is_staff and ._is_superuser
return render(
request,
"controlPanel.html",
{"error": 'You are logged in, but not logged in as "expoadmin". \nLogout and login again to contnue.'},
)
else:
if request.method=='POST':
#reinit_db()
if request.method == "POST":
# reinit_db()
process_imports()
return render(request,'controlPanel.html', {'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
return render(
request,
"controlPanel.html",
{"expeditions": Expedition.objects.all(), "jobs_completed": jobs_completed},
)
else:
return render(request,'controlPanel.html', {'expeditions':Expedition.objects.all(),'jobs_completed':jobs_completed})
return render(
request,
"controlPanel.html",
{"expeditions": Expedition.objects.all(), "jobs_completed": jobs_completed},
)
def exportlogbook(request,year=None,extension=None):
'''Constructs, from the database, a complete HTML formatted logbook
def exportlogbook(request, year=None, extension=None):
"""Constructs, from the database, a complete HTML formatted logbook
for the current year. Formats available are HTML2005 (others old & broken or not written yet)
There are no images stored in the database, so this is only a tool for a first pass, to be followed by
hand-editing. However links to images work in the HTML text of a logbook entry
NEED TO ADD IN THE MATERIAL WHICH IS NOT IN ANY LBE ! e.g. front matter.
NEED TO ADD IN THE MATERIAL WHICH IS NOT IN ANY LBE ! e.g. front matter.
This function is the recipient of the POST action of the export form in the control panel
'''
"""
def lbeKey(lbe):
"""This function goes into a lexicogrpahic sort function
"""
"""This function goes into a lexicogrpahic sort function"""
return str(lbe.date)
if not request.method=='POST':
return render(request,'controlPanel.html', {'expeditions':Expedition.objects.all(),'jobs_completed':""})
if not request.method == "POST":
return render(request, "controlPanel.html", {"expeditions": Expedition.objects.all(), "jobs_completed": ""})
else:
print(f'Logbook export {request.POST}')
print(f"Logbook export {request.POST}")
year = request.POST['year']
current_expedition=Expedition.objects.get(year=year)
logbook_entries=LogbookEntry.objects.filter(expedition=current_expedition).order_by('date') # need to be sorted by date!
print(f'Logbook has {len(logbook_entries)} entries in it.')
year = request.POST["year"]
current_expedition = Expedition.objects.get(year=year)
logbook_entries = LogbookEntry.objects.filter(expedition=current_expedition).order_by(
"date"
) # need to be sorted by date!
extension ='html'
response = HttpResponse(content_type='text/html')
style='2005'
filename='logbook-new-format.' + extension
template='logbook'+style+'style.'+ extension
response['Content-Disposition'] = 'attachment; filename='+filename
t=loader.get_template(template)
logbookfile = (t.render({'logbook_entries':logbook_entries}))
print(f"Logbook has {len(logbook_entries)} entries in it.")
extension = "html"
response = HttpResponse(content_type="text/html")
style = "2005"
filename = "logbook-new-format." + extension
template = "logbook" + style + "style." + extension
response["Content-Disposition"] = "attachment; filename=" + filename
t = loader.get_template(template)
logbookfile = t.render({"logbook_entries": logbook_entries})
endpath = Path(settings.EXPOWEB, "years", year, "endmatter.html")
endmatter = ""
if endpath.is_file():
if endpath.is_file():
try:
with open(endpath,"r") as end:
endmatter = end.read()
with open(endpath, "r") as end:
endmatter = end.read()
except:
print(" ! Very Bad Error opening " + endpath)
frontpath = Path(settings.EXPOWEB, "years", year, "frontmatter.html")
if frontpath.is_file():
if frontpath.is_file():
try:
with open(frontpath,"r") as front:
frontmatter = front.read()
with open(frontpath, "r") as front:
frontmatter = front.read()
except:
print(" ! Very Bad Error opening " + frontpath)
logbookfile = re.sub(r"<body>", "<body>\n"+frontmatter+endmatter , logbookfile)
logbookfile = re.sub(r"<body>", "<body>\n" + frontmatter + endmatter, logbookfile)
else:
logbookfile = re.sub(r"<body>", f"<body>\n<h1>Expo {year}</h1>\n"+endmatter, logbookfile)
logbookfile = re.sub(r"<body>", f"<body>\n<h1>Expo {year}</h1>\n" + endmatter, logbookfile)
dir = Path(settings.EXPOWEB) / "years" / year
filepath = Path(dir, filename)
with(open(filepath, 'w')) as lb:
with (open(filepath, "w")) as lb:
lb.writelines(logbookfile)
#print(f'Logbook exported to {filepath}')
# print(f'Logbook exported to {filepath}')
completed = f'Logbook exported to <a href="/years/{year}/{filename}">{filename}</a>'
return render(request,'controlPanel.html', {'expeditions':Expedition.objects.all(),'jobs_completed':[completed]})
return render(
request, "controlPanel.html", {"expeditions": Expedition.objects.all(), "jobs_completed": [completed]}
)

@@ -16,100 +16,95 @@ from troggle.parsers.locations import MapLocations
# from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
''' Generates the prospecting guide document.
""" Generates the prospecting guide document.
Also produces the overlay of points on top of a prospecting_image map - to be deleted.
Not working with recent PIL aka Pillow image package - removed.
'''
"""
AREANAMES = [
#('', 'Location unclear'),
('1a', '1a &ndash; Plateau: around Top Camp'),
('1b', '1b &ndash; Western plateau near 182'),
('1c', '1c &ndash; Eastern plateau near 204 walk-in path'),
('1d', '1d &ndash; Further plateau around 76'),
('2a', '2a &ndash; Southern Schwarzmooskogel near 201 path and the Nipple'),
('2b', '2b &ndash; Eish&ouml;hle area'),
('2b or 4 (unclear)', '2b or 4 (unclear)'),
('2c', '2c &ndash; Kaninchenh&ouml;hle area'),
('2d', '2d &ndash; Steinbr&uuml;ckenh&ouml;hle area'),
('3', '3 &ndash; Br&auml;uning Alm'),
('4', '4 &ndash; Kratzer valley'),
('5', '5 &ndash; Schwarzmoos-Wildensee'),
('6', '6 &ndash; Far plateau'),
('1626 or 6 (borderline)', '1626 or 6 (borderline)'),
('7', '7 &ndash; Egglgrube'),
('8a', '8a &ndash; Loser south face'),
('8b', '8b &ndash; Loser below Dimmelwand'),
('8c', '8c &ndash; Augst See'),
('8d', '8d &ndash; Loser-Hochganger ridge'),
('9', '9 &ndash; Gschwandt Alm'),
('10', '10 &ndash; Altaussee'),
('11', '11 &ndash; Augstbach')
]
# ('', 'Location unclear'),
("1a", "1a &ndash; Plateau: around Top Camp"),
("1b", "1b &ndash; Western plateau near 182"),
("1c", "1c &ndash; Eastern plateau near 204 walk-in path"),
("1d", "1d &ndash; Further plateau around 76"),
("2a", "2a &ndash; Southern Schwarzmooskogel near 201 path and the Nipple"),
("2b", "2b &ndash; Eish&ouml;hle area"),
("2b or 4 (unclear)", "2b or 4 (unclear)"),
("2c", "2c &ndash; Kaninchenh&ouml;hle area"),
("2d", "2d &ndash; Steinbr&uuml;ckenh&ouml;hle area"),
("3", "3 &ndash; Br&auml;uning Alm"),
("4", "4 &ndash; Kratzer valley"),
("5", "5 &ndash; Schwarzmoos-Wildensee"),
("6", "6 &ndash; Far plateau"),
("1626 or 6 (borderline)", "1626 or 6 (borderline)"),
("7", "7 &ndash; Egglgrube"),
("8a", "8a &ndash; Loser south face"),
("8b", "8b &ndash; Loser below Dimmelwand"),
("8c", "8c &ndash; Augst See"),
("8d", "8d &ndash; Loser-Hochganger ridge"),
("9", "9 &ndash; Gschwandt Alm"),
("10", "10 &ndash; Altaussee"),
("11", "11 &ndash; Augstbach"),
]
def prospecting(request):
'''This produces the multipage 'prospecting guide' document,
"""This produces the multipage 'prospecting guide' document,
intended to be printed and carried into the field - in 1999.
All the formatting and selection cleverness is in the template file.
This produces a vast number of bad 404 URLs as many URLs in the cave_data
XML files refer to other caves, assuming that they are in the same directory
as the prospecting guide. But since the introduction of the 1623/ level, this is
not true. e.g. 163 refers to 162 as href="../162.htm" which is valid in the cave
description page but not when navigating from the prospecting guide page.
Since this vast number of broken links is getting in the way of finding real errors, the guide
has been disabled.
'''
message = f'This prospecting guide text report contains many broken URLs because of a major redesign\n' +\
' to put caves into 1623/ and 1624/ folders in 2017. It was mostly useless because recent QM info was not in it anyway.\n\n' +\
'It is disabled in the python code in "prospecting(request):" in troggle/core/views/prospect.py'
return render(request,'errors/disabled.html', {'message': message})
has been disabled.
"""
message = (
f"This prospecting guide text report contains many broken URLs because of a major redesign\n"
+ " to put caves into 1623/ and 1624/ folders in 2017. It was mostly useless because recent QM info was not in it anyway.\n\n"
+ 'It is disabled in the python code in "prospecting(request):" in troggle/core/views/prospect.py'
)
return render(request, "errors/disabled.html", {"message": message})
areas = []
for key, name in AREANAMES:
a = Area.objects.get(short_name = key) # assumes unique
a = Area.objects.get(short_name=key) # assumes unique
caves = list(a.cave_set.all())
caves.sort(key=caveKey)
areas.append((name, a, caves))
return render(request, 'prospecting.html', {"areas": areas})
return render(request, "prospecting.html", {"areas": areas})
# Parameters for big map and zoomed subarea maps:
# big map first (zoom factor ignored)
# These are the values for the url /prospecting/[mapcode].png
# These are the values for the url /prospecting/[mapcode].png
maps = {
# id left top right bottom zoom
# G&K G&K G&K G&K factor
"all": [33810.4, 85436.5, 38192.0, 81048.2, 0.35,
"All"],
"40": [36275.6, 82392.5, 36780.3, 81800.0, 3.0,
"Eish&ouml;hle"],
"76": [35440.0, 83220.0, 36090.0, 82670.0, 1.3,
"Eislufth&ouml;hle"],
"204": [36354.1, 84154.5, 37047.4, 83300, 3.0,
"Steinbr&uuml;ckenh&ouml;hle"],
"tc": [35230.0, 82690.0, 36110.0, 82100.0, 3.0,
"Near Top Camp"],
"grieß":
[36000.0, 86300.0, 38320.0, 84400.0, 4.0,
"Grießkogel Area"],
# id left top right bottom zoom
# G&K G&K G&K G&K factor
"all": [33810.4, 85436.5, 38192.0, 81048.2, 0.35, "All"],
"40": [36275.6, 82392.5, 36780.3, 81800.0, 3.0, "Eish&ouml;hle"],
"76": [35440.0, 83220.0, 36090.0, 82670.0, 1.3, "Eislufth&ouml;hle"],
"204": [36354.1, 84154.5, 37047.4, 83300, 3.0, "Steinbr&uuml;ckenh&ouml;hle"],
"tc": [35230.0, 82690.0, 36110.0, 82100.0, 3.0, "Near Top Camp"],
"grieß": [36000.0, 86300.0, 38320.0, 84400.0, 4.0, "Grießkogel Area"],
}
for n in list(maps.keys()):
L, T, R, B, S, name = maps[n]
W = (R-L)/2
H = (T-B)/2
W = (R - L) / 2
H = (T - B) / 2
for i in range(2):
for j in range(2):
maps["%s%i%i" % (n, i, j)] = [L + i * W, T - j * H, L + (i + 1) * W, T - (j + 1) * H, S, name]
# Keys in the order in which we want the maps output
mapcodes = ["all", "grieß","40", "76", "204", "tc"]
mapcodes = ["all", "grieß", "40", "76", "204", "tc"]
# Field codes
L = 0
T = 1
@@ -120,77 +115,78 @@ DESC = 5
SIZE = 5
areacolours = {
'1a' : '#00ffff',
'1b' : '#ff00ff',
'1c' : '#ffff00',
'1d' : '#ffffff',
'2a' : '#ff0000',
'2b' : '#00ff00',
'2c' : '#008800',
'2d' : '#ff9900',
'3' : '#880000',
'4' : '#0000ff',
'6' : '#000000', # doubles for surface fixed pts, and anything else
'7' : '#808080'
}
"1a": "#00ffff",
"1b": "#ff00ff",
"1c": "#ffff00",
"1d": "#ffffff",
"2a": "#ff0000",
"2b": "#00ff00",
"2c": "#008800",
"2d": "#ff9900",
"3": "#880000",
"4": "#0000ff",
"6": "#000000", # doubles for surface fixed pts, and anything else
"7": "#808080",
}
for FONT in [
"/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
"/mnt/c/windows/fonts/arial.ttf",
"C:\WINNT\Fonts\ARIAL.TTF"
]:
if os.path.isfile(FONT): break
"/usr/share/fonts/truetype/freefont/FreeSans.ttf",
"/usr/X11R6/lib/X11/fonts/truetype/arial.ttf",
"/mnt/c/windows/fonts/arial.ttf",
"C:\WINNT\Fonts\ARIAL.TTF",
]:
if os.path.isfile(FONT):
break
TEXTSIZE = 16
CIRCLESIZE =8
CIRCLESIZE = 8
LINEWIDTH = 2
#myFont = ImageFont.truetype(FONT, TEXTSIZE) # disabled as not importing PIL
#print(f' - myFont {myFont} {FONT} {TEXTSIZE}')
# myFont = ImageFont.truetype(FONT, TEXTSIZE) # disabled as not importing PIL
# print(f' - myFont {myFont} {FONT} {TEXTSIZE}')
def mungecoord(x, y, mapcode, img):
# Top of Zinken is 73 1201 = dataset 34542 81967
# Top of Hinter is 1073 562 = dataset 36670 83317
# image is 1417 by 2201
# FACTOR1 = 1000.0 / (36670.0-34542.0)
# FACTOR2 = (1201.0-562.0) / (83317 - 81967)
# FACTOR = (FACTOR1 + FACTOR2)/2
# The factors aren't the same as the scanned map's at a slight angle. I
# can't be bothered to fix this. Since we zero on the Hinter it makes
# very little difference for caves in the areas round 76 or 204.
# xoffset = (x - 36670)*FACTOR
# yoffset = (y - 83317)*FACTOR
# return (1073 + xoffset, 562 - yoffset)
# Top of Zinken is 73 1201 = dataset 34542 81967
# Top of Hinter is 1073 562 = dataset 36670 83317
# image is 1417 by 2201
# FACTOR1 = 1000.0 / (36670.0-34542.0)
# FACTOR2 = (1201.0-562.0) / (83317 - 81967)
# FACTOR = (FACTOR1 + FACTOR2)/2
# The factors aren't the same as the scanned map's at a slight angle. I
# can't be bothered to fix this. Since we zero on the Hinter it makes
# very little difference for caves in the areas round 76 or 204.
# xoffset = (x - 36670)*FACTOR
# yoffset = (y - 83317)*FACTOR
# return (1073 + xoffset, 562 - yoffset)
m = maps[mapcode]
factorX, factorY = img.size[0] / (m[R] - m[L]), img.size[1] / (m[T] - m[B])
return ((x - m[L]) * factorX, (m[T] - y) * factorY)
COL_TYPES = {True: "red", False: "#dddddd", "Reference": "#dddddd"}
m = maps[mapcode]
factorX, factorY = img.size[0] / (m[R] - m[L]), img.size[1] / (m[T] - m[B])
return ((x - m[L]) * factorX, (m[T] - y) * factorY)
COL_TYPES = {True: "red",
False: "#dddddd",
"Reference": "#dddddd"}
def prospecting_image(request, name):
'''This draws map outlines on an existing map image.
"""This draws map outlines on an existing map image.
But getting the entrances plotted is broken by later changes elsewhere in the system since this code was written.
SurvexStations are in x=latitude, y=longitude - these are what appear in essentials.gpx
SurvexStations are in x=latitude, y=longitude - these are what appear in essentials.gpx
Entrances are in northing, easting
which is why we can't simply plot all the Entrances...
We should replace all this with something that exports an overlay for Google Maps and OpenStreetView
'''
We should replace all this with something that exports an overlay for Google Maps and OpenStreetView
"""
mainImage = Image.open(os.path.join(settings.EXPOFILES, "location_maps", "pguidemap.jpg"))
# if settings.PUBLIC_SITE and not request.user.is_authenticated:
# mainImage = Image.new("RGB", mainImage.size, '#ffffff')
# mainImage = Image.new("RGB", mainImage.size, '#ffffff')
m = maps[name]
imgmaps = []
if name == "all":
img = mainImage
else:
M = maps['all']
M = maps["all"]
W, H = mainImage.size
l = int((m[L] - M[L]) / (M[R] - M[L]) * W)
t = int((m[T] - M[T]) / (M[B] - M[T]) * H)
@@ -201,68 +197,71 @@ def prospecting_image(request, name):
h = int(round(m[ZOOM] * (m[B] - m[T]) / (M[B] - M[T]) * H))
img = img.resize((w, h), Image.BICUBIC)
draw = ImageDraw.Draw(img)
#draw.setfont(myFont)
# draw.setfont(myFont)
if name == "all":
for maparea in list(maps.keys()):
if maparea == "all":
continue
localm = maps[maparea]
l,t = mungecoord(localm[L], localm[T], "all", img)
r,b = mungecoord(localm[R], localm[B], "all", img)
l, t = mungecoord(localm[L], localm[T], "all", img)
r, b = mungecoord(localm[R], localm[B], "all", img)
text = maparea + " map"
textlen = draw.textsize(text)[0] + 3
draw.rectangle([l, t, l+textlen, t+TEXTSIZE+2], fill='#ffffff')
draw.text((l+2, t+1), text, fill="#000000", font=myFont)
imgmaps.append( [l, t, l+textlen, t+SIZE+2, "submap" + maparea, maparea + " subarea map"] )
draw.line([l, t, r, t], fill='#777777', width=LINEWIDTH)
draw.line([l, b, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, b], fill='#777777', width=LINEWIDTH)
draw.line([r, t, r, b], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l+textlen, t], fill='#777777', width=LINEWIDTH)
draw.line([l, t+TEXTSIZE+2, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l, t, l, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
draw.line([l+textlen, t, l+textlen, t+TEXTSIZE+2], fill='#777777', width=LINEWIDTH)
#imgmaps[maparea] = []
draw.rectangle([l, t, l + textlen, t + TEXTSIZE + 2], fill="#ffffff")
draw.text((l + 2, t + 1), text, fill="#000000", font=myFont)
imgmaps.append([l, t, l + textlen, t + SIZE + 2, "submap" + maparea, maparea + " subarea map"])
draw.line([l, t, r, t], fill="#777777", width=LINEWIDTH)
draw.line([l, b, r, b], fill="#777777", width=LINEWIDTH)
draw.line([l, t, l, b], fill="#777777", width=LINEWIDTH)
draw.line([r, t, r, b], fill="#777777", width=LINEWIDTH)
draw.line([l, t, l + textlen, t], fill="#777777", width=LINEWIDTH)
draw.line([l, t + TEXTSIZE + 2, l + textlen, t + TEXTSIZE + 2], fill="#777777", width=LINEWIDTH)
draw.line([l, t, l, t + TEXTSIZE + 2], fill="#777777", width=LINEWIDTH)
draw.line([l + textlen, t, l + textlen, t + TEXTSIZE + 2], fill="#777777", width=LINEWIDTH)
# imgmaps[maparea] = []
# Draw scale bar
m100 = int(100 / (m[R] - m[L]) * img.size[0])
draw.line([10, TEXTSIZE*3, 10, TEXTSIZE*2], fill='#000000', width=LINEWIDTH)
draw.line([10, TEXTSIZE*2, 10+m100, TEXTSIZE*2], fill='#000000', width=LINEWIDTH)
draw.line([10+m100, TEXTSIZE * 3, 10+m100, TEXTSIZE*2], fill='#000000', width=LINEWIDTH)
draw.line([10, TEXTSIZE * 3, 10, TEXTSIZE * 2], fill="#000000", width=LINEWIDTH)
draw.line([10, TEXTSIZE * 2, 10 + m100, TEXTSIZE * 2], fill="#000000", width=LINEWIDTH)
draw.line([10 + m100, TEXTSIZE * 3, 10 + m100, TEXTSIZE * 2], fill="#000000", width=LINEWIDTH)
label = "100m"
draw.text([10 + (m100 - draw.textsize(label)[0]) / 2, TEXTSIZE/2], label, fill='#000000', font=myFont)
draw.text([10 + (m100 - draw.textsize(label)[0]) / 2, TEXTSIZE / 2], label, fill="#000000", font=myFont)
# Draw the circles for known points
# Draw the circles for known points
# Northing, Easting, Diameter - but N&E are swapped re database
for (N, E, D, num) in [(35975.37, 83018.21, 100, "177"), # Calculated from bearings
(35350.00, 81630.00, 50, "71"), # From Auer map
(36025.00, 82475.00, 50, "146"), # From mystery map
(35600.00, 82050.00, 50, "35"), # From Auer map
(35650.00, 82025.00, 50, "44"), # From Auer map
(36200.00, 82925.00, 50, "178"), # Calculated from bearings
(35232.64, 82910.37, 25, "181"), # Calculated from bearings
(35323.60, 81357.83, 50, "74") # From Auer map
]:
(N,E,D) = list(map(float, (N, E, D)))
maparea = Cave.objects.get(kataster_number = num).getArea().short_name
lo = mungecoord(N-D, E+D, name, img)
hi = mungecoord(N+D, E-D, name, img)
lpos = mungecoord(N-D, E, name, img)
draw.ellipse([lo,hi], outline="#000000")
draw.ellipse([lo[0]+1, lo[1]+1, hi[0]-1, hi[1]-1], outline=areacolours[maparea])
draw.ellipse([lo[0]+2, lo[1]+2, hi[0]-2, hi[1]-2], outline=areacolours[maparea])
draw.rectangle([lpos[0],lpos[1]-TEXTSIZE/2, lpos[0] + draw.textsize(name)[0], lpos[1]+TEXTSIZE/2], fill="#ffffff")
draw.text((lpos[0], lpos[1]-TEXTSIZE/2), num, fill="#000000")
#print(f' CIRCLES - {num} {(N,E,D)}')
for (N, E, D, num) in [
(35975.37, 83018.21, 100, "177"), # Calculated from bearings
(35350.00, 81630.00, 50, "71"), # From Auer map
(36025.00, 82475.00, 50, "146"), # From mystery map
(35600.00, 82050.00, 50, "35"), # From Auer map
(35650.00, 82025.00, 50, "44"), # From Auer map
(36200.00, 82925.00, 50, "178"), # Calculated from bearings
(35232.64, 82910.37, 25, "181"), # Calculated from bearings
(35323.60, 81357.83, 50, "74"), # From Auer map
]:
(N, E, D) = list(map(float, (N, E, D)))
maparea = Cave.objects.get(kataster_number=num).getArea().short_name
lo = mungecoord(N - D, E + D, name, img)
hi = mungecoord(N + D, E - D, name, img)
lpos = mungecoord(N - D, E, name, img)
draw.ellipse([lo, hi], outline="#000000")
draw.ellipse([lo[0] + 1, lo[1] + 1, hi[0] - 1, hi[1] - 1], outline=areacolours[maparea])
draw.ellipse([lo[0] + 2, lo[1] + 2, hi[0] - 2, hi[1] - 2], outline=areacolours[maparea])
draw.rectangle(
[lpos[0], lpos[1] - TEXTSIZE / 2, lpos[0] + draw.textsize(name)[0], lpos[1] + TEXTSIZE / 2], fill="#ffffff"
)
draw.text((lpos[0], lpos[1] - TEXTSIZE / 2), num, fill="#000000")
# print(f' CIRCLES - {num} {(N,E,D)}')
# ml = MapLocations()
# for p in ml.points():
# surveypoint, number, point_type, label = p
# print(f'{surveypoint}, {number}, {point_type}, {label}')
# plot(surveypoint, number, True, label, name, draw, img)
# surveypoint, number, point_type, label = p
# print(f'{surveypoint}, {number}, {point_type}, {label}')
# plot(surveypoint, number, True, label, name, draw, img)
# print(f'{name},\n{draw},\n{img}')
ents = Entrance.objects.all() # only has entrances and fixed points in it these days,
ents = Entrance.objects.all() # only has entrances and fixed points in it these days,
# but there are only 11 Entrances with northing, easting and a useable tag!
D = 50
for e in ents:
@@ -279,40 +278,44 @@ def prospecting_image(request, name):
continue
if not e.northing:
continue
lo = mungecoord(N-D, E+D, st, img)
hi = mungecoord(N+D, E-D, st, img)
lpos = mungecoord(N-D, E, st, img)
draw.ellipse([lo,hi], outline="#000000")
draw.ellipse([lo[0]+1, lo[1]+1, hi[0]-1, hi[1]-1], outline="#ffffff")
draw.ellipse([lo[0]+2, lo[1]+2, hi[0]-2, hi[1]-2], outline="#ffffff")
draw.rectangle([lpos[0],lpos[1]-TEXTSIZE/2, lpos[0] + draw.textsize(st)[0], lpos[1]+TEXTSIZE/2], fill="#ffffff")
draw.text((lpos[0], lpos[1]-TEXTSIZE/2), num, fill="#000000")
lo = mungecoord(N - D, E + D, st, img)
hi = mungecoord(N + D, E - D, st, img)
lpos = mungecoord(N - D, E, st, img)
draw.ellipse([lo, hi], outline="#000000")
draw.ellipse([lo[0] + 1, lo[1] + 1, hi[0] - 1, hi[1] - 1], outline="#ffffff")
draw.ellipse([lo[0] + 2, lo[1] + 2, hi[0] - 2, hi[1] - 2], outline="#ffffff")
draw.rectangle(
[lpos[0], lpos[1] - TEXTSIZE / 2, lpos[0] + draw.textsize(st)[0], lpos[1] + TEXTSIZE / 2],
fill="#ffffff",
)
draw.text((lpos[0], lpos[1] - TEXTSIZE / 2), num, fill="#000000")
# draw.ellipse([(x-CIRCLESIZE,y-CIRCLESIZE),(x+CIRCLESIZE,y+CIRCLESIZE)], fill="red", outline="blue")
# draw.rectangle([(x+CIRCLESIZE, y-TEXTSIZE/2), (x+CIRCLESIZE*2+draw.textsize(shortnumber)[0], y+TEXTSIZE/2)], fill="#ffffff")
# draw.text((x+CIRCLESIZE * 1.5,y-TEXTSIZE/2), shortnumber, fill="#000000")
#print(f' SUCCESS - {st} {(E, N)} ')
# print(f' SUCCESS - {st} {(E, N)} ')
except:
#print(f' FAIL - {st} {(E, N)} ')
# print(f' FAIL - {st} {(E, N)} ')
pass
response = HttpResponse(content_type = "image/png")
response = HttpResponse(content_type="image/png")
del draw
img.save(response, "PNG")
return response
# def plot(surveypoint, number, point_type, label, mapcode, draw, img):
# try:
# ss = SurvexStation.objects.lookup(surveypoint)
# E, N = ss.x, ss.y
# shortnumber = number.replace("&mdash;","")
# (x,y) = list(map(int, mungecoord(E, N, mapcode, img)))
# imgmaps[maparea].append( [x-4, y-SIZE/2, x+4+draw.textsize(shortnumber)[0], y+SIZE/2, shortnumber, label] )
# draw.rectangle([(x+CIRCLESIZE, y-TEXTSIZE/2), (x+CIRCLESIZE*2+draw.textsize(shortnumber)[0], y+TEXTSIZE/2)], fill="#ffffff")
# draw.text((x+CIRCLESIZE * 1.5,y-TEXTSIZE/2), shortnumber, fill="#000000")
# draw.ellipse([(x-CIRCLESIZE,y-CIRCLESIZE),(x+CIRCLESIZE,y+CIRCLESIZE)], fill=COL_TYPES[point_type], outline="#000000")
# print(f' SUCCESS - YES {surveypoint}, {number}, {point_type}, {label}')
# except:
# print(f' - NO {surveypoint}, {number}, {point_type}, {label}')
# pass
# try:
# ss = SurvexStation.objects.lookup(surveypoint)
# E, N = ss.x, ss.y
# shortnumber = number.replace("&mdash;","")
# (x,y) = list(map(int, mungecoord(E, N, mapcode, img)))
# imgmaps[maparea].append( [x-4, y-SIZE/2, x+4+draw.textsize(shortnumber)[0], y+SIZE/2, shortnumber, label] )
# draw.rectangle([(x+CIRCLESIZE, y-TEXTSIZE/2), (x+CIRCLESIZE*2+draw.textsize(shortnumber)[0], y+TEXTSIZE/2)], fill="#ffffff")
# draw.text((x+CIRCLESIZE * 1.5,y-TEXTSIZE/2), shortnumber, fill="#000000")
# draw.ellipse([(x-CIRCLESIZE,y-CIRCLESIZE),(x+CIRCLESIZE,y+CIRCLESIZE)], fill=COL_TYPES[point_type], outline="#000000")
# print(f' SUCCESS - YES {surveypoint}, {number}, {point_type}, {label}')
# except:
# print(f' - NO {surveypoint}, {number}, {point_type}, {label}')
# pass


@@ -17,11 +17,11 @@ from troggle.core.models.survex import SingleScan, SurvexBlock, Wallet
from troggle.core.models.troggle import DataIssue, Expedition, Person
from troggle.core.views.expo import getmimetype
#from troggle.parsers.people import GetPersonExpeditionNameLookup
# from troggle.parsers.people import GetPersonExpeditionNameLookup
#import parsers.surveys
# import parsers.surveys
'''one of these views serves files as binary blobs, and simply sets the mime type based on the file extension,
"""one of these views serves files as binary blobs, and simply sets the mime type based on the file extension,
as does the urls.py dispatcher which sends them here. Here they should actually have the filetype checked
by looking inside the file before being served.
@@ -35,80 +35,87 @@ TODO
cave for a wallet - just gets the last one, randomly. Should make this a list or many:many ideally
add this file into the todo list thingy.
'''
"""
def populatewallet(w):
'''Copy survex data here just for display, not permanently
"""Copy survex data here just for display, not permanently
Only gets data from the survex file when it was parsed on import..
so doesn't work if there is no *ref value
'''
"""
survexpeople = []
blocks = SurvexBlock.objects.filter(scanswallet = w)
blocks = SurvexBlock.objects.filter(scanswallet=w)
for b in blocks:
for personrole in b.survexpersonrole_set.all():
for personrole in b.survexpersonrole_set.all():
survexpeople.append(personrole.personname)
w.persons = list(set(survexpeople))
w.persons = list(set(survexpeople))
def datewallet(w, earliest):
'''Gets the date of the earliest survexblock associated with the wallet
"""Gets the date of the earliest survexblock associated with the wallet
REFACTOR this to do the whole date-getting task
'''
"""
first = earliest
blocks = SurvexBlock.objects.filter(scanswallet = w)
blocks = SurvexBlock.objects.filter(scanswallet=w)
for b in blocks:
if b.date:
if b.date < first:
first = b.date
first = b.date
if first == earliest:
# no date found
w.date = None
else:
w.date = first.isoformat()
return w.date
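# The REFACTOR note above could be satisfied by letting the database pick the
# earliest block date; a sketch only, assuming the scanswallet FK and date
# field used in this function.
from django.db.models import Min
from troggle.core.models.survex import SurvexBlock

def earliest_block_date(w):
    return SurvexBlock.objects.filter(scanswallet=w).aggregate(Min("date"))["date__min"]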
def caveifywallet(w):
'''Gets the cave from the list of survex files,
"""Gets the cave from the list of survex files,
only selects one of them though. Only used for display.
'''
#print(f' - Caveify {w=}')
"""
# print(f' - Caveify {w=}')
blocknames = []
blocks = SurvexBlock.objects.filter(scanswallet = w)
for b in blocks:
blocks = SurvexBlock.objects.filter(scanswallet=w)
for b in blocks:
# NB b.cave is not populated by parser. Use b.survexfile.cave instead, or we could parse b.survexpath
if b.survexfile.cave:
w.caveobj = b.survexfile.cave # just gets the last one, randomly. Should make this a list or many:many ideally
w.caveobj = (
b.survexfile.cave
) # just gets the last one, randomly. Should make this a list or many:many ideally
w.cave = w.caveobj
if b.name:
blocknames.append(b.name)
if w.name():
w.displaynames = [w.name()]
else:
w.displaynames = blocknames
def fillblankpeople(w):
# this isn't working..? why? Because it needs a *ref and an import
wp = w.people()
w.persons = wp
if not wp:
populatewallet(w)
else:
if len(wp) == 1:
# print(f' - {wp=}')
nobody = wp[0].lower()
if nobody == 'unknown' or nobody == 'nobody' or nobody == ' ' or nobody == '':
# print(f' - {wp=} {nobody=}')
populatewallet(w)
# this isn't working..? why? Because it needs a *ref and an import
wp = w.people()
w.persons = wp
if not wp:
populatewallet(w)
else:
if len(wp) == 1:
# print(f' - {wp=}')
nobody = wp[0].lower()
if nobody == "unknown" or nobody == "nobody" or nobody == " " or nobody == "":
# print(f' - {wp=} {nobody=}')
populatewallet(w)
def fillblankothers(w):
if not w.walletdate:
earliest = datetime.datetime.now().date()
if not w.date(): # sets .walletdate as a side-effect, gets it from JSON
d =datewallet(w, earliest) # if nothing in JSON, it looks at the survex blocks
if not w.date(): # sets .walletdate as a side-effect, gets it from JSON
d = datewallet(w, earliest) # if nothing in JSON, it looks at the survex blocks
w.walletdate = d
w.save()
Gcavelookup = GetCaveLookup()
wcaveid = w.cave()
@@ -118,58 +125,67 @@ def fillblankothers(w):
if type(wcaveid) == list:
for i in wcaveid:
if i in Gcavelookup:
w.caveobj = Gcavelookup[i] # just sets it to the last one found. nasty. bug waiting to happen
#print(f' - Found cave object from id {wcaveid}')
w.caveobj = Gcavelookup[i] # just sets it to the last one found. nasty. bug waiting to happen
# print(f' - Found cave object from id {wcaveid}')
else:
if wcaveid in Gcavelookup:
w.caveobj = Gcavelookup[wcaveid]
else:
print(f' - Failed to find cave object from id {wcaveid}')
print(f" - Failed to find cave object from id {wcaveid}")
def fixsurvextick(w, ticks):
ticks["S"] = w.fixsurvextick(ticks["S"])
def fixsurvextick(w, ticks):
ticks["S"] = w.fixsurvextick(ticks["S"])
def walletslistperson(request, first_name, last_name):
'''Page which displays a list of all the wallets for a specific person
"""Page which displays a list of all the wallets for a specific person
HORRIBLE linear search through everything. Index and do SQL query properly
'''
"""
# This is where we face having to re-do everything to do with names properly, rather than the horrible series of hacks over 20 years..
#GetPersonExpeditionNameLookup
# GetPersonExpeditionNameLookup
def tickspersonwallet(p):
manywallets = []
wallets = Wallet.objects.all()
for w in wallets:
w.persons = w.people() # ephemeral attribute for web page
w.persons = w.people() # ephemeral attribute for web page
fillblankpeople(w)
if w.persons:
if p.fullname in w.persons:
manywallets.append(w)
fillblankothers(w)
w.ticks = w.get_ticks() # the complaints in colour form
w.ticks = w.get_ticks() # the complaints in colour form
fixsurvextick(w, w.ticks)
return manywallets
print(f"-walletslistperson")
print(f"-walletslistperson")
try:
if last_name:
p = Person.objects.get(fullname= f'{first_name} {last_name}')
else:
p = Person.objects.get(fullname=f"{first_name} {last_name}")
else:
# special Wookey-hack
p = Person.objects.get(first_name= f'{first_name}')
p = Person.objects.get(first_name=f"{first_name}")
except:
#raise
return render(request, 'errors/generic.html', {'message': f'Unrecognised name of an expo person: "{first_name} {last_name}"'})
# raise
return render(
request,
"errors/generic.html",
{"message": f'Unrecognised name of a expo person: "{first_name} {last_name}"'},
)
manywallets = tickspersonwallet(p)
expeditions = Expedition.objects.all()
print(f"--")
return render(request, 'personwallets.html', { 'manywallets':manywallets, 'settings': settings, 'person': p, 'expeditions': expeditions})
print(f"--")
return render(
request,
"personwallets.html",
{"manywallets": manywallets, "settings": settings, "person": p, "expeditions": expeditions},
)
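# The docstring's "do SQL query properly" could look roughly like this single
# reverse-relation query; a sketch that assumes the default reverse names
# (survexblock, survexpersonrole) seen elsewhere in this file and that
# personname matches Person.fullname. It would still miss wallets whose people
# are recorded only in the wallet JSON.
from troggle.core.models.survex import Wallet

def wallets_for_person_sql(p):
    return Wallet.objects.filter(survexblock__survexpersonrole__personname=p.fullname).distinct()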
def setwalletsdates():
wallets = Wallet.objects.filter(walletdate=None)
wallets = Wallet.objects.filter(walletdate=None)
print(f"undated wallets: {len(wallets)}")
for w in wallets:
w.walletdate = w.date()
@@ -177,50 +193,60 @@ def setwalletsdates():
def walletslistyear(request, year):
'''Page which displays a list of all the wallets in a specific year.
"""Page which displays a list of all the wallets in a specific year.
We have a field .walletyear, which we set on import.
'''
"""
def ticksyearwallet(year):
manywallets = []
wallets = Wallet.objects.filter(walletyear__year=year)
wallets = Wallet.objects.filter(walletyear__year=year)
for w in wallets:
manywallets.append(w)
fillblankpeople(w)
fillblankothers(w)
w.ticks = w.get_ticks() # the complaints in colour form, from the json file on disc
w.ticks = w.get_ticks() # the complaints in colour form, from the json file on disc
fixsurvextick(w, w.ticks)
return manywallets
print(f"-walletslistyear")
print(f"-walletslistyear")
if year < 1976 or year > 2050:
return render(request, 'errors/generic.html', {'message': 'Year out of range. Must be between 1976 and 2050'})
#return render(request, 'errors/generic.html', {'message': 'This page logic not implemented yet'})
return render(request, "errors/generic.html", {"message": "Year out of range. Must be between 1976 and 2050"})
# return render(request, 'errors/generic.html', {'message': 'This page logic not implemented yet'})
year = str(year)
manywallets = ticksyearwallet(year)
expeditions = Expedition.objects.all()
expedition = expeditions.filter(year=year)
print(f"--")
return render(request, 'yearwallets.html', { 'manywallets':manywallets, 'settings': settings, 'year': year, 'expeditions': expeditions, 'expedition': expedition})
print(f"--")
return render(
request,
"yearwallets.html",
{
"manywallets": manywallets,
"settings": settings,
"year": year,
"expeditions": expeditions,
"expedition": expedition,
},
)
def cavewallets(request, caveid):
'''Returns all the wallets for just one cave
'''
print(f"-cavewalletsl")
"""Returns all the wallets for just one cave"""
print(f"-cavewalletsl")
Gcavelookup = GetCaveLookup()
if caveid in Gcavelookup:
cave = Gcavelookup[caveid]
else:
return render(request,'errors/badslug.html', {'badslug': caveid})
return render(request, "errors/badslug.html", {"badslug": caveid})
# remove duplication. Sorting is done in the template
# But this only gets wallets which have survex files attached..
wallets = set(Wallet.objects.filter(survexblock__survexfile__cave=cave))
# all the ones without a survexblock attached via a *ref, search for match in JSON
zilchwallets = set(Wallet.objects.exclude(survexblock__survexfile__cave=cave))
for z in zilchwallets:
@@ -235,35 +261,37 @@ def cavewallets(request, caveid):
wurl = f"/scanupload/{z.walletname.replace('#',':')}"
message = f" ! In {z.walletname} there is an unrecognised cave name '{zcaveid}' (out of {len(Gcavelookup):,} cave names and aliases)"
print(message)
DataIssue.objects.update_or_create(parser='scans', message=message, url=wurl)
DataIssue.objects.update_or_create(parser="scans", message=message, url=wurl)
manywallets = list(set(wallets))
for w in manywallets:
fillblankpeople(w)
fillblankothers(w)
w.ticks = w.get_ticks() # the complaints in colour form, from the json file on disc
fillblankothers(w)
w.ticks = w.get_ticks() # the complaints in colour form, from the json file on disc
fixsurvextick(w, w.ticks)
expeditions = Expedition.objects.all()
print(f"--")
return render(request, 'cavewallets.html', { 'manywallets':manywallets, 'settings': settings, 'cave': cave, 'expeditions': expeditions})
print(f"--")
return render(
request,
"cavewallets.html",
{"manywallets": manywallets, "settings": settings, "cave": cave, "expeditions": expeditions},
)
def oldwallet(request, path):
'''Now called only for non-standard wallet structures for pre-2000 wallets
'''
"""Now called only for non-standard wallet structures for pre-2000 wallets"""
# print([ s.walletname for s in Wallet.objects.all() ])
print(f'! - oldwallet path:{path}')
print(f"! - oldwallet path:{path}")
try:
wallet = Wallet.objects.get(walletname=urlunquote(path))
return render(request, 'wallet_old.html', { 'wallet':wallet, 'settings': settings })
wallet = Wallet.objects.get(walletname=urlunquote(path))
return render(request, "wallet_old.html", {"wallet": wallet, "settings": settings})
except:
message = f'Scan folder error or not found \'{path}\' .'
return render(request, 'errors/generic.html', {'message': message})
message = f"Scan folder error or not found '{path}' ."
return render(request, "errors/generic.html", {"message": message})
def scansingle(request, path, file):
'''sends a single binary file to the user for display - browser decides how using mimetype
'''
"""sends a single binary file to the user for display - browser decides how using mimetype"""
try:
wallet = Wallet.objects.get(walletname=urlunquote(path))
singlescan = SingleScan.objects.get(wallet=wallet, name=file)
@@ -271,25 +299,26 @@ def scansingle(request, path, file):
if imagefile.is_file():
message = f" - scansingle {imagefile} {path}:{file}:{getmimetype(file)}:"
print(message)
return HttpResponse(content=open(imagefile,"rb"), content_type=getmimetype(file)) # any type of image
return HttpResponse(content=open(imagefile, "rb"), content_type=getmimetype(file)) # any type of image
else:
message = f'Scan folder file \'{imagefile}\' not found. {path=} {file=}'
message = f"Scan folder file '{imagefile}' not found. {path=} {file=}"
print(message)
return render(request, 'errors/generic.html', {'message': message})
return render(request, "errors/generic.html", {"message": message})
except:
message = f'Scan folder or scan item access error \'{path}\' and \'{file}\'.'
return render(request, 'errors/generic.html', {'message': message})
message = f"Scan folder or scan item access error '{path}' and '{file}'."
return render(request, "errors/generic.html", {"message": message})
def allscans(request):
'''Returns all the wallets in the system, we would like to use
"""Returns all the wallets in the system, we would like to use
the Django queryset SQL optimisation https://docs.djangoproject.com/en/3.2/ref/models/querysets/#prefetch-related
to get the related singlescan and survexblock objects but that requires rewriting this to do the query on those, not on
the wallets
'''
manywallets = Wallet.objects.all() # NB all of them
"""
manywallets = Wallet.objects.all() # NB all of them
# manywallets = Wallet.objects.all().prefetch_related('singlescan') fails as the link is defined on 'singlescan' not on 'wallet'
expeditions = Expedition.objects.all()
return render(request, 'manywallets.html', { 'manywallets':manywallets, 'settings': settings, 'expeditions': expeditions })
return render(
request, "manywallets.html", {"manywallets": manywallets, "settings": settings, "expeditions": expeditions}
)
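# If the reverse accessors keep their default names (singlescan_set,
# survexblock_set - an assumption, they may be overridden with related_name),
# the prefetch the docstring asks for could be written from the Wallet side:
from troggle.core.models.survex import Wallet

def allscans_prefetched():
    return Wallet.objects.all().prefetch_related("singlescan_set", "survexblock_set")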


@@ -16,96 +16,97 @@ import troggle.settings as settings
from troggle.core.models.caves import Cave, Entrance
from troggle.core.models.logbooks import LogbookEntry
from troggle.core.models.survex import SurvexBlock, SurvexStation
from troggle.core.models.troggle import (DataIssue, Expedition, Person,
PersonExpedition)
from troggle.parsers.people import (GetPersonExpeditionNameLookup,
foreign_friends)
from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition
from troggle.parsers.people import GetPersonExpeditionNameLookup, foreign_friends
#from django.views.generic.list import ListView
'''Very simple report pages summarizing data about the whole set of expeditions and of
# from django.views.generic.list import ListView
"""Very simple report pages summarizing data about the whole set of expeditions and of
the status of data inconsistencies
'''
"""
def therionissues(request):
"""Page displaying contents of a file produced during data import"""
logname = "therionrefs.log"
logpath = (Path(settings.PYTHON_PATH, logname))
logpath = Path(settings.PYTHON_PATH, logname)
therionlog = []
newlog = []
if logpath.is_file():
with open(logpath, "r") as f:
therionlog = f.readlines()
print(f"{logpath} has {len(therionlog)} entries")
therionlog = f.readlines()
print(f"{logpath} has {len(therionlog)} entries")
else:
print(f"{logpath} NOT FOUND {len(therionlog)}")
for line in therionlog:
line = line.replace("! Un-parsed image filename:", "")
newlog.append(line)
return render(request, 'therionreport.html', {"therionlog":newlog})
return render(request, "therionreport.html", {"therionlog": newlog})
def surveximport(request):
"""Page displaying contents of a file produced during data import"""
logname = "svxlinear.log"
logpath = (Path(settings.PYTHON_PATH, logname))
logpath = Path(settings.PYTHON_PATH, logname)
contents = ""
if logpath.is_file():
with open(logpath, "r") as f:
contents = f.read()
contents = f.read()
else:
print(f"{logpath} NOT FOUND {len(contents)}")
return render(request, 'survexreport.html', {"log":contents})
return render(request, "survexreport.html", {"log": contents})
def survexdebug(request):
"""Page displaying contents of a file produced during data import"""
logname = "svxblks.log"
logpath = (Path(settings.PYTHON_PATH, logname))
logpath = Path(settings.PYTHON_PATH, logname)
contents = ""
if logpath.is_file():
with open(logpath, "r") as f:
contents = f.read()
contents = f.read()
else:
print(f"{logpath} NOT FOUND {len(contents)}")
return render(request, 'survexdebug.html', {"log":contents})
return render(request, "survexdebug.html", {"log": contents})
def pathsreport(request):
"""The CONSTANTs declared in the settings and localsettings and how they have
been evaluated for this specific installation - live """
been evaluated for this specific installation - live"""
pathsdict = OrderedDict()
try:
pathsdict = {
# "BOGUS" : str( settings.BOGUS),
"JSLIB_URL" : str( settings.JSLIB_URL),
"JSLIB_ROOT" : str( settings.JSLIB_ROOT),
# "CSSLIB_URL" : str( settings.CSSLIB_URL),
"CAVEDESCRIPTIONS" : str( settings.CAVEDESCRIPTIONS),
"DIR_ROOT" : str( settings.DIR_ROOT),
"ENTRANCEDESCRIPTIONS" : str( settings.ENTRANCEDESCRIPTIONS),
"EXPOUSER_EMAIL" : str( settings.EXPOUSER_EMAIL),
"EXPOUSERPASS" : str("<redacted>"),
"EXPOUSER" : str( settings.EXPOUSER),
"EXPOWEB" : str( settings.EXPOWEB),
"EXPOWEB_URL" : str( settings.EXPOWEB_URL),
# "FILES" : str( settings.FILES),
"LIBDIR" : str( settings.LIBDIR),
"LOGFILE" : str( settings.LOGFILE),
"LOGIN_REDIRECT_URL" : str( settings.LOGIN_REDIRECT_URL),
"MEDIA_ROOT" : str( settings.MEDIA_ROOT),
"MEDIA_URL" : str( settings.MEDIA_URL),
"PHOTOS_URL" : str( settings.PHOTOS_URL),
"PYTHON_PATH" : str( settings.PYTHON_PATH),
"REPOS_ROOT_PATH" : str( settings.REPOS_ROOT_PATH),
"ROOT_URLCONF" : str( settings.ROOT_URLCONF),
"STATIC_URL" : str( settings.STATIC_URL),
"SURVEX_DATA" : str( settings.SURVEX_DATA),
"SCANS_ROOT" : str( settings.SCANS_ROOT),
# "SURVEYS" : str( settings.SURVEYS),
# "SCANS_URL" : str( settings.SCANS_URL),
"SURVEXPORT" : str( settings.SURVEXPORT),
"DRAWINGS_DATA" : str( settings.DRAWINGS_DATA),
"URL_ROOT" : str( settings.URL_ROOT)
# "BOGUS" : str( settings.BOGUS),
"JSLIB_URL": str(settings.JSLIB_URL),
"JSLIB_ROOT": str(settings.JSLIB_ROOT),
# "CSSLIB_URL" : str( settings.CSSLIB_URL),
"CAVEDESCRIPTIONS": str(settings.CAVEDESCRIPTIONS),
"DIR_ROOT": str(settings.DIR_ROOT),
"ENTRANCEDESCRIPTIONS": str(settings.ENTRANCEDESCRIPTIONS),
"EXPOUSER_EMAIL": str(settings.EXPOUSER_EMAIL),
"EXPOUSERPASS": str("<redacted>"),
"EXPOUSER": str(settings.EXPOUSER),
"EXPOWEB": str(settings.EXPOWEB),
"EXPOWEB_URL": str(settings.EXPOWEB_URL),
# "FILES" : str( settings.FILES),
"LIBDIR": str(settings.LIBDIR),
"LOGFILE": str(settings.LOGFILE),
"LOGIN_REDIRECT_URL": str(settings.LOGIN_REDIRECT_URL),
"MEDIA_ROOT": str(settings.MEDIA_ROOT),
"MEDIA_URL": str(settings.MEDIA_URL),
"PHOTOS_URL": str(settings.PHOTOS_URL),
"PYTHON_PATH": str(settings.PYTHON_PATH),
"REPOS_ROOT_PATH": str(settings.REPOS_ROOT_PATH),
"ROOT_URLCONF": str(settings.ROOT_URLCONF),
"STATIC_URL": str(settings.STATIC_URL),
"SURVEX_DATA": str(settings.SURVEX_DATA),
"SCANS_ROOT": str(settings.SCANS_ROOT),
# "SURVEYS" : str( settings.SURVEYS),
# "SCANS_URL" : str( settings.SCANS_URL),
"SURVEXPORT": str(settings.SURVEXPORT),
"DRAWINGS_DATA": str(settings.DRAWINGS_DATA),
"URL_ROOT": str(settings.URL_ROOT),
}
except:
pathsdict["! EXCEPTION !"] = "missing or exta string constant in troggle/settings"
@@ -113,36 +114,36 @@ def pathsreport(request):
pathstype = OrderedDict()
try:
pathstype = {
# "BOGUS" : type(settings.BOGUS),
"JSLIB_URL" : type(settings.JSLIB_URL),
"JSLIB_ROOT" : type( settings.JSLIB_ROOT),
# "CSSLIB_URL" : type(settings.CSSLIB_URL),
"CAVEDESCRIPTIONS" : type(settings.CAVEDESCRIPTIONS),
"DIR_ROOT" : type(settings.DIR_ROOT),
"ENTRANCEDESCRIPTIONS" : type(settings.ENTRANCEDESCRIPTIONS),
"EXPOUSER_EMAIL" : type(settings.EXPOUSER_EMAIL),
"EXPOUSERPASS" : type(settings.EXPOUSERPASS),
"EXPOUSER" : type(settings.EXPOUSER),
"EXPOWEB" : type(settings.EXPOWEB),
"EXPOWEB_URL" : type(settings.EXPOWEB_URL),
# "FILES" : type(settings.FILES),
"LIBDIR" : type( settings.LIBDIR),
"LOGFILE" : type(settings.LOGFILE),
"LOGIN_REDIRECT_URL" : type(settings.LOGIN_REDIRECT_URL),
"MEDIA_ROOT" : type(settings.MEDIA_ROOT),
"MEDIA_URL" : type(settings.MEDIA_URL),
"PHOTOS_URL" : type(settings.PHOTOS_URL),
"PYTHON_PATH" : type(settings.PYTHON_PATH),
"REPOS_ROOT_PATH" : type(settings.REPOS_ROOT_PATH),
"ROOT_URLCONF" : type(settings.ROOT_URLCONF),
"STATIC_URL" : type(settings.STATIC_URL),
"SURVEX_DATA" : type(settings.SURVEX_DATA),
"SCANS_ROOT" : type(settings.SCANS_ROOT),
# "SURVEYS" : type(settings.SURVEYS),
# "SCANS_URL" : type(settings.SCANS_URL),
"SURVEXPORT" : type(settings.SURVEXPORT),
"DRAWINGS_DATA" : type(settings.DRAWINGS_DATA),
"URL_ROOT" : type(settings.URL_ROOT)
# "BOGUS" : type(settings.BOGUS),
"JSLIB_URL": type(settings.JSLIB_URL),
"JSLIB_ROOT": type(settings.JSLIB_ROOT),
# "CSSLIB_URL" : type(settings.CSSLIB_URL),
"CAVEDESCRIPTIONS": type(settings.CAVEDESCRIPTIONS),
"DIR_ROOT": type(settings.DIR_ROOT),
"ENTRANCEDESCRIPTIONS": type(settings.ENTRANCEDESCRIPTIONS),
"EXPOUSER_EMAIL": type(settings.EXPOUSER_EMAIL),
"EXPOUSERPASS": type(settings.EXPOUSERPASS),
"EXPOUSER": type(settings.EXPOUSER),
"EXPOWEB": type(settings.EXPOWEB),
"EXPOWEB_URL": type(settings.EXPOWEB_URL),
# "FILES" : type(settings.FILES),
"LIBDIR": type(settings.LIBDIR),
"LOGFILE": type(settings.LOGFILE),
"LOGIN_REDIRECT_URL": type(settings.LOGIN_REDIRECT_URL),
"MEDIA_ROOT": type(settings.MEDIA_ROOT),
"MEDIA_URL": type(settings.MEDIA_URL),
"PHOTOS_URL": type(settings.PHOTOS_URL),
"PYTHON_PATH": type(settings.PYTHON_PATH),
"REPOS_ROOT_PATH": type(settings.REPOS_ROOT_PATH),
"ROOT_URLCONF": type(settings.ROOT_URLCONF),
"STATIC_URL": type(settings.STATIC_URL),
"SURVEX_DATA": type(settings.SURVEX_DATA),
"SCANS_ROOT": type(settings.SCANS_ROOT),
# "SURVEYS" : type(settings.SURVEYS),
# "SCANS_URL" : type(settings.SCANS_URL),
"SURVEXPORT": type(settings.SURVEXPORT),
"DRAWINGS_DATA": type(settings.DRAWINGS_DATA),
"URL_ROOT": type(settings.URL_ROOT),
}
except:
pathstype["! EXCEPTION !"] = "missing or exta string constant in troggle/settings"
@@ -150,15 +151,15 @@ def pathsreport(request):
# settings are unique but paths are not
ncodes = len(pathsdict)
bycodeslist = sorted(pathsdict.items()) # a list of tuples
bycodeslist = sorted(pathsdict.items()) # a list of tuples
bycodeslist2 = []
for k, p in bycodeslist:
bycodeslist2.append((k, p, str(pathstype[k])))
bypaths = sorted(pathsdict.values()) # a list
bypaths = sorted(pathsdict.values()) # a list
bypathslist = []
for p in bypaths:
for k in pathsdict.keys():
if pathsdict[k] == p:
@@ -166,80 +167,92 @@ def pathsreport(request):
del pathsdict[k]
break
return render(request, 'pathsreport.html', {
"pathsdict":pathsdict,
"bycodeslist":bycodeslist2,
"bypathslist":bypathslist,
"ncodes":ncodes})
return render(
request,
"pathsreport.html",
{"pathsdict": pathsdict, "bycodeslist": bycodeslist2, "bypathslist": bypathslist, "ncodes": ncodes},
)
def stats(request):
statsDict={}
statsDict['expoCount'] = f"{Expedition.objects.count():,}"
statsDict['caveCount'] = f"{Cave.objects.count():,}"
statsDict['personCount'] = f"{Person.objects.count():,}"
statsDict['logbookEntryCount'] = f"{LogbookEntry.objects.count():,}"
statsDict = {}
statsDict["expoCount"] = f"{Expedition.objects.count():,}"
statsDict["caveCount"] = f"{Cave.objects.count():,}"
statsDict["personCount"] = f"{Person.objects.count():,}"
statsDict["logbookEntryCount"] = f"{LogbookEntry.objects.count():,}"
legsbyexpo = [ ]
legsbyexpo = []
addupsurvexlength = 0
addupsurvexlegs = 0
for expedition in Expedition.objects.all():
survexblocks = expedition.survexblock_set.all()
legsyear=0
legsyear = 0
survexleglength = 0.0
for survexblock in survexblocks:
survexleglength += survexblock.legslength
legsyear += int(survexblock.legsall)
addupsurvexlength += survexleglength
addupsurvexlegs += legsyear
legsbyexpo.append((expedition, {"nsurvexlegs": f"{legsyear:,}",
"survexleglength":f"{survexleglength:,.0f}"}))
legsbyexpo.reverse()
legsbyexpo.append((expedition, {"nsurvexlegs": f"{legsyear:,}", "survexleglength": f"{survexleglength:,.0f}"}))
legsbyexpo.reverse()
renderDict = {
**statsDict,
**{"addupsurvexlength": addupsurvexlength / 1000, "legsbyexpo": legsbyexpo, "nsurvexlegs": addupsurvexlegs},
} # new syntax
return render(request, "statistics.html", renderDict)
renderDict = {**statsDict, **{ "addupsurvexlength":addupsurvexlength/1000, "legsbyexpo":legsbyexpo, "nsurvexlegs":addupsurvexlegs }} # new syntax
return render(request,'statistics.html', renderDict)
def dataissues(request):
'''Each issue has a parser, a message and a url linking to the offending object after loading
'''
"""Each issue has a parser, a message and a url linking to the offending object after loading"""
def myFunc(di):
return di.parser.lower() + di.message.lower()
dilist = list(DataIssue.objects.all())
dilist.sort(key = myFunc)
return render(request,'dataissues.html', {'didict': dilist})
dilist.sort(key=myFunc)
return render(request, "dataissues.html", {"didict": dilist})
def eastings(request):
'''report each Northing/Easting pair wherever recorded
'''
"""report each Northing/Easting pair wherever recorded"""
ents = []
entrances = Entrance.objects.all()
for e in entrances:
if e.easting or e.northing:
ents.append(e)
stations = SurvexStation.objects.all()
return render(request,'eastings.html', {'ents': ents, 'stations': stations})
return render(request, "eastings.html", {"ents": ents, "stations": stations})
def aliases(request, year):
'''Page which displays a list of all the person aliases in a specific year
'''
"""Page which displays a list of all the person aliases in a specific year"""
if not year:
year = 1998
expo = Expedition.objects.filter(year=year)[0] # returns a set, even though we know there is only one
expo = Expedition.objects.filter(year=year)[0] # returns a set, even though we know there is only one
personexpeditions = PersonExpedition.objects.filter(expedition=expo)
persons = list(Person.objects.all().order_by('last_name'))
aliases = GetPersonExpeditionNameLookup(expo)
aliasdict={}
for i in sorted(aliases):
aliasdict[i]=aliases[i]
invert ={}
persons = list(Person.objects.all().order_by("last_name"))
return render(request,'aliases.html', {'year': year, 'aliasdict': aliasdict,
'foreign_friends': foreign_friends, 'invert': invert,'personexpeditions': personexpeditions, 'persons': persons})
aliases = GetPersonExpeditionNameLookup(expo)
aliasdict = {}
for i in sorted(aliases):
aliasdict[i] = aliases[i]
invert = {}
return render(
request,
"aliases.html",
{
"year": year,
"aliasdict": aliasdict,
"foreign_friends": foreign_friends,
"invert": invert,
"personexpeditions": personexpeditions,
"persons": persons,
},
)


@@ -15,20 +15,19 @@ from django.views.decorators.csrf import ensure_csrf_cookie
import parsers.survex
import troggle.settings as settings
from troggle.core.models.caves import Cave
from troggle.core.models.logbooks import LogbookEntry #, PersonLogEntry
from troggle.core.models.survex import (SurvexBlock, SurvexDirectory,
SurvexFile, SurvexPersonRole)
from troggle.core.models.logbooks import LogbookEntry # , PersonLogEntry
from troggle.core.models.survex import SurvexBlock, SurvexDirectory, SurvexFile, SurvexPersonRole
from troggle.core.models.troggle import Expedition, Person, PersonExpedition
from troggle.core.utils import WriteAndCommitError, only_commit
from troggle.parsers.people import GetPersonExpeditionNameLookup
'''Everything that views survexfiles
"""Everything that views survexfiles
but also displays data on a cave or caves when there is ambiguity
'''
"""
todo='''survexcavesingle is not properly producing any result for Homecoming, 1626-359, 2018-dm-07
todo = """survexcavesingle is not properly producing any result for Homecoming, 1626-359, 2018-dm-07
even though there are dozens of surveys.
'''
"""
survexdatasetpath = Path(settings.SURVEX_DATA)
@@ -105,44 +104,45 @@ survextemplatefile = """; *** THIS IS A TEMPLATE FILE NOT WHAT YOU MIGHT BE EXPE
class SvxForm(forms.Form):
'''Two-pane form, upper half is the raw survex file, lower half (with green background)
"""Two-pane form, upper half is the raw survex file, lower half (with green background)
is the output of running 'cavern' on the survex file.
'''
dirname = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
filename = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly":True}))
outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly":True}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols":140, "rows":36}))
"""
dirname = forms.CharField(widget=forms.TextInput(attrs={"readonly": True}))
filename = forms.CharField(widget=forms.TextInput(attrs={"readonly": True}))
datetime = forms.DateTimeField(widget=forms.TextInput(attrs={"readonly": True}))
outputtype = forms.CharField(widget=forms.TextInput(attrs={"readonly": True}))
code = forms.CharField(widget=forms.Textarea(attrs={"cols": 140, "rows": 36}))
template = False
def GetDiscCode(self):
fname = survexdatasetpath / (self.data['filename'] + ".svx")
fname = survexdatasetpath / (self.data["filename"] + ".svx")
if not fname.is_file():
print(">>> >>> WARNING - svx file not found, showing TEMPLATE SVX",fname, flush=True)
print(">>> >>> WARNING - svx file not found, showing TEMPLATE SVX", fname, flush=True)
self.template = True
return survextemplatefile
try:
fin = open(fname, "r",encoding='utf8',newline='')
svxtext = fin.read()
fin = open(fname, "r", encoding="utf8", newline="")
svxtext = fin.read()
fin.close()
except:
# hack. Replace this with something better.
fin = open(fname, "r",encoding='iso-8859-1',newline='')
svxtext = fin.read()
fin = open(fname, "r", encoding="iso-8859-1", newline="")
svxtext = fin.read()
fin.close()
return svxtext
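# A tidier shape for the fallback marked "# hack" above: try strict UTF-8 first
# and fall back to ISO-8859-1 (which accepts any byte sequence). A sketch, not
# what GetDiscCode currently does.
def read_svx_text(fname):
    try:
        with open(fname, "r", encoding="utf8", newline="") as fin:
            return fin.read()
    except UnicodeDecodeError:
        with open(fname, "r", encoding="iso-8859-1", newline="") as fin:
            return fin.read()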
def DiffCode(self, rcode):
code = self.GetDiscCode()
difftext = difflib.unified_diff(code.splitlines(), rcode.splitlines())
difflist = [ diffline.strip() for diffline in difftext if not re.match(r"\s*$", diffline) ]
difflist = [diffline.strip() for diffline in difftext if not re.match(r"\s*$", diffline)]
return difflist
def SaveCode(self, rcode):
fname = survexdatasetpath / (self.data['filename'] + ".svx")
fname = survexdatasetpath / (self.data["filename"] + ".svx")
if not os.path.isfile(fname):
if re.search(r"\[|\]", rcode):
if re.search(r"\[|\]", rcode):
return "Error: remove all []s from the text. They are only template guidance."
mbeginend = re.search(r"(?s)\*begin\s+(\w+).*?\*end\s+(\w+)", rcode)
if not mbeginend:
@@ -152,228 +152,243 @@ class SvxForm(forms.Form):
# Make this create new survex folders if needed
try:
fout = open(fname, "w", encoding='utf8',newline='\n')
fout = open(fname, "w", encoding="utf8", newline="\n")
except FileNotFoundError:
pth = os.path.dirname(self.data['filename'])
pth = os.path.dirname(self.data["filename"])
newpath = survexdatasetpath / pth
if not os.path.exists(newpath):
os.makedirs(newpath)
fout = open(fname, "w", encoding='utf8',newline='\n')
os.makedirs(newpath)
fout = open(fname, "w", encoding="utf8", newline="\n")
except PermissionError:
return "CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file. Ask a nerd to fix this."
return (
"CANNOT save this file.\nPERMISSIONS incorrectly set on server for this file. Ask a nerd to fix this."
)
# javascript seems to insert CRLF on WSL1 whatever you say. So fix that:
res = fout.write(rcode.replace("\r",""))
res = fout.write(rcode.replace("\r", ""))
res = fout.write("\n")
fout.close()
if socket.gethostname() == "expo":
comment = f"Online survex edit: {self.data['filename']}.svx"
else:
comment = f"Online survex edit: {self.data['filename']}.svx on dev machine '{socket.gethostname()}' "
comment = f"Online survex edit: {self.data['filename']}.svx on dev machine '{socket.gethostname()}' "
only_commit(fname, comment)
return "SAVED and committed to git"
def Process(self):
print(">>>>....\n....Processing\n")
froox = os.fspath(survexdatasetpath / (self.data['filename'] + ".svx"))
froog = os.fspath(survexdatasetpath / (self.data['filename'] + ".log"))
froox = os.fspath(survexdatasetpath / (self.data["filename"] + ".svx"))
froog = os.fspath(survexdatasetpath / (self.data["filename"] + ".log"))
cwd = os.getcwd()
os.chdir(os.path.split(froox)[0])
os.system(settings.CAVERN + " --log " + froox )
os.system(settings.CAVERN + " --log " + froox)
os.chdir(cwd)
# Update this to use the new syntax..
# sp = subprocess.run([settings.CAVERN, "--log", f'--output={outputdir}', f'{fullpath}.svx'],
# capture_output=True, check=False, text=True)
# if sp.returncode != 0:
# message = f' ! Error running {settings.CAVERN}: {fullpath}'
# Update this to use the new syntax..
# sp = subprocess.run([settings.CAVERN, "--log", f'--output={outputdir}', f'{fullpath}.svx'],
# capture_output=True, check=False, text=True)
# if sp.returncode != 0:
# message = f' ! Error running {settings.CAVERN}: {fullpath}'
# DataIssue.objects.create(parser='entrances', message=message)
# print(message)
# print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
filepatherr = Path(survexdatasetpath / str(self.data['filename'] + ".err"))
filepatherr = Path(survexdatasetpath / str(self.data["filename"] + ".err"))
if filepatherr.is_file():
if filepatherr.stat().st_size == 0:
filepatherr.unlink() # delete empty closure error file
filepatherr.unlink() # delete empty closure error file
fin = open(froog, "r",encoding='utf8')
fin = open(froog, "r", encoding="utf8")
log = fin.read()
fin.close()
#log = re.sub("(?s).*?(Survey contains)", "\\1", log) # this omits any ERROR MESSAGES ! Don't do it.
for s in ["Removing trailing traverses...\n\n",
"Concatenating traverses...\n\n"
"Simplifying network...\n\n",
# log = re.sub("(?s).*?(Survey contains)", "\\1", log) # this omits any ERROR MESSAGES ! Don't do it.
for s in [
"Removing trailing traverses...\n\n",
"Concatenating traverses...\n\n" "Simplifying network...\n\n",
"Calculating network...\n\n",
"Calculating traverses...\n\n",
"Calculating trailing traverses...\n\n",
"Calculating statistics...\n\n"]:
log = log.replace(s,"")
"Calculating statistics...\n\n",
]:
log = log.replace(s, "")
return log
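# A runnable shape for the subprocess.run() replacement sketched in the
# commented-out lines above; the --output argument and error handling are
# assumptions taken from those comments, not verified against cavern here.
import subprocess
import troggle.settings as settings

def run_cavern(fullpath, outputdir):
    sp = subprocess.run(
        [settings.CAVERN, "--log", f"--output={outputdir}", f"{fullpath}.svx"],
        capture_output=True, check=False, text=True,
    )
    if sp.returncode != 0:
        print(f" ! Error running {settings.CAVERN}: {fullpath}")
        print("stderr:\n" + str(sp.stderr) + "\nstdout:\n" + str(sp.stdout))
    return sp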
@ensure_csrf_cookie
def svx(request, survex_file):
'''Displays a single survex file in a textarea window (using a javascript online editor to enable
"""Displays a single survex file in a textarea window (using a javascript online editor to enable
editing) with buttons which allow SAVE, check for DIFFerences from saved, and RUN (which runs the
cavern executable and displays the output below the main textarea).
cavern executable and displays the output below the main textarea).
Requires CSRF to be set up correctly, and requires permission to write to the filesystem.
'''
"""
warning = False
# get the basic data from the file given in the URL
dirname = os.path.split(survex_file)[0]
dirname += "/"
nowtime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
nowtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
outputtype = "normal"
form = SvxForm({'filename':survex_file, 'dirname':dirname, 'datetime':nowtime, 'outputtype':outputtype})
form = SvxForm({"filename": survex_file, "dirname": dirname, "datetime": nowtime, "outputtype": outputtype})
# if the form has been returned
difflist = [ ]
difflist = []
logmessage = ""
message = ""
if request.method == 'POST': # If the form has been submitted...
rform = SvxForm(request.POST) #
if rform.is_valid(): # All validation rules pass (how do we check it against the filename and users?)
rcode = rform.cleaned_data['code']
outputtype = rform.cleaned_data['outputtype']
if request.method == "POST": # If the form has been submitted...
rform = SvxForm(request.POST) #
if rform.is_valid(): # All validation rules pass (how do we check it against the filename and users?)
rcode = rform.cleaned_data["code"]
outputtype = rform.cleaned_data["outputtype"]
difflist = form.DiffCode(rcode)
#print(">>>> ", rform.data)
# print(">>>> ", rform.data)
if "revert" in rform.data:
pass
if "process" in rform.data:
if not difflist:
logmessage = form.Process()
if logmessage:
message = f"OUTPUT FROM PROCESSING\n{logmessage}"
message = f"OUTPUT FROM PROCESSING\n{logmessage}"
else:
message = "SAVE FILE FIRST"
form.data['code'] = rcode
form.data["code"] = rcode
if "save" in rform.data:
if request.user.is_authenticated:
message = form.SaveCode(rcode)
else:
message = "You do not have authority to save this file. Please log in."
if message != "SAVED":
form.data['code'] = rcode
form.data["code"] = rcode
if "diff" in rform.data:
print("Differences: ")
form.data['code'] = rcode
#process(survex_file)
if 'code' not in form.data:
form.data['code'] = form.GetDiscCode()
form.data["code"] = rcode
# process(survex_file)
if "code" not in form.data:
form.data["code"] = form.GetDiscCode()
if form.template:
warning = True
if not difflist:
difflist.append("No differences - file was saved")
if message:
difflist.insert(0, message)
#print [ form.data['code'] ]
svxincludes = re.findall(r'(?i)\*include\s+(\S+)', form.data['code'] or "")
vmap = {'settings': settings,
'warning': warning,
'has_3d': (Path(survexdatasetpath) / Path(survex_file + ".3d")).is_file(),
'title': survex_file,
'svxincludes': svxincludes,
'difflist': difflist,
'logmessage':logmessage,
'form':form}
# vmap.update(csrf(request)) # this now refreshes to the wrong value, now that we use render(request,
# print [ form.data['code'] ]
svxincludes = re.findall(r"(?i)\*include\s+(\S+)", form.data["code"] or "")
vmap = {
"settings": settings,
"warning": warning,
"has_3d": (Path(survexdatasetpath) / Path(survex_file + ".3d")).is_file(),
"title": survex_file,
"svxincludes": svxincludes,
"difflist": difflist,
"logmessage": logmessage,
"form": form,
}
# vmap.update(csrf(request)) # this now refreshes to the wrong value, now that we use render(request,
if outputtype == "ajax":
return render(request, 'svxfiledifflistonly.html', vmap)
return render(request, 'svxfile.html', vmap)
return render(request, "svxfiledifflistonly.html", vmap)
return render(request, "svxfile.html", vmap)
# The cavern running function. This is NOT where it is run inside the form! see SvxForm.Process() for that
def process(survex_file):
'''This runs cavern only where a .3d, .log or .err file is requested.
'''
filepathsvx = survexdatasetpath / str( survex_file + ".svx")
"""This runs cavern only where a .3d, .log or .err file is requested."""
filepathsvx = survexdatasetpath / str(survex_file + ".svx")
cwd = os.getcwd()
os.chdir(os.path.split(os.fspath(survexdatasetpath / survex_file))[0])
os.chdir(os.path.split(os.fspath(survexdatasetpath / survex_file))[0])
os.system(settings.CAVERN + " --log " + str(filepathsvx))
os.chdir(cwd)
# Update this to use the new syntax..
# sp = subprocess.run([settings.CAVERN, "--log", f'--output={outputdir}', f'{fullpath}.svx'],
# capture_output=True, check=False, text=True)
# capture_output=True, check=False, text=True)
# if sp.returncode != 0:
# message = f' ! Error running {settings.CAVERN}: {fullpath}'
# DataIssue.objects.create(parser='entrances', message=message)
# print(message)
# print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
# message = f' ! Error running {settings.CAVERN}: {fullpath}'
# DataIssue.objects.create(parser='entrances', message=message)
# print(message)
# print(f'stderr:\n\n' + str(sp.stderr) + '\n\n' + str(sp.stdout) + '\n\nreturn code: ' + str(sp.returncode))
filepatherr = Path(survexdatasetpath / str(survex_file + ".err"))
if filepatherr.is_file():
if filepatherr.stat().st_size == 0:
filepatherr.unlink() # delete empty closure error file
filepatherr.unlink() # delete empty closure error file
def threed(request, survex_file):
filepath3d = survexdatasetpath / str(survex_file + ".3d")
filepathlog = survexdatasetpath / str(survex_file + ".log")
filepath3d = survexdatasetpath / str(survex_file + ".3d")
filepathlog = survexdatasetpath / str(survex_file + ".log")
if filepath3d.is_file():
threed = open(filepath3d, "rb")
return HttpResponse(threed, content_type="application/x-aven")
else:
process(survex_file) # should not need to do this if it already exists, as it should.
log = open(survexdatasetpath / str(survex_file + ".log"), "r",encoding='utf-8')
process(survex_file) # should not need to do this if it already exists, as it should.
log = open(survexdatasetpath / str(survex_file + ".log"), "r", encoding="utf-8")
return HttpResponse(log, content_type="text")
def svxlog(request, survex_file):
'''Used for rendering .log files from survex outputtype'''
filepathlog = survexdatasetpath / str(survex_file + ".log")
"""Used for rendering .log files from survex outputtype"""
filepathlog = survexdatasetpath / str(survex_file + ".log")
if not filepathlog.is_file():
process(survex_file)
process(survex_file)
log = open(filepathlog, "r")
return HttpResponse(log,content_type="text/plain; charset=utf-8") #default: "text/html; charset=utf-8"
return HttpResponse(log, content_type="text/plain; charset=utf-8") # default: "text/html; charset=utf-8"
def err(request, survex_file):
filepatherr = survexdatasetpath / str(survex_file + ".err")
if not filepatherr.is_file(): # probably not there because it was empty, but re-run anyway
process(survex_file)
filepatherr = survexdatasetpath / str(survex_file + ".err")
if not filepatherr.is_file(): # probably not there because it was empty, but re-run anyway
process(survex_file)
process(survex_file)
if filepatherr.is_file():
err = open(filepatherr, "r")
return HttpResponse(err, content_type="text/plain; charset=utf-8")
else:
return HttpResponse(f'No closure errors. \nEmpty {filepatherr} file produced. \nSee the .log file.', content_type="text/plain; charset=utf-8")
return HttpResponse(
f"No closure errors. \nEmpty {filepatherr} file produced. \nSee the .log file.",
content_type="text/plain; charset=utf-8",
)
def identifycavedircontents(gcavedir):
# find the primary survex file in each cave directory
# this should be in a configuration, not buried in the code...
name = os.path.split(gcavedir)[1]
subdirs = [ ]
subsvx = [ ]
subdirs = []
subsvx = []
primesvx = None
for f in os.listdir(gcavedir): # These may get outdated as data gets tidied up. This should not be in the code!
for f in os.listdir(gcavedir): # These may get outdated as data gets tidied up. This should not be in the code!
if name == "204" and (f in ["skel.svx", "template.svx", "204withents.svx"]):
pass
elif name == "136" and (f in ["136-noents.svx"]):
pass
elif name == "115" and (f in ["115cufix.svx", "115fix.svx"]):
pass
elif os.path.isdir(os.path.join(gcavedir, f)):
if f[0] != ".":
subdirs.append(f)
elif f[-4:] == ".svx":
nf = f[:-4]
if nf.lower() == name.lower() or nf[:3] == "all" or (name, nf) in [("resurvey2005", "145-2005"), ("cucc", "cu115")]:
if (
nf.lower() == name.lower()
or nf[:3] == "all"
or (name, nf) in [("resurvey2005", "145-2005"), ("cucc", "cu115")]
):
if primesvx:
if nf[:3] == "all":
#assert primesvx[:3] != "all", (name, nf, primesvx, gcavedir, subsvx)
# assert primesvx[:3] != "all", (name, nf, primesvx, gcavedir, subsvx)
primesvx = nf
else:
#assert primesvx[:3] == "all", (name, nf, primesvx, gcavedir, subsvx)
# assert primesvx[:3] == "all", (name, nf, primesvx, gcavedir, subsvx)
pass
else:
primesvx = nf
@@ -381,151 +396,165 @@ def identifycavedircontents(gcavedir):
subsvx.append(nf)
else:
pass
#assert re.match(".*?(?:.3d|.log|.err|.txt|.tmp|.diff|.e?spec|~)$", f), (gcavedir, f)
# assert re.match(".*?(?:.3d|.log|.err|.txt|.tmp|.diff|.e?spec|~)$", f), (gcavedir, f)
subsvx.sort()
#assert primesvx, (gcavedir, subsvx)
# assert primesvx, (gcavedir, subsvx)
if primesvx:
subsvx.insert(0, primesvx)
return subdirs, subsvx
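# Sketch of lifting the per-cave special cases above into data rather than code,
# as the "should be in a configuration" comment suggests; the dict itself is
# hypothetical, with the filenames copied from the conditionals above.
IGNORED_SVX = {
    "204": {"skel.svx", "template.svx", "204withents.svx"},
    "136": {"136-noents.svx"},
    "115": {"115cufix.svx", "115fix.svx"},
}

def is_ignored_svx(cavename, filename):
    return filename in IGNORED_SVX.get(cavename, set())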
def get_survexareapath(area):
return survexdatasetpath / str("caves-" + area)
return survexdatasetpath / str("caves-" + area)
# direct local non-database browsing through the svx file repositories
# every time the page is viewed! Should cache this.
def survexcaveslist(request):
'''This reads the entire list of caves in the Loser repo directory and produces a complete report.
"""This reads the entire list of caves in the Loser repo directory and produces a complete report.
It can find caves which have not yet been properly registered in the system by Databasereset.py because
someone may have uploaded the survex files without doing the rest of the integration process.
'''
"""
# TO DO - filter out the non-public caves from display UNLESS LOGGED IN
# This is very impenetrable code, original from Aaron Curtis I think.
onefilecaves = [ ]
multifilecaves = [ ]
subdircaves = [ ]
fnumlist = [ ]
onefilecaves = []
multifilecaves = []
subdircaves = []
fnumlist = []
for area in ["1623", "1626", "1624", "1627"]:
cavesdir = get_survexareapath(area)
arealist = sorted([ (area, -int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ])
arealist = sorted([(area, -int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir)])
fnumlist += arealist
#print(fnumlist)
# print(fnumlist)
# go through the list and identify the contents of each cave directory
for area, num, cavedir in fnumlist:
# these have sub dirs /cucc/ /arge/ /old/ but that is no reason to hide them in this webpage
# so these are now treated the same as 142 and 113 which also have a /cucc/ sub dir
#if cavedir in ["144", "40"]:
# if cavedir in ["144", "40"]:
# continue
# This all assumes that the first .svx file has the same name as the cave name,
# This all assumes that the first .svx file has the same name as the cave name,
# which usually but not always true. e.g. caves-1623/78/allkaese.svx not caves-1623/78/78.svx
# which is why we now also pass through the cavedir
# Still fails for loutitohoehle etc even though this is set correctly when the pending cave is created
cavesdir = get_survexareapath(area)
gcavedir = os.path.join(cavesdir, cavedir)
if os.path.isdir(gcavedir) and cavedir[0] != ".":
subdirs, subsvx = identifycavedircontents(gcavedir)
caveid = check_cave_registered(area, cavedir) # should do this only once per database load or it will be slow
survdirobj = [ ]
caveid = check_cave_registered(
area, cavedir
) # should do this only once per database load or it will be slow
survdirobj = []
for lsubsvx in subsvx:
survdirobj.append(("caves-" +area+ "/" +cavedir+"/"+lsubsvx, lsubsvx))
survdirobj.append(("caves-" + area + "/" + cavedir + "/" + lsubsvx, lsubsvx))
# caves with subdirectories
if subdirs:
subsurvdirs = [ ]
subsurvdirs = []
for subdir in subdirs:
dsubdirs, dsubsvx = identifycavedircontents(os.path.join(gcavedir, subdir))
# assert not dsubdirs # handle case of empty sub directory
lsurvdirobj = [ ]
lsurvdirobj = []
for lsubsvx in dsubsvx:
lsurvdirobj.append(("caves-" +area+ "/" +cavedir+"/"+subdir+"/"+lsubsvx, lsubsvx))
lsurvdirobj.append(("caves-" + area + "/" + cavedir + "/" + subdir + "/" + lsubsvx, lsubsvx))
if len(dsubsvx) >= 1:
subsurvdirs.append((subdir,lsurvdirobj[0], lsurvdirobj[0:])) # list now includes the first item too
subsurvdirs.append(
(subdir, lsurvdirobj[0], lsurvdirobj[0:])
) # list now includes the first item too
subdircaves.append((cavedir, (survdirobj[0], survdirobj[1:]), subsurvdirs))
# multifile caves
elif len(survdirobj) > 1:
multifilecaves.append((survdirobj[0], cavedir, survdirobj[1:]))
# single file caves
elif len(survdirobj) == 1:
onefilecaves.append(survdirobj[0])
return render(request, 'svxfilecavelist.html', {'settings': settings, "onefilecaves":onefilecaves, "multifilecaves":multifilecaves, "subdircaves":subdircaves })
return render(
request,
"svxfilecavelist.html",
{
"settings": settings,
"onefilecaves": onefilecaves,
"multifilecaves": multifilecaves,
"subdircaves": subdircaves,
},
)
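# One way to avoid re-walking the Loser directories on every page view, as the
# docstring suggests; a sketch using Django's cache framework, with the key and
# timeout being assumptions.
from django.core.cache import cache

def cached_cave_listing(build_listing, timeout=600):
    listing = cache.get("svx-cave-dir-listing")
    if listing is None:
        listing = build_listing()  # e.g. the directory walk done above
        cache.set("svx-cave-dir-listing", listing, timeout)
    return listing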
def survexcavesingle(request, survex_cave):
'''parsing all the survex files of a single cave and showing that it's consistent and can find all
"""parsing all the survex files of a single cave and showing that it's consistent and can find all
the files and people. Should explicitly fix the kataster number thing.
kataster numbers are not unique across areas. This used to be a db constraint but we need to manage
kataster numbers are not unique across areas. This used to be a db constraint but we need to manage
this ourselves as we don't want the parser aborting with an error message.
Should use getCave() from models_caves
'''
"""
sc = survex_cave
try:
cave = Cave.objects.get(kataster_number=sc) # This may not be unique.
return render(request, 'svxcavesingle.html', {'settings': settings, "cave":cave })
cave = Cave.objects.get(kataster_number=sc) # This may not be unique.
return render(request, "svxcavesingle.html", {"settings": settings, "cave": cave})
except ObjectDoesNotExist:
# can get here if the survex file is in a directory labelled with unofficial number not kataster number.
# maybe - and _ mixed up, or CUCC-2017- instead of 2017-CUCC-, or CUCC2015DL01 . Let's not get carried away..
# or it might be an exact search for a specific survex file but just missing the '.svx' extension.
if (Path(survexdatasetpath) / Path(survex_cave + ".svx")).is_file():
return svx(request, survex_cave)
for unoff in [sc, sc.replace('-','_'), sc.replace('_','-'), sc.replace('-',''), sc.replace('_','')]:
for unoff in [sc, sc.replace("-", "_"), sc.replace("_", "-"), sc.replace("-", ""), sc.replace("_", "")]:
try:
cave = Cave.objects.get(unofficial_number=unoff) # return on first one we find
return render(request, 'svxcavesingle.html', {'settings': settings, "cave":cave })
cave = Cave.objects.get(unofficial_number=unoff) # return on first one we find
return render(request, "svxcavesingle.html", {"settings": settings, "cave": cave})
except ObjectDoesNotExist:
continue # next attempt in for loop
return render(request, 'errors/svxcavesingle404.html', {'settings': settings, "cave":sc })
continue # next attempt in for loop
return render(request, "errors/svxcavesingle404.html", {"settings": settings, "cave": sc})
except MultipleObjectsReturned:
caves = Cave.objects.filter(kataster_number=survex_cave)
return render(request, 'svxcaveseveral.html', {'settings': settings, "caves":caves })
caves = Cave.objects.filter(kataster_number=survex_cave)
return render(request, "svxcaveseveral.html", {"settings": settings, "caves": caves})
except:
return render(request, 'errors/svxcavesingle404.html', {'settings': settings, "cave":sc })
return render(request, "errors/svxcavesingle404.html", {"settings": settings, "cave": sc})
def check_cave_registered(area, survex_cave):
'''Checks whether a cave has been properly registered when it is found in the Loser repo
"""Checks whether a cave has been properly registered when it is found in the Loser repo
This should really be called by Databasereset not here in a view
Currently Caves are only registered if they are listed in :expoweb: settings.CAVEDESCRIPTIONS
so we need to add in any more here.
This function runs but does not seem to be used?!
A serious bodge anyway.
'''
"""
try:
cave = Cave.objects.get(kataster_number=survex_cave)
cave = Cave.objects.get(kataster_number=survex_cave)
return str(cave)
except MultipleObjectsReturned:
caves = Cave.objects.filter(kataster_number=survex_cave)
for c in caves:
if str(c) == area + "-" + survex_cave :
return str(c) # just get the first that matches
return None # many returned but none in correct area
caves = Cave.objects.filter(kataster_number=survex_cave)
for c in caves:
if str(c) == area + "-" + survex_cave:
return str(c) # just get the first that matches
return None # many returned but none in correct area
except ObjectDoesNotExist:
pass
try:
cave = Cave.objects.get(unofficial_number=survex_cave) # should be unique!
cave = Cave.objects.get(unofficial_number=survex_cave) # should be unique!
if cave.kataster_number:
return str(cave)
else:
return None
except ObjectDoesNotExist:
pass
return None

File diff suppressed because it is too large