Mirror of https://expo.survex.com/repositories/troggle/.git (synced 2024-11-25 08:41:51 +00:00)
remove unused templatetags code
This commit is contained in:
parent 9a69ce50f9
commit 7368942488
@@ -15,9 +15,7 @@ from troggle.core.models.caves import Cave, LogbookEntry, QM, Entrance, CaveAndE
 Some are not used and need renovating or destroying.
 '''
-todo = '''Remove UploadFileForm - replace by Simple variant
-Re engineer Simple upload to not use a Django form object
+todo = '''Re-enable TinyMCE
 '''
 
 class CaveForm(ModelForm):
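Note on the forms todo above: "Re engineer Simple upload to not use a Django form object" — a minimal sketch of what that could look like, reading request.FILES directly and saving through FileSystemStorage. The view name, the 'simplefile' field name and the template name are illustrative assumptions, not existing troggle code.

# Sketch only: a file-upload view that skips the Django Form layer.
# simple_upload, 'simplefile' and 'uploadform.html' are illustrative names,
# not taken from troggle.
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render

def simple_upload(request, destination="/tmp/uploads"):
    saved = []
    if request.method == "POST":
        fs = FileSystemStorage(location=destination)
        # getlist() returns every file posted under this field name, so a
        # multi-file <input type="file" multiple> works without a Form class.
        for f in request.FILES.getlist("simplefile"):
            saved.append(fs.save(f.name, content=f))
    return render(request, "uploadform.html", {"actual_saved": saved})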
@@ -2,9 +2,11 @@ from django.conf import settings
 from django import http
 from django.urls import reverse, resolve,Resolver404
 
 """Non-standard django middleware is loaded from this file.
-It needs re-writing to be compatible with Django v2.0+
 """
+todo = '''SmartAppendSlashMiddleware(object) Not Working.
+It needs re-writing to be compatible with Django v2.0 and later
+'''
 class SmartAppendSlashMiddleware(object):
     """
     "SmartAppendSlash" middleware for taking care of URL rewriting.
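The new middleware todo says SmartAppendSlashMiddleware needs re-writing for Django v2.0 and later. Modern Django middleware is a plain callable that wraps get_response; a minimal sketch of that shape follows. The redirect condition shown is simplified for illustration and is not a drop-in port of the old process_request logic.

# Sketch of the Django 2.0+ middleware shape the todo asks for; the actual
# append-slash decision would need porting from the old process_request code.
from django.http import HttpResponsePermanentRedirect

class SmartAppendSlashMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response   # configured once at server start-up

    def __call__(self, request):
        # Illustrative condition only: append a slash to extension-less paths.
        last = request.path.rsplit("/", 1)[-1]
        if last and "." not in last and not request.path.endswith("/"):
            return HttpResponsePermanentRedirect(request.path + "/")
        return self.get_response(request)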
@@ -1,10 +0,0 @@
-import django
-from django import template
-
-register = template.Library()
-# if django.VERSION[0] >=1 and django.VERSION[1] > 1:
-#     pass
-# else:
-
-#     @register.simple_tag
-#     def csrf_token(): return ""
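The deleted file above was a compatibility shim that registered an empty csrf_token tag for Django versions before 1.2; every current Django ships {% csrf_token %} built in, so nothing needs to replace it. For reference, a custom tag registered the modern way looks like this (the tag name is invented for the example):

# How a simple_tag is registered on current Django; 'example_tag' is a
# made-up name used only for this illustration.
from django import template

register = template.Library()

@register.simple_tag
def example_tag():
    return "rendered by example_tag"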
@@ -1,63 +0,0 @@
-from django import template
-from django.utils.html import conditional_escape
-from django.template.defaultfilters import stringfilter
-from django.utils.safestring import mark_safe
-import re
-
-register = template.Library()
-'''Now entirely defunct.
-
-Simple use in svxfile.html produced a textarea, double-spacing and no colouring.
-so this would take some work, and we are better off not using it but getting
-syntax colouring to work with CodeMirror instead. PPhilip S. 28 March 2021.
-
-The only template which used it, survexblock.html, has been removed as unnecessary.
-'''
-# seems to add extra lines between the commented lines, which isn't so great.
-
-regexes = []
-regexes.append((re.compile(r"(;.*)$", re.IGNORECASE|re.MULTILINE),
-    r'<span class = "comment">\1</span>\n'))
-regexes.append((re.compile(r"^(\s*)(\*include)(\s+)([^\s]*)(.svx)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3<a href="\4.index">\4\5</a>'))
-regexes.append((re.compile(r"^(\s*)(\*include)(\s+)([^\s]*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3<a href="\4.index">\4</a>'))
-regexes.append((re.compile(r"^(\s*)(\*team\s+(?:notes|tape|insts|pics))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*(?:begin|end|copyright|date|entrance|equate|export|fix|prefix|require|SOLVE|title|truncate))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*calibrate\s+(?:TAPE|COMPASS|CLINO|COUNTER|DEPTH|DECLINATION|X|Y|Z)+)(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*data\s+(?:DEFAULT|NORMAL|DIVING|CARTESIAN|TOPOFIL|CYLPOLAR|NOSURVEY|passage)(?:\s+station|\s+from|\s+to|\s+FROMDEPTH|\s+TODEPTH|\s+DEPTHCHANGE|\s+newline|\s+direction|\s+tape|\s+compass|\s+clino|\s+northing|\s+easting|\s+altitude|\s+length|\s+bearing|\s+gradient|\s+ignoreall|\sleft|\sright|\sup|\sdown)*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>'))
-regexes.append((re.compile(r"^(\s*)(\*default\s+(?:CALIBRATE|DATA|UNITS)+)(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*flags\s+(?:DUPLICATE|SPLAY|SURFACE|not DUPLICATE|not SPLAY|not SURFACE))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*infer\s+(?:plumbs|equates|exports))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*instrument\s+(?:compass|clino|tape))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*instrument\s+(?:compass|clino|tape))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*sd\s+(?:TAPE|COMPASS|CLINO|COUNTER|DEPTH|DECLINATION|DX|DY|DZ))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*set\s+(?:BLANK|COMMENT|DECIMAL|EOL|KEYWORD|MINUS|NAMES|OMIT|PLUS|ROOT|SEPARATOR))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(\s*)(\*units\s+(?:TAPE|LENGTH|COMPASS|BEARING|CLINO|GRADIENT|COUNTER|DEPTH|DECLINATION|X|Y|Z))(\s+)(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'\1<span class = "command">\2</span>\3\4'))
-regexes.append((re.compile(r"^(.*)$", re.IGNORECASE|re.MULTILINE),
-    r'<div>\1 </div>\n'))
-
-@register.filter()
-@stringfilter
-def survex_to_html(value, autoescape=None):
-    if autoescape:
-        value = conditional_escape(value)
-    for regex, sub in regexes:
-        print(sub)
-        value = regex.sub(sub, value)
-    return mark_safe(value)
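For the record, the deleted survex_to_html filter simply ran an ordered list of regexes over the raw .svx text, wrapping comments and *commands in spans. A small standalone sketch of the same idea; the two rules and the sample survex text are illustrative, not the full rule set above.

# Standalone sketch of the regex-substitution approach the deleted filter used;
# the two rules and the sample survex text are invented for the example.
import re

rules = [
    (re.compile(r"(;.*)$", re.MULTILINE), r'<span class="comment">\1</span>'),
    (re.compile(r"^(\s*)(\*begin|\*end)(\s+)(.*)$", re.MULTILINE),
     r'\1<span class="command">\2</span>\3\4'),
]

def markup(svx_text):
    # Apply every rule in order, exactly as survex_to_html looped over its regexes.
    for regex, sub in rules:
        svx_text = regex.sub(sub, svx_text)
    return svx_text

print(markup("*begin cave1\n1 2 10.5 250 -5 ; dodgy leg\n*end cave1"))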
@ -1,189 +0,0 @@
|
|||||||
from django import template
|
|
||||||
from django.utils.html import conditional_escape
|
|
||||||
from django.template.defaultfilters import stringfilter
|
|
||||||
from django.utils.safestring import mark_safe
|
|
||||||
from django.conf import settings
|
|
||||||
from troggle.core.models.caves import LogbookEntry, QM, Cave
|
|
||||||
import re, urllib.parse
|
|
||||||
|
|
||||||
'''Originally for the wiki format entries, later re-used simply to clean up HTML escape chars.
|
|
||||||
Now entirely defunct.
|
|
||||||
|
|
||||||
'''
|
|
||||||
todo = '''Checked. Not used anywhere anymore. Replaced by standard Django template filters.
|
|
||||||
'''
|
|
||||||
register = template.Library()
|
|
||||||
|
|
||||||
|
|
||||||
@register.filter()
|
|
||||||
def plusone(n):
|
|
||||||
'''used in templates/svxcaveseveral.html and templates/svxcavessingle.html for formatting
|
|
||||||
'''
|
|
||||||
return n + 1
|
|
||||||
|
|
||||||
|
|
||||||
def wiki_list(line, listdepth):
|
|
||||||
'''Does not seem to be used anywhere except photoSrcRepl() below'''
|
|
||||||
l = ""
|
|
||||||
for d in listdepth:
|
|
||||||
l += d
|
|
||||||
mstar = re.match(l + "\*(.*)", line)
|
|
||||||
if mstar:
|
|
||||||
listdepth.append("\*")
|
|
||||||
return ("<ul>\n" + " " * len(listdepth) + "<li>%s</li>\n" % mstar.groups()[0], listdepth)
|
|
||||||
mhash = re.match(l + "#(.*)", line)
|
|
||||||
if mhash:
|
|
||||||
listdepth.append("#")
|
|
||||||
return ("<ol>\n" + " " * len(listdepth) + "<li>%s</li>\n" % mhash.groups()[0], listdepth)
|
|
||||||
mflat = re.match(l + "(.*)", line)
|
|
||||||
if mflat and listdepth:
|
|
||||||
return (" " * len(listdepth) + "<li>%s</li>\n" % mflat.groups()[0], listdepth)
|
|
||||||
if listdepth:
|
|
||||||
prev = listdepth.pop()
|
|
||||||
if prev == "\*":
|
|
||||||
t, l = wiki_list(line, listdepth)
|
|
||||||
return ("</ul>\n" + t, l)
|
|
||||||
if prev == "#":
|
|
||||||
t, l = wiki_list(line, listdepth)
|
|
||||||
return ("</ol>\n" + t, l)
|
|
||||||
return (line, listdepth)
|
|
||||||
|
|
||||||
@register.filter()
|
|
||||||
@stringfilter
|
|
||||||
def wiki_to_html(value, autoescape=None):
|
|
||||||
"""
|
|
||||||
This is the tag which turns wiki syntax into html. It is intended for long pieces of wiki.
|
|
||||||
Hence it splits the wiki into HTML paragraphs based on double line feeds.
|
|
||||||
|
|
||||||
But it is used as a filter when rendering many, many fields, e.g.
|
|
||||||
epersonexpedition.person|wiki_to_html_short in presonexpedition.html
|
|
||||||
"""
|
|
||||||
#find paragraphs
|
|
||||||
outValue = ""
|
|
||||||
for paragraph in re.split("\n\s*?\n", value, re.DOTALL):
|
|
||||||
outValue += "<p>"
|
|
||||||
outValue += wiki_to_html_short(paragraph, autoescape)
|
|
||||||
outValue += "</p>\n"
|
|
||||||
return mark_safe(outValue)
|
|
||||||
|
|
||||||
@register.filter()
|
|
||||||
@stringfilter
|
|
||||||
def wiki_to_html_short(value, autoescape=None):
|
|
||||||
"""
|
|
||||||
I suspect this is only used for HTML escapes. And we should be using the standard Django
|
|
||||||
filter |safe https://docs.djangoproject.com/en/dev/ref/templates/builtins/#safe
|
|
||||||
|
|
||||||
|
|
||||||
This is the tag which turns wiki syntax into html. It is intended for short pieces of wiki.
|
|
||||||
Hence it is not split the wiki into paragraphs using where it finds double line feeds.
|
|
||||||
|
|
||||||
But it is used as a filter when rendering many, many fields, e.g.
|
|
||||||
entrance.entrance_description|wiki_to_html in extrance.html
|
|
||||||
|
|
||||||
"""
|
|
||||||
if autoescape:
|
|
||||||
value = conditional_escape(value)
|
|
||||||
#deescape doubly escaped characters
|
|
||||||
value = re.sub("&(.*?);", r"&\1;", value, re.DOTALL)
|
|
||||||
#italics and bold
|
|
||||||
value = re.sub("''''([^']+)''''", r"<b><i>\1</i></b>", value, re.DOTALL)
|
|
||||||
value = re.sub("'b''([^']+)'''", r"<b>\1</b>", value, re.DOTALL)
|
|
||||||
value = re.sub("''([^']+)''", r"<i>\1</i>", value, re.DOTALL)
|
|
||||||
|
|
||||||
#make headers
|
|
||||||
def headerrepl(matchobj):
|
|
||||||
number=len(matchobj.groups()[0])
|
|
||||||
num=str(number)
|
|
||||||
if number>1:
|
|
||||||
return '<h'+num+'>'+matchobj.groups()[1]+'</h'+num+'>'
|
|
||||||
else:
|
|
||||||
print('morethanone')
|
|
||||||
return matchobj.group()
|
|
||||||
value = re.sub(r"(?m)^(=+)([^=]+)(=+)$",headerrepl,value)
|
|
||||||
|
|
||||||
#make qm links. this takes a little doing
|
|
||||||
qmMatchPattern=settings.QM_PATTERN
|
|
||||||
def qmrepl(matchobj):
|
|
||||||
"""
|
|
||||||
A function for replacing wikicode qm links with html qm links.
|
|
||||||
Given a matchobj matching a wikilink in the format
|
|
||||||
[[QM:C204-1999-24]]
|
|
||||||
If the QM does not exist, the function will return a link for creating it.
|
|
||||||
"""
|
|
||||||
qmdict={'urlroot':settings.URL_ROOT,'cave':matchobj.groups()[2],'year':matchobj.groups()[1],'number':matchobj.groups()[3]}
|
|
||||||
try:
|
|
||||||
qm=QM.objects.get(found_by__cave__kataster_number = qmdict['cave'],
|
|
||||||
found_by__date__year = qmdict['year'],
|
|
||||||
number = qmdict['number'])
|
|
||||||
return r'<a href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
|
|
||||||
except QM.DoesNotExist: #bother aaron to make him clean up the below code - AC
|
|
||||||
try:
|
|
||||||
placeholder=LogbookEntry.objects.get(date__year=qmdict['year'],cave__kataster_number=qmdict['cave'], title__icontains='placeholder')
|
|
||||||
except LogbookEntry.DoesNotExist:
|
|
||||||
placeholder=LogbookEntry(
|
|
||||||
date='01-01'+qmdict['year'],
|
|
||||||
cave=Cave.objects.get(kataster_number=qmdict['cave']),
|
|
||||||
title='placeholder'
|
|
||||||
)
|
|
||||||
qm=QM(found_by = placeholder, number = qmdict['number'])
|
|
||||||
return r'<a class="redtext" href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
|
|
||||||
|
|
||||||
value = re.sub(qmMatchPattern,qmrepl, value, re.DOTALL)
|
|
||||||
|
|
||||||
#make photo links for [[photo:filename]] or [[photo:filename linktext]], and
|
|
||||||
#insert photos for [[display:left photo:filename]]
|
|
||||||
photoLinkPattern="\[\[\s*photo:(?P<photoName>[^\s]+)\s*(?P<linkText>.*)\]\]"
|
|
||||||
photoSrcPattern="\[\[\s*display:(?P<style>[^\s]+) photo:(?P<photoName>[^\s]+)\s*\]\]"
|
|
||||||
def photoLinkRepl(matchobj):
|
|
||||||
'''Does not seem to be used anywhere except photoSrcRepl() below'''
|
|
||||||
matchdict=matchobj.groupdict()
|
|
||||||
try:
|
|
||||||
linkText=matchdict['linkText']
|
|
||||||
except KeyError:
|
|
||||||
linkText=None
|
|
||||||
|
|
||||||
# try:
|
|
||||||
# photo=DPhoto.objects.get(file=matchdict['photoName'])
|
|
||||||
# if not linkText:
|
|
||||||
# linkText=str(photo)
|
|
||||||
# res=r'<a href=' + photo.get_admin_url() +'>' + linkText + '</a>'
|
|
||||||
# except Photo.DoesNotExist:
|
|
||||||
# res = r'<a class="redtext" href="">make new photo</a>'
|
|
||||||
return res
|
|
||||||
|
|
||||||
def photoSrcRepl(matchobj):
|
|
||||||
'''Does not seem to be used anywhere'''
|
|
||||||
matchdict=matchobj.groupdict()
|
|
||||||
style=matchdict['style']
|
|
||||||
try:
|
|
||||||
photo=Photo.objects.get(file=matchdict['photoName'])
|
|
||||||
res=r'<a href='+photo.file.url+'><img src=' + photo.thumbnail_image.url +' class='+style+' /></a>'
|
|
||||||
except Photo.DoesNotExist:
|
|
||||||
res = r'<a class="redtext" href="">make new photo</a>'
|
|
||||||
return res
|
|
||||||
value = re.sub(photoLinkPattern,photoLinkRepl, value, re.DOTALL)
|
|
||||||
value = re.sub(photoSrcPattern,photoSrcRepl, value, re.DOTALL)
|
|
||||||
|
|
||||||
#make cave links
|
|
||||||
value = re.sub(r"\[\[\s*cave:([^\s]+)\s*\s*\]\]", r'<a href="%scave/\1/">\1</a>' % settings.URL_ROOT, value, re.DOTALL)
|
|
||||||
#make people links
|
|
||||||
value = re.sub(r"\[\[\s*person:(.+)\|(.+)\]\]",r'<a href="%sperson/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
|
|
||||||
#make cavedescription links
|
|
||||||
value = re.sub(r"\[\[\s*cavedescription:(.+)\|(.+)\]\]",r'<a href="%scavedescription/\1/">\2</a>' % settings.URL_ROOT, value, re.DOTALL)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#Make lists from lines starting with lists of [stars and hashes]
|
|
||||||
outValue = ""
|
|
||||||
listdepth = []
|
|
||||||
for line in value.split("\n"):
|
|
||||||
t, listdepth = wiki_list(line, listdepth)
|
|
||||||
outValue += t
|
|
||||||
for item in listdepth:
|
|
||||||
if item == "\*":
|
|
||||||
outValue += "</ul>\n"
|
|
||||||
elif item == "#":
|
|
||||||
outValue += "</ol>\n"
|
|
||||||
return mark_safe(outValue)
|
|
||||||
|
|
||||||
wiki_to_html.needs_autoescape = True
|
|
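The todo in the deleted file notes it was "Replaced by standard Django template filters". In a template that means e.g. {{ entrance.entrance_description|linebreaks }} instead of |wiki_to_html; the equivalent utility calls are sketched below (the sample text is invented).

# Illustrative only: the built-in escape/linebreaks pair covers what
# wiki_to_html / wiki_to_html_short did for plain text descriptions.
from django.utils.html import escape, linebreaks

text = "First paragraph.\n\nSecond & final paragraph."
print(linebreaks(escape(text)))
# -> <p>First paragraph.</p> ... <p>Second &amp; final paragraph.</p>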
core/unused.py (130 deletions)
@@ -1,130 +0,0 @@
-import sys
-import re
-
-from django.conf import settings
-from django.shortcuts import render
-
-"""Oddball mixture of apparently now superfluous functions which should
-be deleted
-
-various HTML/wiki functions presumably for logbooks?
-
-Use unknown:
-weighted_choice(lst)
-randomLogbookSentence()
-"""
-
-def weighted_choice(lst):
-    n = random.uniform(0,1)
-    for item, weight in lst:
-        if n < weight:
-            break
-        n = n - weight
-    return item
-
-def randomLogbookSentence():
-    from troggle.core.models import LogbookEntry
-    randSent={}
-
-    # needs to handle empty logbooks without crashing
-
-    #Choose a random logbook entry
-    randSent['entry']=LogbookEntry.objects.order_by('?')[0]
-
-    #Choose again if there are no sentances (this happens if it is a placeholder entry)
-    while len(re.findall('[A-Z].*?\.',randSent['entry'].text))==0:
-        randSent['entry']=LogbookEntry.objects.order_by('?')[0]
-
-    #Choose a random sentence from that entry. Store the sentence as randSent['sentence'], and the number of that sentence in the entry as randSent['number']
-    sentenceList=re.findall('[A-Z].*?\.',randSent['entry'].text)
-    randSent['number']=random.randrange(0,len(sentenceList))
-    randSent['sentence']=sentenceList[randSent['number']]
-
-    return randSent
-
-
-re_body = re.compile(r"\<body[^>]*\>(.*)\</body\>", re.DOTALL)
-re_title = re.compile(r"\<title[^>]*\>(.*)\</title\>", re.DOTALL)
-
-def get_html_body(text):
-    return get_single_match(re_body, text)
-
-def get_html_title(text):
-    return get_single_match(re_title, text)
-
-def get_single_match(regex, text):
-    match = regex.search(text)
-
-    if match:
-        return match.groups()[0]
-    else:
-        return None
-
-re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
-    (re.compile(r"\<i\>(.*?)\</i\>", re.DOTALL), r"''\1''"),
-    (re.compile(r"\<h1[^>]*\>(.*?)\</h1\>", re.DOTALL), r"=\1="),
-    (re.compile(r"\<h2[^>]*\>(.*?)\</h2\>", re.DOTALL), r"==\1=="),
-    (re.compile(r"\<h3[^>]*\>(.*?)\</h3\>", re.DOTALL), r"===\1==="),
-    (re.compile(r"\<h4[^>]*\>(.*?)\</h4\>", re.DOTALL), r"====\1===="),
-    (re.compile(r"\<h5[^>]*\>(.*?)\</h5\>", re.DOTALL), r"=====\1====="),
-    (re.compile(r"\<h6[^>]*\>(.*?)\</h6\>", re.DOTALL), r"======\1======"),
-    (re.compile(r'(<a href="?(?P<target>.*)"?>)?<img class="?(?P<class>\w*)"? src="?t/?(?P<source>[\w/\.]*)"?(?P<rest>></img>|\s/>(</a>)?)', re.DOTALL),r'[[display:\g<class> photo:\g<source>]]'), #
-    (re.compile(r"\<a\s+id=['\"]([^'\"]*)['\"]\s*\>(.*?)\</a\>", re.DOTALL), r"[[subcave:\1|\2]]"), #assumes that all links with id attributes are subcaves. Not great.
-    #interpage link needed
-    (re.compile(r"\<a\s+href=['\"]#([^'\"]*)['\"]\s*\>(.*?)\</a\>", re.DOTALL), r"[[cavedescription:\1|\2]]"), #assumes that all links with target ids are cave descriptions. Not great.
-    (re.compile(r"\[\<a\s+href=['\"][^'\"]*['\"]\s+id=['\"][^'\"]*['\"]\s*\>([^\s]*).*?\</a\>\]", re.DOTALL), r"[[qm:\1]]"),
-    # (re.compile(r'<a\shref="?(?P<target>.*)"?>(?P<text>.*)</a>'),href_to_wikilinks),
-    ]
-
-def html_to_wiki(text, codec = "utf-8"):
-    if isinstance(text, str):
-        text = str(text, codec)
-    text = re.sub("</p>", r"", text)
-    text = re.sub("<p>$", r"", text)
-    text = re.sub("<p>", r"\n\n", text)
-    out = ""
-    lists = ""
-    #lists
-    while text:
-        mstar = re.match(r"^(.*?)<ul[^>]*>\s*<li[^>]*>(.*?)</li>(.*)$", text, re.DOTALL)
-        munstar = re.match(r"^(\s*)</ul>(.*)$", text, re.DOTALL)
-        mhash = re.match(r"^(.*?)<ol[^>]*>\s*<li[^>]*>(.*?)</li>(.*)$", text, re.DOTALL)
-        munhash = re.match(r"^(\s*)</ol>(.*)$", text, re.DOTALL)
-        mitem = re.match(r"^(\s*)<li[^>]*>(.*?)</li>(.*)$", text, re.DOTALL)
-        ms = [len(m.groups()[0]) for m in [mstar, munstar, mhash, munhash, mitem] if m]
-        def min_(i, l):
-            try:
-                v = i.groups()[0]
-                l.remove(len(v))
-                return len(v) < min(l, 1000000000)
-            except:
-                return False
-        if min_(mstar, ms):
-            lists += "*"
-            pre, val, post = mstar.groups()
-            out += pre + "\n" + lists + " " + val
-            text = post
-        elif min_(mhash, ms):
-            lists += "#"
-            pre, val, post = mhash.groups()
-            out += pre + "\n" + lists + " " + val
-            text = post
-        elif min_(mitem, ms):
-            pre, val, post = mitem.groups()
-            out += "\n" + lists + " " + val
-            text = post
-        elif min_(munstar, ms):
-            lists = lists[:-1]
-            text = munstar.groups()[1]
-        elif min_(munhash, ms):
-            lists.pop()
-            text = munhash.groups()[1]
-        else:
-            out += text
-            text = ""
-    #substitutions
-    for regex, repl in re_subs:
-        out = regex.sub(repl, out)
-    return out
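Of the helpers removed in core/unused.py, get_html_body and get_html_title were thin wrappers that return the first group of a regex search. A self-contained equivalent, for reference; the sample HTML string is invented for the example.

# Self-contained version of the get_single_match idea from the deleted
# core/unused.py; the sample HTML string is invented.
import re

re_title = re.compile(r"<title[^>]*>(.*)</title>", re.DOTALL)

def get_single_match(regex, text):
    match = regex.search(text)
    return match.groups()[0] if match else None

print(get_single_match(re_title, "<html><title>161 Kaninchenhöhle</title></html>"))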
@@ -45,6 +45,7 @@ def todos(request, module):
     from troggle.parsers.logbooks import todo as parserslogbooks
     from troggle.parsers.survex import todo as parserssurvex
     from troggle.core.models.caves import todo as modelcaves
+    from troggle.core.middleware import todo as middleware
     from troggle.core.forms import todo as forms
     tododict = {'views/other': todo,
                 'tests': tests,
@@ -53,6 +54,7 @@ def todos(request, module):
                 'parsers/logbooks': parserslogbooks,
                 'parsers/survex': parserssurvex,
                 'core/models/caves': modelcaves,
+                'core/middleware': middleware,
                 'core/forms': forms}
     return render(request,'core/todos.html', {'tododict': tododict})
 
@@ -263,68 +265,3 @@ def scanupload(request, wallet=None):
 
     return render(request, 'scanuploadform.html',
            {'form': form, 'wallet': wallet, 'year': year, 'prev': prev, 'next': next, 'prevy': prevy, 'nexty': nexty, 'files': files, 'dirs': dirs, 'filesaved': filesaved, 'actual_saved': actual_saved})
-
-
-# @login_required_if_public
-# def verysimplescanupload(request, wallet=None):
-#     '''Upload one scanned image file into a wallet on /expofiles
-#     '''
-#     print(f'VERY SIMPLE')
-#     filesaved = False
-#     actual_saved = []
-#     print(f'! - FORM scanupload - start {wallet}')
-#     if wallet is None:
-#         wallet = "2021#01" # improve this later
-#     if not re.match('(19|20)\d\d:\d\d', wallet):
-#         wallet = "2021:01" # improve this later
-
-#     year = wallet[:4]
-#     nexty = f'{int(year)+1}'
-#     prevy = f'{int(year)-1}'
-#     wnumber = wallet[5:]
-#     next = f'{int(wnumber)+1:02d}'
-#     prev = f'{int(wnumber)-1:02d}'
-
-#     if int(wnumber) == 0:
-#         prev = f'{int(wnumber):02d}'
-
-#     wallet = wallet.replace(':','#')
-#     dirpath = Path(settings.SURVEY_SCANS, year, wallet)
-
-#     form = MyForm()
-
-#     if request.method == 'POST':
-#         form = MyForm(request.POST,request.FILES)
-#         if form.is_valid():
-#             #form.save() # comment out so nothing saved in MEDIA_ROOT/fileuploads
-#             f = request.FILES["simplefile"]
-#             w = request.POST["title"]
-#             multiple = request.FILES.getlist('simplefile')
-#             fs = FileSystemStorage(os.path.join(settings.SURVEY_SCANS, year, w))
-
-#             actual_saved = []
-#             if multiple:
-#                 for f in multiple:
-#                     actual_saved.append( fs.save(f.name, content=f) )
-#                 #print(f'! - FORM scanupload multiple {actual_saved}')
-#                 filesaved = True
-
-#     files = []
-#     dirs = []
-#     #print(f'! - FORM scanupload - start {wallet} {dirpath}')
-#     try:
-#         for f in dirpath.iterdir():
-#             if f.is_dir():
-#                 dirs.append(f.name)
-#             if f.is_file():
-#                 if f.name != 'contents.json' and f.name != 'walletindex.html':
-#                     files.append(f.name)
-#     except FileNotFoundError:
-#         files.append('(no wallet yet - would be created)')
-#     if len(files) ==0 :
-#         files.append('(no image files in wallet)')
-
-#     return render(request, 'scanuploadform.html',
-#            {'form': form, 'wallet': wallet, 'year': year, 'prev': prev, 'next': next, 'prevy': prevy, 'nexty': nexty, 'files': files, 'dirs': dirs, 'filesaved': filesaved, 'actual_saved': actual_saved})
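The commented-out verysimplescanupload block removed above duplicated logic the live scanupload view already performs, such as deriving next/previous wallet names from a 'YYYY#NN' string. A standalone sketch of that arithmetic follows; variables are renamed next_w/prev_w to avoid shadowing the built-in next, and the example wallet mirrors the placeholder in the deleted code.

# Standalone sketch of the wallet next/prev arithmetic from the deleted block;
# next_w/prev_w are renamed from next/prev to avoid shadowing the builtin.
wallet = "2021#01"                   # same placeholder value as in the deleted code

year, wnumber = wallet[:4], wallet[5:]
nexty = f"{int(year) + 1}"           # following year   -> "2022"
prevy = f"{int(year) - 1}"           # preceding year   -> "2020"
next_w = f"{int(wnumber) + 1:02d}"   # following wallet -> "02"
prev_w = f"{int(wnumber) - 1:02d}"   # preceding wallet -> "00"
if int(wnumber) == 0:
    prev_w = f"{int(wnumber):02d}"   # wallet 00 has no predecessor

print(year, nexty, prevy, next_w, prev_w)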