ruff removed unused imports

Philip Sargent 2023-01-19 21:34:09 +00:00
parent 89b0c0862e
commit 1be3a3892c
12 changed files with 64 additions and 117 deletions
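
The hunks below are mechanical lint fixes, not behaviour changes. The exact ruff invocation is not recorded in the commit, so take this as a sketch using ruff's standard pyflakes-derived rule codes; something like `ruff check --fix .` with these rules enabled produces edits of this shape:

    # F401: an import that is never used is deleted outright.
    import csv  # ruff removes this whole line if `csv` is never referenced

    def f841_demo(year: int, n: int) -> None:
        # F841: a local name is bound but never read. Dropping only the
        # binding (as in several hunks below) leaves the value behind as a
        # no-op expression statement, evaluated and immediately discarded:
        f"PH_{int(year)}_{int(n):02d}"  # was: logslug = f"PH_..."

    f841_demo(1999, 7)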

View File

@@ -448,10 +448,6 @@ if __name__ == "__main__":
     jq.enq("survex", import_loadpos)
 elif "drawings" in sys.argv:
     jq.enq("drawings", import_drawingsfiles)
-elif "dumplogbooks" in sys.argv: # untested in 2020
-    dumplogbooks()
-# elif "writecaves" in sys.argv: # untested in 2020 - will overwrite input files!!
-#     writeCaves()
 elif "profile" in sys.argv:
     if runlabel == "del":
         jq.loadprofiles()

View File

@@ -1,11 +1,10 @@
 import csv
 import os
 import re
 from datetime import date
 from django.conf import settings
-from troggle.core.models.caves import QM, Cave, LogbookEntry
+from troggle.core.models.caves import QM, Cave
 from troggle.core.models.troggle import DataIssue
 from troggle.core.utils import save_carefully
@@ -79,7 +78,7 @@ def parseCaveQMs(cave, inputFile, ticked=False):
 try:
     n += 1
     year = int(line[0][1:5])
-    logslug = f"PH_{int(year)}_{int(n):02d}"
+    f"PH_{int(year)}_{int(n):02d}"
     QMnum = re.match(r".*?-\d*?-X?(?P<numb>\d*)", line[0]).group("numb")
     newQM = QM()
     # newQM.found_by=placeholder
@@ -108,7 +107,7 @@ def parseCaveQMs(cave, inputFile, ticked=False):
     number=QMnum, found_by__date__year=year
 ) # if we don't have this one in the DB, save it
 if (
-    preexistingQM.new_since_parsing == False
+    preexistingQM.new_since_parsing is False
 ): # if the pre-existing QM has not been modified, overwrite it - VERY OLD THING
     preexistingQM.delete()
 newQM.expoyear = year

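The `is False` change above is the comparison-to-singleton rule (E712; E711 is the `None` variant that shows up in the caves.py and survex.py hunks below). A standalone illustration, with a local variable standing in for the model field:

    new_since_parsing = False  # hypothetical stand-in for the QM field above

    if new_since_parsing == False:  # E712: equality comparison to a singleton
        pass
    if new_since_parsing is False:  # preferred: identity comparison
        pass

    slug = None
    if slug != None:  # E711: flagged and rewritten the same way
        pass
    if slug is not None:  # the form used in readentrance()/readcave() below
        pass
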
View File

@@ -1,6 +1,5 @@
 import os
 import re
-import sys
 from pathlib import Path
 from django.conf import settings
@@ -44,7 +43,7 @@ def dummy_entrance(k, slug, msg="DUMMY"):
 if ent:
     ent.save() # must save to have id before foreign keys work.
     try: # Now create a entrance slug ID
-        es = EntranceSlug(entrance=ent, slug=slug, primary=False)
+        EntranceSlug(entrance=ent, slug=slug, primary=False)
     except:
         message = f" ! {k:11s} {msg}-{slug} entrance create failure"
         DataIssue.objects.create(parser="caves", message=message, url=f"{slug}")
@@ -66,9 +65,8 @@ def set_dummy_entrance(id, slug, cave, msg="DUMMY"):
 global entrances_xslug
 try:
     entrance = dummy_entrance(id, slug, msg="DUMMY")
-    letter = ""
     entrances_xslug[slug] = entrance
-    ce = CaveAndEntrance.objects.update_or_create(cave=cave, entrance_letter="", entrance=entrance)
+    CaveAndEntrance.objects.update_or_create(cave=cave, entrance_letter="", entrance=entrance)
     message = f" ! Warning: Dummy Entrance successfully set for entrance {id} on cave {cave}"
     DataIssue.objects.create(parser="caves", message=message, url=f"{cave.url}")
@@ -134,37 +132,37 @@ def do_pending_cave(k, url, area):
     print(message)
     return
-default_note = f"_Survex file found in loser repo but no description in expoweb <br><br><br>\n"
-default_note += f"INSTRUCTIONS: First open 'This survex file' (link above the CaveView panel) to find the date and info. Then "
-default_note += f'<br><br>\n\n - (0) look in the <a href="/noinfo/cave-number-index">cave number index</a> for notes on this cave, '
-default_note += f"<br><br>\n\n - (1) search in the survex file for the *ref to find a "
-default_note += f"relevant wallet, e.g.<a href='/survey_scans/2009%252311/'>2009#11</a> and read the notes image files <br>\n - "
+default_note = "_Survex file found in loser repo but no description in expoweb <br><br><br>\n"
+default_note += "INSTRUCTIONS: First open 'This survex file' (link above the CaveView panel) to find the date and info. Then "
+default_note += '<br><br>\n\n - (0) look in the <a href="/noinfo/cave-number-index">cave number index</a> for notes on this cave, '
+default_note += "<br><br>\n\n - (1) search in the survex file for the *ref to find a "
+default_note += "relevant wallet, e.g.<a href='/survey_scans/2009%252311/'>2009#11</a> and read the notes image files <br>\n - "
 default_note += (
-    f"<br><br>\n\n - (2) search in the Expo for that year e.g. <a href='/expedition/2009'>2009</a> to find a "
+    "<br><br>\n\n - (2) search in the Expo for that year e.g. <a href='/expedition/2009'>2009</a> to find a "
 )
-default_note += f"relevant logbook entry, remember that the date may have been recorded incorrectly, "
+default_note += "relevant logbook entry, remember that the date may have been recorded incorrectly, "
 default_note += (
-    f"so check for trips i.e. logbook entries involving the same people as were listed in the survex file, "
+    "so check for trips i.e. logbook entries involving the same people as were listed in the survex file, "
 )
 default_note += (
-    f"and you should also check the scanned copy of the logbook (linked from each logbook entry page) "
+    "and you should also check the scanned copy of the logbook (linked from each logbook entry page) "
 )
-default_note += f"just in case a vital trip was not transcribed, then <br>\n - "
+default_note += "just in case a vital trip was not transcribed, then <br>\n - "
 default_note += (
-    f"click on 'Edit this cave' and copy the information you find in the survex file and the logbook"
+    "click on 'Edit this cave' and copy the information you find in the survex file and the logbook"
 )
-default_note += f"and delete all the text in the 'Notes' section - which is the text you are reading now."
-default_note += f"<br><br>\n\n - Only two fields on this form are essential. "
-default_note += f"Documentation of all the fields on 'Edit this cave' form is in <a href='/handbook/survey/caveentryfields.html'>handbook/survey/caveentryfields</a>"
-default_note += f"<br><br>\n\n - "
-default_note += f"You will also need to create a new entrance from the 'Edit this cave' page. Ignore the existing dummy one, it will evaporate on the next full import."
-default_note += f"<br><br>\n\n - "
-default_note += f"When you Submit it will create a new file in expoweb/cave_data/ "
+default_note += "and delete all the text in the 'Notes' section - which is the text you are reading now."
+default_note += "<br><br>\n\n - Only two fields on this form are essential. "
+default_note += "Documentation of all the fields on 'Edit this cave' form is in <a href='/handbook/survey/caveentryfields.html'>handbook/survey/caveentryfields</a>"
+default_note += "<br><br>\n\n - "
+default_note += "You will also need to create a new entrance from the 'Edit this cave' page. Ignore the existing dummy one, it will evaporate on the next full import."
+default_note += "<br><br>\n\n - "
+default_note += "When you Submit it will create a new file in expoweb/cave_data/ "
 default_note += (
-    f"<br><br>\n\n - Now you can edit the entrance info: click on Edit below for the dummy entrance. "
+    "<br><br>\n\n - Now you can edit the entrance info: click on Edit below for the dummy entrance. "
 )
-default_note += f"and then Submit to save it (if you forget to do this, a dummy entrance will be created for your new cave description)."
-default_note += f"<br><br>\n\n - Finally, you need to find a nerd to edit the file '<var>expoweb/cave_data/pending.txt</var>' "
+default_note += "and then Submit to save it (if you forget to do this, a dummy entrance will be created for your new cave description)."
+default_note += "<br><br>\n\n - Finally, you need to find a nerd to edit the file '<var>expoweb/cave_data/pending.txt</var>' "
 default_note += (
     f"to remove the line <br><var>{slug}</var><br> as it is no longer 'pending' but 'done. Well Done."
 )
@@ -187,7 +185,7 @@ def do_pending_cave(k, url, area):
     print(message)
 try: # Now create a cave slug ID
-    cs = CaveSlug.objects.update_or_create(cave=cave, slug=slug, primary=False)
+    CaveSlug.objects.update_or_create(cave=cave, slug=slug, primary=False)
 except:
     message = f" ! {k:11s} PENDING cave SLUG create failure"
     DataIssue.objects.create(parser="caves", message=message)
@@ -292,7 +290,7 @@ def readentrance(filename):
 for slug in slugs:
     # print("entrance slug:{} filename:{}".format(slug, filename))
     try:
-        cs = EntranceSlug.objects.update_or_create(entrance=e, slug=slug, primary=primary)
+        EntranceSlug.objects.update_or_create(entrance=e, slug=slug, primary=primary)
     except:
         # need to cope with duplicates
         message = f" ! FAILED to get precisely one ENTRANCE when updating using: cave_entrance/{filename}"
@@ -303,10 +301,9 @@ def readentrance(filename):
     DataIssue.objects.create(parser="caves", message=message, url=f"/cave/{slug}/edit/")
     print(message)
 for k in kents:
-    if k.slug() != None:
+    if k.slug() is not None:
         print(" ! - OVERWRITING this one: slug:" + str(k.slug()))
         k.notes = "DUPLICATE entrance found on import. Please fix\n" + k.notes
-        c = k
 primary = False
 # else: # more than one item in long list. But this is not an error, and the max and min have been checked by getXML
 #     slug = Path(filename).stem
@@ -417,7 +414,7 @@ def readcave(filename):
     DataIssue.objects.create(parser="caves", message=message)
     print(message)
 for k in kaves:
-    if k.slug() != None:
+    if k.slug() is not None:
         print(" ! - OVERWRITING this one: slug:" + str(k.slug()))
         k.notes = "DUPLICATE kataster number found on import. Please fix\n" + k.notes
         c = k
@@ -466,7 +463,7 @@ def readcave(filename):
     else:
         entrance = Entrance.objects.get(entranceslug__slug=eslug)
         entrances_xslug[eslug] = entrance
-    ce = CaveAndEntrance.objects.update_or_create(
+    CaveAndEntrance.objects.update_or_create(
         cave=c, entrance_letter=letter, entrance=entrance
     )
 except:
@@ -586,7 +583,7 @@ def readcaves():
     readcave(filename)
 print(" - Setting up all the variously useful alias names")
-mycavelookup = GetCaveLookup()
+GetCaveLookup()
 print(" - Setting pending caves")
 # Do this last, so we can detect if they are created and no longer 'pending'

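Nearly all of the default_note churn above is one rule, F541: an f-string containing no placeholders loses its redundant f prefix, while lines that really interpolate (the one with {slug}) keep it. In miniature:

    note = f"Only two fields on this form are essential. "  # F541: f does nothing
    note = "Only two fields on this form are essential. "   # fixed: plain literal

    slug = "1623-000"  # hypothetical value, for illustration only
    note += f"to remove the line <br><var>{slug}</var><br>"  # real placeholder: f stays
    print(note)
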
View File

@@ -1,17 +1,11 @@
-import csv
-import datetime
 import os
 import re
-import stat
-import sys
-import types
-from functools import reduce
 from pathlib import Path
 import settings
-from troggle.core.models.survex import DrawingFile, SingleScan, Wallet
+from troggle.core.models.survex import DrawingFile, Wallet
 from troggle.core.models.troggle import DataIssue
 from troggle.core.utils import save_carefully
 """Searches through all the :drawings: repository looking
 for tunnel and therion files

View File

@@ -1,11 +1,6 @@
-import os
-import sys
 import django
-from django.contrib.auth.models import User
 from django.core import management
-from django.db import close_old_connections, connection, connections, transaction
-from django.http import HttpResponse
+from django.db import transaction
 import troggle.parsers.caves
 import troggle.parsers.drawings
View File

@@ -1,17 +1,14 @@
-import csv
 import os
 import re
 import time
 from datetime import date, datetime
 from pathlib import Path
 from random import randint
 from django.conf import settings
 from django.template.defaultfilters import slugify
 from django.utils.timezone import get_current_timezone, make_aware
 from parsers.people import GetPersonExpeditionNameLookup
-from troggle.core.models.caves import Cave, GetCaveLookup, LogbookEntry, PersonTrip
+from troggle.core.models.caves import GetCaveLookup, LogbookEntry, PersonTrip
 from troggle.core.models.troggle import DataIssue, Expedition
 from troggle.core.utils import TROG, save_carefully
@@ -254,7 +251,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
 text = text.replace("\n\n\n", "\n\n")
 # Check for an existing copy of the current entry, and save
-expeditionday = expedition.get_expedition_day(date)
+expedition.get_expedition_day(date)
 lookupAttribs = {"date": date, "title": title}
 # 'cave' is converted to a string doing this, which renders as the cave slug.
 # but it is a db query which we should try to avoid - rewrite this
@@ -574,7 +571,6 @@ def parser_blog(year, expedition, txt, sq=""):
 """
 global logentries
 global logdataissues
-errorcount = 0
 tripheads = re.findall(
     r"<article class=\"message message--post js-post js-inlineModContainer\s*\"\s*([\s\S]*?)(?=</article)", txt
@@ -791,9 +787,9 @@ def LoadLogbooks():
 DataIssue.objects.filter(parser="logbooks").delete()
 expos = Expedition.objects.all()
 if len(expos) <= 1:
-    message = f" ! - No expeditions found. Load 'people' first"
+    message = " ! - No expeditions found. Load 'people' first"
     DataIssue.objects.create(parser="logbooks", message=message)
-    logdataissues[f"sqlfail 0000"] = message
+    logdataissues["sqlfail 0000"] = message
     print(message)
     return
@@ -807,7 +803,6 @@ def LoadLogbooks():
 nologbook = noexpo + lostlogbook + sqlfail
 nlbe = {}
 expd = {}
-loglist = []
 bloglist = []

View File

@@ -1,8 +1,6 @@
 import csv
-import datetime
 import os
 import re
-import shutil
 from html import unescape
 from pathlib import Path
@@ -10,7 +8,7 @@ from django.conf import settings
 from unidecode import unidecode
 from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition
-from troggle.core.utils import TROG, save_carefully
+from troggle.core.utils import save_carefully
 """These functions do not match how the stand-alone folk script works. So the script produces an HTML file which has
 href links to pages in troggle which troggle does not think are right.

View File

@@ -1,21 +1,9 @@
-import csv
-import datetime
-import filecmp
-import os
-import re
-import shutil
-import stat
-import subprocess
-import sys
-import types
-from functools import reduce
 from pathlib import Path
 import settings
-from troggle.core.models.survex import DrawingFile, SingleScan, Wallet
+from troggle.core.models.survex import SingleScan, Wallet
 from troggle.core.models.troggle import DataIssue
 from troggle.core.utils import save_carefully
 from troggle.core.views.scans import datewallet
 """Searches through all the survey scans directories (wallets) in expofiles, looking for images to be referenced.
 """
@@ -138,7 +126,7 @@ def load_all_scans():
 # but we also need to check if JSON exists, even if there are no uploaded scan files.
 # Here we know there is a rigid folder structure, so no need to look for sub folders
-print(f"\n - Checking for wallets where JSON exists, but there may be no uploaded scan files:")
+print("\n - Checking for wallets where JSON exists, but there may be no uploaded scan files:")
 print(" ", end="")
 wjson = 0
 contents_path = Path(settings.DRAWINGS_DATA, "walletjson")

View File

@@ -4,13 +4,12 @@ import re
 import subprocess
 import sys
 import time
-from datetime import date, datetime, timedelta, timezone
+from datetime import datetime, timezone
 from pathlib import Path
 from django.utils.timezone import get_current_timezone, make_aware
 import troggle.settings as settings
-from troggle.core.models.caves import QM, Cave, Entrance, LogbookEntry
+from troggle.core.models.caves import QM, Cave, Entrance
 from troggle.core.models.survex import SurvexBlock, SurvexDirectory, SurvexFile, SurvexPersonRole, SurvexStation, Wallet
 from troggle.core.models.troggle import DataIssue, Expedition
 from troggle.core.utils import chaosmonkey, get_process_memory
@@ -335,7 +334,7 @@ class LoadingSurvex:
 if tm:
     record_team_member(tm, survexblock)
 else:
-    if not mteammember.group(2).lower() in ("none", "both"):
+    if mteammember.group(2).lower() not in ("none", "both"):
         message = f"! Weird *team '{mteammember.group(2)}' newstyle line: '{line}' ({survexblock}) {survexblock.survexfile.path}"
         print(self.insp + message)
         DataIssue.objects.create(
@@ -793,15 +792,13 @@ class LoadingSurvex:
 # Older troggle/CSV assumes a logbook entry 'found_by' for each QM, with a date.
 # We don't need this anymore so we don't need to create a placeholder logbook entry.
-qmyear = str(survexblock.date)[:4]
+str(survexblock.date)[:4]
 blockname = survexblock.name[:6] + survexblock.name[-1:]
 # logslug = f'D{int(qmyear)}_{blockname}_{int(qm_no):03d}'
 if survexblock.survexfile.cave:
-    caveslug = survexblock.survexfile.cave.slug()
-    place = survexblock.survexfile.cave
+    survexblock.survexfile.cave.slug()
 else:
-    caveslug = None
-    place = None
+    pass
 try:
     qm = QM.objects.create(
@@ -930,7 +927,7 @@ class LoadingSurvex:
 # if self.flagsstar["duplicate"] == True or self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
 # actually we do want to count duplicates as this is for "effort expended in surveying underground"
-if self.flagsstar["surface"] == True or self.flagsstar["splay"] == True:
+if self.flagsstar["surface"] is True or self.flagsstar["splay"] is True:
     self.flagsstar["skiplegs"] = True
     if debugprint:
         print(
@@ -1147,7 +1144,6 @@ class LoadingSurvex:
 def LoadSurvexSetup(self, survexblock, survexfile):
     self.depthbegin = 0
     self.datastar = self.datastardefault
-    blocklegs = self.legsnumber
     print(
         self.insp
         + f" - MEM:{get_process_memory():.3f} Reading. parent:{survexblock.survexfile.path} <> {survexfile.path} "
@@ -1181,7 +1177,7 @@ class LoadingSurvex:
 slengthtotal = 0.0
 nlegstotal = 0
 self.relativefilename = path
-cave = self.IdentifyCave(path) # this will produce null for survex files which are geographic collections
+self.IdentifyCave(path) # this will produce null for survex files which are geographic collections
 self.currentsurvexfile = survexblock.survexfile
 self.currentsurvexfile.save() # django insists on this although it is already saved !?
@@ -1628,7 +1624,7 @@ class LoadingSurvex:
 DataIssue.objects.create(parser="xEntrances", message=message, url=url)
 print(message)
 print(
-    f"stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
+    "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
 )
 self.caverncount += 1
@@ -1643,7 +1639,7 @@ class LoadingSurvex:
 svxpath = Path(fullpath + ".svx")
 logpath = Path(fullpath + ".log")
-outputdir = Path(svxpath).parent
+Path(svxpath).parent
 if not svxpath.is_file():
     message = f' ! BAD survex file "{fullpath}" specified in *include in {calledpath} '
@@ -1664,7 +1660,7 @@ class LoadingSurvex:
     DataIssue.objects.create(parser="entrances", message=message)
     print(message)
     print(
-        f"stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
+        "stderr:\n\n" + str(sp.stderr) + "\n\n" + str(sp.stdout) + "\n\nreturn code: " + str(sp.returncode)
     )
     self.caverndate = os.path.getmtime(sp.stdout.strip())
 else:
@@ -1786,7 +1782,7 @@ def FindAndLoadSurvex(survexblockroot):
     f"\n - {len(unseens)} survex files found which were not included in main tree. ({len(svx_scan.svxfileslist)} in main tree)",
     file=sys.stderr,
 )
-print(f" -- Now loading the previously-omitted survex files.", file=sys.stderr)
+print(" -- Now loading the previously-omitted survex files.", file=sys.stderr)
 with open(Path(settings.SURVEX_DATA, "_unseens.svx"), "w") as u:
     u.write(
@@ -1794,10 +1790,10 @@ def FindAndLoadSurvex(survexblockroot):
 )
 u.write(f"; autogenerated by parser/survex.py from databasereset.py on '{datetime.now(timezone.utc)}'\n")
 u.write(f"; omitting any file beginning with {excpts}\n\n")
-u.write(f"*begin unseens\n")
+u.write("*begin unseens\n")
 for x in sorted(unseens):
     u.write(f" *include {x}\n")
-u.write(f"*end unseens\n")
+u.write("*end unseens\n")
 survexfileroot = survexblockroot.survexfile # i.e. SURVEX_TOPNAME only
@@ -2065,7 +2061,6 @@ def LoadPositions():
 print(f" - Generating a list of Pos from {topdata}.svx and then loading...")
 found = 0
-skip = {}
 print("\n") # extra line because cavern overwrites the text buffer somehow
 # cavern defaults to using same cwd as supplied input file
@@ -2113,7 +2108,7 @@ def LoadPositions():
 try:
     survexblockroot = SurvexBlock.objects.get(id=1)
 except:
-    message = f" ! FAILED to find root SurvexBlock"
+    message = " ! FAILED to find root SurvexBlock"
     print(message)
     DataIssue.objects.create(parser="entrances", message=message)
     raise
@@ -2131,17 +2126,16 @@ def LoadPositions():
 try:
     sbqs = SurvexBlock.objects.filter(survexpath=blockpath)
     if len(sbqs) == 1:
-        sb = sbqs[0]
+        sbqs[0]
     if len(sbqs) > 1:
         message = f" ! MULTIPLE SurvexBlocks {len(sbqs):3} matching Entrance point {blockpath} {sid} '{id}'"
         print(message)
         DataIssue.objects.create(parser="entrances", message=message)
-        sb = sbqs[0]
+        sbqs[0]
     elif len(sbqs) <= 0:
         message = f" ! ZERO SurvexBlocks matching Entrance point {blockpath} {sid} '{id}'"
         print(message)
         DataIssue.objects.create(parser="entrances", message=message)
-        sb = survexblockroot
 except:
     message = f" ! FAIL in getting SurvexBlock matching Entrance point {blockpath} {sid}"
     print(message)

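Two patterns from the survex.py hunks, shown standalone. The *team check is E713 (a negated membership test becomes `not in`), and the bare `sbqs[0]` and `Path(svxpath).parent` lines are the unused-binding fix again: only the left-hand name is removed, so a harmless no-op expression statement stays behind:

    def team_kind_is_real(kind: str) -> bool:
        # was: if not kind.lower() in ("none", "both"):  -- E713
        return kind.lower() not in ("none", "both")

    sbqs = ["block-a", "block-b"]  # hypothetical stand-in for the queryset
    sbqs[0]  # what remains of `sb = sbqs[0]` once the unused `sb` binding goes

    print(team_kind_is_real("lead"))
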
View File

@@ -1,6 +1,5 @@
 import os
 import shutil
-from pprint import pprint
 """Cleans all django-created files and compiled python. Used by the
 pre-run.sh script which cleans and initialises everything before
@@ -79,7 +78,7 @@ def main():
     delete_sqlite3()
     print("All cleanup operations performed successfully.")
-except Exception as e:
+except Exception:
     print("There was some error! Aaargh. \n")
     raise

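The handler change above is the unused-binding rule applied to an exception target: `e` was never referenced inside the handler, so the `as e` clause goes, and the bare `raise` in the real script still re-raises without it:

    def cleanup() -> None:
        raise RuntimeError("Aaargh")

    try:
        cleanup()
    except Exception:  # was: except Exception as e:  -- `e` never used
        print("There was some error! Aaargh. \n")
        # the original then does a bare `raise`, which needs no bound name
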
View File

@@ -14,10 +14,7 @@ https://docs.djangoproject.com/en/dev/ref/settings/
 # 3.Local application/library specific imports.
 # 4.You should put a blank line between each group of imports.
 import os
-import urllib.parse
-import django
 print("* importing troggle/settings.py")

urls.py
View File

@@ -1,15 +1,12 @@
 from django.conf import settings
-from django.contrib import admin, auth
-from django.urls import include, path, re_path, resolve, reverse
+from django.contrib import admin
+from django.urls import include, path, re_path
 from django.views.generic.base import RedirectView
-from django.views.generic.edit import UpdateView
-from django.views.generic.list import ListView
 from troggle.core.views import statistics, survex
 from troggle.core.views.auth import expologin, expologout
 from troggle.core.views.caves import (cave3d, caveEntrance, caveindex,
                                       cavepage, caveQMs, edit_cave,
-                                      edit_entrance, ent, get_entrances, qm)
+                                      edit_entrance, get_entrances, qm)
 from troggle.core.views.drawings import dwgallfiles, dwgfilesingle
 from troggle.core.views.editor_helpers import image_selector, new_image_form
 from troggle.core.views.expo import (editexpopage, expofiles_redirect,
@@ -21,12 +18,10 @@ from troggle.core.views.logbooks import (Expeditions_jsonListView,
                                          logbookentry, notablepersons, person,
                                          personexpedition)
 from troggle.core.views.other import (controlpanel, exportlogbook, frontpage,
-                                      todos, troggle404)
-from troggle.core.views.prospect import prospecting, prospecting_image
+                                      todos)
+from troggle.core.views.prospect import prospecting
 from troggle.core.views.scans import (allscans, cavewallets, scansingle,
                                       walletslistperson, walletslistyear)
 from troggle.core.views.statistics import dataissues, pathsreport, stats
 from troggle.core.views.survex import survexcavesingle, survexcaveslist, svx
 from troggle.core.views.uploads import dwgupload, photoupload, scanupload
"""This sets the actualurlpatterns[] and urlpatterns[] lists which django uses