diff --git a/noinfo/make.py b/noinfo/make.py
new file mode 100644
index 000000000..10a8e3056
--- /dev/null
+++ b/noinfo/make.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python 
+import os, operator, urllib, json, re
+
+# 2018-08-27 edited Philip Sargent
+# 2017 originally by Martin Green
+
+loser_dir = "/home/expo/loser/"
+html_base = "<html><body>%(body)s</body></html>"
+html_year_index = html_base % {"body": "<H1>%(year)s surveys</H1><H2>Persons</H2><UL>%(persons)s</UL><H2>Wallets</H2><table>%(wallets)s</table><H2>Needing Scanning</H2><ul>%(needing scanning)s</ul><H2>Website needing updating</H2><ul>%(website needing updating)s</ul>"}
+html_year_person = "<li><a href='%(person)s.html'>%(person)s</a><ul>%(complaints)s</ul></li>"
+html_year_wallet_entry = "<tr><td><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></td> <td>%(complaints)s</td></tr>"
+html_person_wallet_entry = "<li><a href='%(walletindex)s'>%(walletname)s</a> <ul>%(complaints)s</ul></li>"
+html_year_scanning_entry = "<li><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></li>"
+html_wallet_file_entry = "<li><a href='%(fileurl)s'>%(filename)s</a></li>"
+html_wallet_index = html_base % {"body": "<H1>%(title)s : %(cave)s : %(name)s</H1><p>Date: %(date)s</p><p>People: %(people)s</p><p> <a href='/%(description)s'>Description</a></p><p>Survex file: %(survex)s</p><H2>Issues</H2>%(complaints)s<H2>Files</H2><UL>%(files)s</UL>"}
+html_survex_required = {True: "Survex ", False: ""}
+html_plan_scanned = {True: "", False: "Plan "}
+html_elev_scanned = {True: "", False: "Elev "}
+html_description_written = {True: "", False: "Desc "}
+html_qms_written = {True: "", False: "QMs "}
+html_status = {True: "Issues: ", False: ""}
+html_person = html_base % {"body": "<H1>%(person)s</H1><H2>Outstanding Wallets</H2><UL>%(wallets)s</UL>"}
+html_complaint_items = "<li>%(count)i %(complaint)s</li>"
+html_items = "<li>%s</li>"
+
+blank_json = {"survex file": "",
+ "survex not required": False,
+ "plan not required": False,
+ "elev not required": False,
+ "plan drawn": False,
+ "elev drawn": False,
+ "description written": False,
+ "qms written": False,
+ "website updated": False,
+ "electronic survey": False,
+ "elev not required": False,
+ "date": "",
+ "people": ["Unknown"],
+ "description url": "",
+ "cave": "",
+ "name": ""}
+
+#need to use wallets as a dict/tuple (id,cave,name) - not sure how.
+wallets = []
+wallets_needing_scanning = set()
+website_needing_updating = set()
+people = {}
+
+#use dir this file is in to get current year
+path,year = os.path.split(os.path.dirname(os.path.realpath(__file__)))
+
+for item in os.listdir("."):
+    if os.path.isdir(item):
+        files = []
+        for f in os.listdir(os.path.join(".", item)):
+            if f not in ["contents.json", "contents.json~","index.html"] and os.path.isfile(os.path.join(".", item, f)):
+                files.append(f)
+        contents_path = os.path.join(".", item, "contents.json")
+        print "Reading file %s" % (contents_path) 
+        if not os.path.isfile(contents_path):
+            json_file = open(contents_path, "w")
+            json.dump(blank_json, json_file, indent = 1) 
+            json_file.close()
+        json_file = open(contents_path)
+        #print json_file
+        data = json.load(json_file)
+        json_file.close()
+        write_required = False
+        try:
+            wallet, cave, name = re.match("(\d\d\d\d#\d\d)-(.*) (.*)", item).groups()
+        except:
+            wallet, cave, name = "", "", ""
+        #print data
+        for k, v in blank_json.items():
+            if not data.has_key(k):
+                if k == "cave":
+                    data[k] = cave
+                elif k == "name":    
+                    data[k] = name
+                else:    
+                    data[k] = v
+                write_required = True
+        #print write_required
+        if write_required:
+            json_file = open(contents_path, "w")
+            json.dump(data, json_file, indent = 1)
+            json_file.close() 
+
+
+        #make wallet descriptions
+        
+        
+        #Survex
+        survex_required = (data["survex not required"] and data["survex file"] == "") or \
+                          not (not data["survex not required"] and os.path.isfile(os.path.join(loser_dir, data["survex file"])))
+        survex_complaint = ""
+        if data["survex not required"] and data["survex file"] != "":
+            survex_complaint = "Survex is not required and yet there is a survex file!"    
+        if not data["survex not required"] and data["survex file"] == "":
+            survex_complaint = "A survex file is required, but has not been specified!"    
+        if not data["survex not required"] and not os.path.isfile(os.path.join(loser_dir, data["survex file"])):
+            survex_complaint = "The specified survex file (%s) does not exist here!" % data["survex file"]
+        complaints = []
+        person_complaints = []
+        if survex_required:
+            complaints.append(survex_complaint)
+            person_complaints.append(survex_complaint)
+        
+        #Notes
+        notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
+        if not notes_scanned:
+            complaints.append("The notes needs scanning (no noteN.jpg file found)") 
+            wallets_needing_scanning.add(item)
+        
+        #Plan drawing required
+        plan_scanned = reduce(operator.or_, [f.startswith("plan") for f in files], False)
+        plan_drawing_required = not (plan_scanned or data["plan drawn"])
+        if plan_drawing_required:
+            complaints.append("The plan needs drawing (no planN.jpg file found)") 
+            person_complaints.append(" plan(s) needs drawing (no planN.jpg file found)") 
+        if not plan_drawing_required and not plan_scanned:
+            complaints.append("The plan needs scanning (no planN.jpg file found)") 
+            wallets_needing_scanning.add(item)
+
+        
+        #Elev drawing required
+        elev_scanned = reduce(operator.or_, [f.startswith("elev") for f in files], False)
+        elev_drawing_required = not (elev_scanned or data["elev drawn"])
+        if elev_drawing_required:
+            complaints.append("The elev needs drawing (no elevN.jpg file found)") 
+            person_complaints.append(" elev(s) needs drawing (no elevN.jpg file found)") 
+        if not elev_drawing_required and not elev_scanned:
+            complaints.append("The elev needs scanning (no elevN.jpg file found)")
+            wallets_needing_scanning.add(item)
+
+        #Description
+        if not data["description written"]:
+            complaints.append("The description needs writing") 
+            person_complaints.append(" description(s) needs writing") 
+
+        #QMS
+        if not data["qms written"]:
+            complaints.append("The QMs needs writing") 
+            person_complaints.append(" set(s) of QMs needs writing") 
+
+        #Website
+        if not data["website updated"]:
+            complaints.append("The guidebook description on website needs updating")
+            website_needing_updating.add(item) 
+
+        #Electronic Surveys
+        if not data["electronic survey"]:
+            complaints.append("Tunnel / Therion files need drawing") 
+
+        if data["survex file"]:
+            survex_description = data["survex file"]
+        else:
+            survex_description = "Not specified"
+
+        wallet_index_file = open(os.path.join(item, "index.html"), "w")
+        wallet_index_file.write(html_wallet_index % {"title": item,
+                                                     "cave": data["cave"],
+                                                     "name": data["name"],
+                                                     "date": data["date"],
+                                                     "people": reduce(operator.add, [" %s," % person for person in data["people"]], ""),
+                                                     "description": data["description url"],
+                                                     "survex": survex_description,
+                                                     "complaints": reduce(operator.add, ["<p>" + complaint + "</p>" for complaint in complaints], ""),
+                                                     "files": reduce(operator.add, 
+                                                                     [html_wallet_file_entry % {"fileurl": urllib.quote(f), 
+                                                                                                "filename": f} 
+                                                                      for f
+                                                                      in files],
+                                                                     "")})
+        wallet_index_file.close()
+        wallets.append((item,  data["cave"],  data["name"], survex_required, plan_scanned, elev_scanned, data["description written"], data["qms written"]))
+        
+        #People
+
+        for person in data["people"]:
+            # delete all person.html as we are recreating all the ones that matter and old ones have old data
+            if os.path.isfile(person + ".html"):
+                os.remove(person + ".html")
+        if person_complaints:
+            for person in data["people"]:
+                if not people.has_key(person):
+                    people[person] = []
+                people[person].append((item, person_complaints))
+            
+
+wallets.sort()
+website_needing_updating = list(website_needing_updating)
+website_needing_updating.sort()
+wallets_needing_scanning = list(wallets_needing_scanning)
+wallets_needing_scanning.sort()
+
+person_summary = []
+for person, person_wallets in people.items():
+    complaints = reduce(operator.add, [complaints for wallet, complaints in person_wallets], [])
+    complaints_summary = []
+    for complaint in set(complaints):
+        complaints_summary.append((complaint, complaints.count(complaint)))
+    person_summary.append((person, complaints_summary))
+        
+person_summary = dict(person_summary)
+
+year_index_file = open("index.html", "w")
+year_index_file.write(html_year_index % {"year": year, "persons": reduce(operator.add,  [html_year_person % {"person": person,
+                                                                                               "complaints": reduce(operator.add,
+                                                                                                                    [html_complaint_items % {"complaint": complaint, 
+                                                                                                                                             "count": count}
+                                                                                                                     for complaint, count
+                                                                                                                     in complaints],
+                                                                                                                    "")} 
+                                                            for person, complaints
+                                                            in person_summary.items()], ""),
+                                         "needing scanning": reduce(operator.add,  [html_year_scanning_entry % {"walletname": wallet, 
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html"} 
+                                                            for (wallet)
+                                                            in wallets_needing_scanning], ""),
+                                         "website needing updating": reduce(operator.add,  [html_year_scanning_entry % {"walletname": wallet,
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html"} 
+                                                            for (wallet)
+                                                            in website_needing_updating], ""),
+                                         "wallets": reduce(operator.add, 
+                                                           [html_year_wallet_entry % {"walletname": wallet,
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html", 
+                                                                                      "complaints": html_status[survex_required or not plan_scanned or not elev_scanned or not description_written or not qms_written] + html_survex_required[survex_required] + html_plan_scanned[plan_scanned] + html_elev_scanned[elev_scanned] + html_description_written[description_written] + html_qms_written[qms_written] } 
+                                                            for (wallet, cave, name, survex_required, plan_scanned, elev_scanned, description_written, qms_written) 
+                                                            in wallets])})
+year_index_file.close()
+
+for person, item_complaint_list in people.items():
+    person_file = open(person + ".html", "w")
+    person_file.write(html_person % {"person": person, 
+                                     "wallets": reduce(operator.add, [html_person_wallet_entry % {"walletname": wallet, 
+                                                                                                "walletindex": urllib.quote(wallet) + "/index.html", 
+                                                                                                "complaints": reduce(operator.add, 
+                                                                                                                     [html_items % complaint 
+                                                                                                                      for complaint 
+                                                                                                                      in complaints], 
+           "") } 
+                                                                      for wallet, complaints 
+                                                                      in item_complaint_list], "")
+                                     })
+    person_file.close()
diff --git a/noinfo/wallets.py b/noinfo/wallets.py
new file mode 100755
index 000000000..1a7dbb9bf
--- /dev/null
+++ b/noinfo/wallets.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python 
+import sys, os, operator, urllib, json, re, time
+
+# 2017 originally by Martin Green
+# 2018-08-27 edited Philip Sargent
+# 2019-03-02 extended to take command line argument of loser_dir and set mod time of index.html to be sane as json file
+
+loser_dir = "/home/expo/loser/"
+#loser_dir = "/mnt/d/CUCC-Expo/loser/"  # when running on Win10/bash
+
+if len(sys.argv) > 1 :
+	if sys.argv[1] != "":
+		loser_dir = sys.argv[1]
+
+print loser_dir
+
+html_base = "<html><head><style>div { column-count:5;}</style></head><body>%(body)s</body></html>"
+html_year_index = html_base % {"body": "<H1>%(year)s surveys: wallets status</H1><H2>Persons</H2><UL>%(persons)s</UL><H2>Wallets</H2><table>%(wallets)s</table><H2>Needing Scanning</H2><div><ul>%(needing scanning)s</ul></div><H2>Website (Guidebook description) needing updating</H2><div><ul>%(website needing updating)s</ul></div>"}
+html_year_person = "<li><a href='%(person)s.html'>%(person)s</a><ul>%(complaints)s</ul></li>"
+html_year_wallet_entry = "<tr><td><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></td> <td>%(complaints)s</td></tr>"
+html_person_wallet_entry = "<li><a href='%(walletindex)s'>%(walletname)s</a> <ul>%(complaints)s</ul></li>"
+html_year_scanning_entry = "<li><a href='%(walletindex)s'>%(walletname)s %(cave)s %(name)s</a></li>"
+html_wallet_file_entry = "<li><a href='%(fileurl)s'>%(filename)s</a></li>"
+html_wallet_index = html_base % {"body": "<H1>%(title)s : %(cave)s : %(name)s</H1><p>Date: %(date)s</p><p>People: %(people)s</p><p>Cave <a href='%(description)s'>Guidebook description</a> - %(description_needed)s <p>Survex file:<br>&nbsp;&nbsp;Local (Windows): <a href='%(loser_dirw)s%(survex)s'>%(loser_dirw)s%(survex)s</a><br>&nbsp;&nbsp;Local (Linux): <a href='%(loser_dir)s%(survex)s' download>%(loser_dir)s%(survex)s</a><br>&nbsp;&nbsp;Server: &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<a href='http://expo.survex.com/survexfile/%(survex)s'>%(survex)s</a></p><a href='../index.html'>Wallet index for this year</a> </p><H2>Issues</H2>%(complaints)s<H2>Files</H2><UL>%(files)s</UL>"}
+html_survex_required = {True: "Survex ", False: ""}
+html_plan_scanned = {True: "", False: "Plan "}
+html_elev_scanned = {True: "", False: "Elev "}
+html_description_written = {True: "", False: "Desc "}
+html_qms_written = {True: "", False: "QMs "}
+html_status = {True: "Issues: ", False: ""}
+html_person = html_base % {"body": "<H1>%(person)s</H1><H2>Outstanding Wallets</H2><UL>%(wallets)s</UL>"}
+html_complaint_items = "<li>%(count)i %(complaint)s</li>"
+html_items = "<li>%s</li>"
+
+blank_json = {
+ "cave": "", 
+ "date": "", 
+ "description url": "", 
+ "description written": False, 
+ "electronic survey": False, 
+ "elev drawn": False, 
+ "elev not required": False, 
+ "name": "", 
+ "people": [
+  "Unknown"
+ ], 
+ "plan drawn": False, 
+ "plan not required": False, 
+ "qms written": False, 
+ "survex file": "", 
+ "survex not required": False, 
+ "website updated": False}
+
+#need to use wallets as a dict/tuple (id,cave,name) - not sure how.
+wallets = []
+wallets_needing_scanning = set()
+website_needing_updating = set()
+people = {}
+
+#use dir this file is in to get current year
+path,year = os.path.split(os.path.dirname(os.path.realpath(__file__)))
+
+for item in sorted(os.listdir(".")):
+    if os.path.isdir(item):
+        files = []
+        for f in os.listdir(os.path.join(".", item)):
+            if f not in ["contents.json", "contents.json~","index.html"] and os.path.isfile(os.path.join(".", item, f)):
+                files.append(f)
+        contents_path = os.path.join(".", item, "contents.json")
+#       print "Trying to read file %s" % (contents_path) 
+        if not os.path.isfile(contents_path):
+            print "Creating file %s from template" % (contents_path) 
+            json_file = open(contents_path, "w")
+            json.dump(blank_json, json_file, sort_keys=True, indent = 1) 
+            json_file.close()
+        # print "Reading file %s" % (contents_path) 
+        json_file = open(contents_path)
+        #print json_file
+        data = json.load(json_file)
+        json_file.close()
+        write_required = False
+        try:
+            wallet, cave, name = re.match("(\d\d\d\d#\d\d)-(.*) (.*)", item).groups()
+        except:
+            wallet, cave, name = "", "", ""
+        #print data
+        for k, v in blank_json.items():
+            if not data.has_key(k):
+                if k == "cave":
+                    data[k] = cave
+                elif k == "name":    
+                    data[k] = name
+                else:    
+                    data[k] = v
+                write_required = True
+        #print write_required
+        if write_required:
+            print "Writing file %s" % (contents_path) 
+            json_file = open(contents_path, "w")
+            json.dump(data, json_file, indent = 1)
+            json_file.close() 					
+	# Get modification time of contents.json
+	# print("json last modified: %s" % time.ctime(os.path.getmtime(contents_path)))
+	json_mtime = os.path.getmtime(contents_path)
+
+        #make wallet descriptions
+        
+        #Survex
+        survex_required = (data["survex not required"] and data["survex file"] == "") or \
+                          not (not data["survex not required"] and os.path.isfile(os.path.join(loser_dir, data["survex file"])))
+        survex_complaint = ""
+        if data["survex not required"] and data["survex file"] != "":
+            survex_complaint = "Survex is not required and yet there is a survex file!"    
+        if not data["survex not required"] and data["survex file"] == "":
+            survex_complaint = "A survex file is required, but has not been specified!"    
+        if not data["survex not required"] and not os.path.isfile(os.path.join(loser_dir, data["survex file"])):
+            survex_complaint = "The specified survex file (%s) does not exist here!" % os.path.join(loser_dir, data["survex file"])
+        complaints = []
+        person_complaints = []
+        if survex_required:
+            complaints.append(survex_complaint)
+            person_complaints.append(survex_complaint)
+        
+        #Notes
+        notes_scanned = reduce(operator.or_, [f.startswith("note") for f in files], False)
+        if not notes_scanned:
+            complaints.append("The notes needs scanning (no noteN.jpg file found)") 
+            wallets_needing_scanning.add(item)
+        
+        #Plan drawing required
+        plan_scanned = reduce(operator.or_, [f.startswith("plan") for f in files], False)
+        plan_drawing_required = not (plan_scanned or data["plan drawn"])
+        if plan_drawing_required:
+            complaints.append("The plan needs drawing (no planN.jpg file found)") 
+            person_complaints.append(" plan(s) needs drawing (no planN.jpg file found)") 
+        if not plan_drawing_required and not plan_scanned:
+            complaints.append("The plan needs  <em>scanning</em> (no planN.jpg file found)") 
+            wallets_needing_scanning.add(item)
+
+        
+        #Elev drawing required
+        elev_scanned = reduce(operator.or_, [f.startswith("elev") for f in files], False)
+        elev_drawing_required = not (elev_scanned or data["elev drawn"])
+        if elev_drawing_required:
+            complaints.append("The elev needs drawing (no elevN.jpg file found)") 
+            person_complaints.append(" elev(s) needs drawing (no elevN.jpg file found)") 
+        if not elev_drawing_required and not elev_scanned:
+            complaints.append("The elev needs <em>scanning</em> (no elevN.jpg file found)")
+            wallets_needing_scanning.add(item)
+
+        #Description
+        if not data["description written"]: 
+           complaints.append("The description needs writing") 
+           person_complaints.append(" description(s) needs writing")
+	description_needed = "Not present, needs doing."
+ 	
+
+        #QMS
+        if not data["qms written"]:
+            complaints.append("The QMs needs writing") 
+            person_complaints.append(" set(s) of QMs needs writing") 
+
+        #Website
+        if not data["website updated"]:
+            complaints.append("The guidebook description on the website needs updating")
+            website_needing_updating.add(item) 
+
+        #Electronic Surveys
+        if not data["electronic survey"]:
+            complaints.append("Tunnel / Therion files need drawing") 
+
+        if data["survex file"]:
+            survex_description = data["survex file"]
+        else:
+            survex_description = "Not specified"
+
+        wallet_index_file = open(os.path.join(item, "index.html"), "w")
+        wallet_index_file.write(html_wallet_index % {"title": item,
+                                                     "cave": data["cave"],
+                                                     "name": data["name"],
+                                                     "date": data["date"],
+                                                     "people": reduce(operator.add, [" %s," % person for person in data["people"]], ""),
+                                                     "description": "http://expo.survex.com"+data["description url"],
+																										 "description_needed": description_needed,
+																										 "loser_dir": loser_dir,
+																										 "loser_dirw": loser_dir[5].upper() + ':/' + loser_dir[7:],
+                                                     "survex": survex_description,
+                                                     "complaints": reduce(operator.add, ["<p>" + complaint + "</p>" for complaint in complaints], ""),
+                                                     "files": reduce(operator.add, 
+                                                                     [html_wallet_file_entry % {"fileurl": urllib.quote(f), 
+                                                                                                "filename": f} 
+                                                                      for f
+                                                                      in files],
+                                                                     "")})
+        wallet_index_file.close()
+        wallets.append((item,  data["cave"],  data["name"], survex_required, plan_scanned, elev_scanned, data["description written"], data["qms written"]))
+	# Set modification time to be the same as that of contents.json
+	index_file = item+"/index.html"
+	os.utime(index_file, ( json_mtime,json_mtime))
+
+				
+        #People
+
+        for person in data["people"]:
+            # delete all person.html as we are recreating all the ones that matter and old ones have old data
+            if os.path.isfile(person + ".html"):
+                os.remove(person + ".html")
+        if person_complaints:
+            for person in data["people"]:
+                if not people.has_key(person):
+                    people[person] = []
+                people[person].append((item, person_complaints))
+            
+
+wallets.sort()
+website_needing_updating = list(website_needing_updating)
+website_needing_updating.sort()
+wallets_needing_scanning = list(wallets_needing_scanning)
+wallets_needing_scanning.sort()
+
+person_summary = []
+for person, person_wallets in people.items():
+    complaints = reduce(operator.add, [complaints for wallet, complaints in person_wallets], [])
+    complaints_summary = []
+    for complaint in set(complaints):
+        complaints_summary.append((complaint, complaints.count(complaint)))
+    person_summary.append((person, complaints_summary))
+        
+person_summary = dict(person_summary)
+
+year_index_file = open("index.html", "w")
+year_index_file.write(html_year_index % {"year": year, "persons": reduce(operator.add,  [html_year_person % {"person": person,
+                                                                                               "complaints": reduce(operator.add,
+                                                                                                                    [html_complaint_items % {"complaint": complaint, 
+                                                                                                                                             "count": count}
+                                                                                                                     for complaint, count
+                                                                                                                     in complaints],
+                                                                                                                    "")} 
+                                                            for person, complaints
+                                                            in person_summary.items()], ""),
+                                         "needing scanning": reduce(operator.add,  [html_year_scanning_entry % {"walletname": wallet, 
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html"} 
+                                                            for (wallet)
+                                                            in wallets_needing_scanning], ""),
+                                         "website needing updating": reduce(operator.add,  [html_year_scanning_entry % {"walletname": wallet,
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html"} 
+                                                            for (wallet)
+                                                            in website_needing_updating], ""),
+                                         "wallets": reduce(operator.add, 
+                                                           [html_year_wallet_entry % {"walletname": wallet,
+                                                                                      "cave": cave,
+                                                                                      "name": name,
+                                                                                      "walletindex": urllib.quote(wallet) + "/index.html", 
+                                                                                      "complaints": html_status[survex_required or not plan_scanned or not elev_scanned or not description_written or not qms_written] + html_survex_required[survex_required] + html_plan_scanned[plan_scanned] + html_elev_scanned[elev_scanned] + html_description_written[description_written] + html_qms_written[qms_written] } 
+                                                            for (wallet, cave, name, survex_required, plan_scanned, elev_scanned, description_written, qms_written) 
+                                                            in wallets])})
+year_index_file.close()
+
+for person, item_complaint_list in people.items():
+    person_file = open(person + ".html", "w")
+    person_file.write(html_person % {"person": person, 
+                                     "wallets": reduce(operator.add, [html_person_wallet_entry % {"walletname": wallet, 
+                                                                                                "walletindex": urllib.quote(wallet) + "/index.html", 
+                                                                                                "complaints": reduce(operator.add, 
+                                                                                                                     [html_items % complaint 
+                                                                                                                      for complaint 
+                                                                                                                      in complaints], 
+           "") } 
+                                                                      for wallet, complaints 
+                                                                      in item_complaint_list], "")
+                                     })
+    person_file.close()
diff --git a/years/2018/topcamplist.html b/years/2018/topcamplist.html
index d426c2919..a3e9c39b9 100644
--- a/years/2018/topcamplist.html
+++ b/years/2018/topcamplist.html
@@ -9,7 +9,7 @@
 <p>This inventory done Saturday 18th August 2018
 <p>Photographs of notes taken at the time by Ruairidh Macleod are 
 <a href="../../../expofiles/photos/2018/RuairidhMacleod/topcamp-low-res/">
-online here</a>. <p>Pages 1,2,3,4,5 still to transcribe.
+online here</a>. <p>Pages 2,3,4,5 still to transcribe.
 
 <h2>Food</h2>
 <ul>
@@ -66,11 +66,6 @@ online here</a>. <p>Pages 1,2,3,4,5 still to transcribe.
 <li>clear plastic bag continaing: 2 large crowbars, pot, 2 spoons and 2 cups, 
 <li>unmarked drum containing flapjacks (?)
 </ul>
-
-<li>Page 6:
-<ul>
-<li>....
-</ul>
 <li>Page 5:
 <ul>
 <li>....
@@ -83,13 +78,73 @@ online here</a>. <p>Pages 1,2,3,4,5 still to transcribe.
 <ul>
 <li>....
 </ul>
-<li>Page 2:
+<li>Page 2: In Traungold
 <ul>
-<li>....
+<li>tub of candles (about 50) (? Hilti?)
+<li>Campbeds: 15 rolled up, about 80 legs, one disassembled (T=16)
+<li>Gryke (toilet) stick and tongs
+<li>Gryke(toilet) seat
+<li>rescue stretcher bag
+<li>rescue grab bag:
+<ul>
+<li>80m of 10mm rope
+<li>1x short rope
+<li>Grigri + krab
+<li>pulley + krab
+<li>Petzl traxion
+<li>8 krabs
+<li>2 slings
+<li>10 ring hangers
 </ul>
-<li>Page 1:
+<li>tackle sack (Fat Freddy) full of bivvy rope
+<li>1300 Peli Case first aid kit (yellow)
+<li>1300 Peli Case first aid kit (orange)
+<li>1st response grab bag:
 <ul>
-<li>....
+<li>4-man bothy bag
+<li>1 survival bag
+<li>2 krabs
+<li>1 pulley
+<li>2 ~sqiggles~ (??)
+<li>2x 0.5 litre bottles
+<li>1 flapjack tub
+<li>1 brew kit
+</ul>
+<li>1 bag of 5 4-6mm terra-nova bothy bags
+<li>c.100 reflectors - in bag
+<li>8 large measuring tapes
+<li>whiteboard
+<li>6 large rectangular green tarps (inc. water tarp.)
+<li>6x10m tarp (in package) 
+<li>200m green 9mm rope (new)
+<li>1 small red rucksack (for drill)
+</ul>
+<li>Grey Box 1 (Page 1)
+<ul>
+<li>1 empty red box
+<li>6 complete first aid kit boxes
+<li>5 Quechua pot/pan sets (packed within the stoves)
+<li>5 standard gas stoves
+<li>1 MST pocket stove
+<li>2 black pot/pan sets
+<li>6 square blue plastic cups
+<li>lighters tin: 18 lighters, 3 packets of matches
+<li>4 440g propane/butane gas canisters (mostly full)
+<li>4 230g propane/butane gas canisters (mostly full)
+<li>4 100g propane/butane gas canisters (mostly full)
+<li>1 roll of conservation tape
+<li>2/3 clear plastic box of Smash
+<li>2 1/2 packets 100x cable ties
+<li>12 rolls BioM&uuml;hl bags
+<li>1 bottle hand sanitizer
+<li>2 bottles sun cream
+<li>2 tubs of ground coffee (how big is a tub?)
+<li>1 pot of whiteboard pens
+<li>1 box of 50 propelling pencils (half full)
+<li>1 unopened packet of filter coffee
+<li>6 tubs peanut butter
+<li>2 tubs of smooth (? - peanut butter?)
+<li>1 can of WD40
 </ul>
 </ul>
 ...to be transcribed from <a href="../../../expofiles/photos/2018/RuairidhMacleod/topcamp-low-res/">